root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include "blk.h"
  23 
  24 /*
  25  * The request-struct contains all necessary data
  26  * to load a nr of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      do_request-address
  41  *      next-request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
  44         { NULL, NULL },         /* 0 no_dev */
  45         { NULL, NULL },         /* 1 dev mem */
  46         { NULL, NULL },         /* 2 dev fd */
  47         { NULL, NULL },         /* 3 dev ide0 or hd */
  48         { NULL, NULL },         /* 4 dev ttyx */
  49         { NULL, NULL },         /* 5 dev tty */
  50         { NULL, NULL },         /* 6 dev lp */
  51         { NULL, NULL },         /* 7 dev pipes */
  52         { NULL, NULL },         /* 8 dev sd */
  53         { NULL, NULL },         /* 9 dev st */
  54         { NULL, NULL },         /* 10 */
  55         { NULL, NULL },         /* 11 */
  56         { NULL, NULL },         /* 12 */
  57         { NULL, NULL },         /* 13 */
  58         { NULL, NULL },         /* 14 */
  59         { NULL, NULL },         /* 15 */
  60         { NULL, NULL },         /* 16 */
  61         { NULL, NULL },         /* 17 */
  62         { NULL, NULL },         /* 18 */
  63         { NULL, NULL },         /* 19 */
  64         { NULL, NULL },         /* 20 */
  65         { NULL, NULL },         /* 21 */
  66         { NULL, NULL }          /* 22 dev ide1 */
  67 };
  68 
  69 /*
  70  * blk_size contains the size of all block-devices in units of 1024 byte
  71  * sectors:
  72  *
  73  * blk_size[MAJOR][MINOR]
  74  *
  75  * if (!blk_size[MAJOR]) then no minor size checking is done.
  76  */
  77 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  78 
  79 /*
  80  * blksize_size contains the size of all block-devices:
  81  *
  82  * blksize_size[MAJOR][MINOR]
  83  *
  84  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  85  */
  86 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  87 
  88 /*
  89  * hardsect_size contains the size of the hardware sector of a device.
  90  *
  91  * hardsect_size[MAJOR][MINOR]
  92  *
  93  * if (!hardsect_size[MAJOR])
  94  *              then 512 bytes is assumed.
  95  * else
  96  *              sector_size is hardsect_size[MAJOR][MINOR]
  97  * This is currently set by some scsi device and read by the msdos fs driver
  98  * This might be a some uses later.
  99  */
 100 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * look for a free request in the first N entries.
 104  * NOTE: interrupts must be disabled on the way in, and will still
 105  *       be disabled on the way out.
 106  */
 107 static inline struct request * get_request(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         static struct request *prev_found = NULL, *prev_limit = NULL;
 110         register struct request *req, *limit;
 111 
 112         if (n <= 0)
 113                 panic("get_request(%d): impossible!\n", n);
 114 
 115         limit = all_requests + n;
 116         if (limit != prev_limit) {
 117                 prev_limit = limit;
 118                 prev_found = all_requests;
 119         }
 120         req = prev_found;
 121         for (;;) {
 122                 req = ((req > all_requests) ? req : limit) - 1;
 123                 if (req->dev < 0)
 124                         break;
 125                 if (req == prev_found)
 126                         return NULL;
 127         }
 128         prev_found = req;
 129         req->dev = dev;
 130         return req;
 131 }
 132 
 133 /*
 134  * wait until a free request in the first N entries is available.
 135  * NOTE: interrupts must be disabled on the way in, and will still
 136  *       be disabled on the way out.
 137  */
 138 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 139 {
 140         register struct request *req;
 141 
 142         while ((req = get_request(n, dev)) == NULL)
 143                 sleep_on(&wait_for_request);
 144         return req;
 145 }
 146 
 147 /* RO fail safe mechanism */
 148 
 149 static long ro_bits[MAX_BLKDEV][8];
 150 
 151 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 152 {
 153         int minor,major;
 154 
 155         major = MAJOR(dev);
 156         minor = MINOR(dev);
 157         if (major < 0 || major >= MAX_BLKDEV) return 0;
 158         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 159 }
 160 
 161 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 162 {
 163         int minor,major;
 164 
 165         major = MAJOR(dev);
 166         minor = MINOR(dev);
 167         if (major < 0 || major >= MAX_BLKDEV) return;
 168         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 169         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 170 }
 171 
 172 /*
 173  * add-request adds a request to the linked list.
 174  * It disables interrupts so that it can muck with the
 175  * request-lists in peace.
 176  */
  177 static void add_request(struct blk_dev_struct * dev, struct request * req)
  178 {
  179         struct request * tmp;
  180         short            disk_index;
  181 
              /* Per-drive I/O accounting: map major/minor onto one of the four
               * kstat.dk_drive counters.  HD falls through to XT on purpose
               * (same minor layout).  NOTE(review): the IDE1_MAJOR case falls
               * through into "default: break" -- harmless, but an explicit
               * break would be clearer. */
  182         switch (MAJOR(req->dev)) {
  183                 case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
  184                                         if (disk_index < 4)
  185                                                 kstat.dk_drive[disk_index]++;
  186                                         break;
  187                 case HD_MAJOR:
  188                 case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
  189                                         kstat.dk_drive[disk_index]++;
  190                                         break;
  191                 case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
  192                                         kstat.dk_drive[disk_index]++;
  193                 default:                break;
  194         }
  195 
  196         req->next = NULL;
  197         cli();
              /* buffer contents are committed to the queue now, so it is no
               * longer "dirty" from the buffer cache's point of view */
  198         if (req->bh)
  199                 mark_buffer_clean(req->bh);
              /* empty queue: install the request and kick the driver directly */
  200         if (!(tmp = dev->current_request)) {
  201                 dev->current_request = req;
  202                 (dev->request_fn)();
  203                 sti();
  204                 return;
  205         }
              /* elevator insertion: walk the list (never touching the head,
               * which the driver may be working on) and insert at the first
               * position that keeps, or restores, IN_ORDER sorting */
  206         for ( ; tmp->next ; tmp = tmp->next) {
  207                 if ((IN_ORDER(tmp,req) ||
  208                     !IN_ORDER(tmp,tmp->next)) &&
  209                     IN_ORDER(req,tmp->next))
  210                         break;
  211         }
  212         req->next = tmp->next;
  213         tmp->next = req;
  214 
  215 /* for SCSI devices, call request_fn unconditionally */
  216         if (scsi_major(MAJOR(req->dev)))
  217                 (dev->request_fn)();
  218 
  219         sti();
  220 }
 221 
 222 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 223 {
 224         unsigned int sector, count;
 225         struct request * req;
 226         int rw_ahead, max_req;
 227 
 228 /* WRITEA/READA is special case - it is not really needed, so if the */
 229 /* buffer is locked, we just forget about it, else it's a normal read */
 230         rw_ahead = (rw == READA || rw == WRITEA);
 231         if (rw_ahead) {
 232                 if (bh->b_lock)
 233                         return;
 234                 if (rw == READA)
 235                         rw = READ;
 236                 else
 237                         rw = WRITE;
 238         }
 239         if (rw!=READ && rw!=WRITE) {
 240                 printk("Bad block dev command, must be R/W/RA/WA\n");
 241                 return;
 242         }
 243         count = bh->b_size >> 9;
 244         sector = bh->b_blocknr * count;
 245         if (blk_size[major])
 246                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 247                         bh->b_dirt = bh->b_uptodate = 0;
 248                         bh->b_req = 0;
 249                         return;
 250                 }
 251         /* Uhhuh.. Nasty dead-lock possible here.. */
 252         if (bh->b_lock)
 253                 return;
 254         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 255         lock_buffer(bh);
 256         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 257                 unlock_buffer(bh);
 258                 return;
 259         }
 260 
 261 /* we don't allow the write-requests to fill up the queue completely:
 262  * we want some room for reads: they take precedence. The last third
 263  * of the requests are only for reads.
 264  */
 265         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 266 
 267 /* big loop: look for a free request. */
 268 
 269 repeat:
 270         cli();
 271 
 272 /* The scsi disk drivers and the IDE driver completely remove the request
 273  * from the queue when they start processing an entry.  For this reason
 274  * it is safe to continue to add links to the top entry for those devices.
 275  */
 276         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 277              || major == IDE1_MAJOR
 278              || major == FLOPPY_MAJOR
 279              || major == SCSI_DISK_MAJOR
 280              || major == SCSI_CDROM_MAJOR)
 281             && (req = blk_dev[major].current_request))
 282         {
 283 #ifdef CONFIG_BLK_DEV_HD
 284                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 285 #else
 286                 if (major == FLOPPY_MAJOR)
 287 #endif CONFIG_BLK_DEV_HD
 288                         req = req->next;
 289                 while (req) {
 290                         if (req->dev == bh->b_dev &&
 291                             !req->sem &&
 292                             req->cmd == rw &&
 293                             req->sector + req->nr_sectors == sector &&
 294                             req->nr_sectors < 244)
 295                         {
 296                                 req->bhtail->b_reqnext = bh;
 297                                 req->bhtail = bh;
 298                                 req->nr_sectors += count;
 299                                 mark_buffer_clean(bh);
 300                                 sti();
 301                                 return;
 302                         }
 303 
 304                         if (req->dev == bh->b_dev &&
 305                             !req->sem &&
 306                             req->cmd == rw &&
 307                             req->sector - count == sector &&
 308                             req->nr_sectors < 244)
 309                         {
 310                                 req->nr_sectors += count;
 311                                 bh->b_reqnext = req->bh;
 312                                 req->buffer = bh->b_data;
 313                                 req->current_nr_sectors = count;
 314                                 req->sector = sector;
 315                                 mark_buffer_clean(bh);
 316                                 req->bh = bh;
 317                                 sti();
 318                                 return;
 319                         }    
 320 
 321                         req = req->next;
 322                 }
 323         }
 324 
 325 /* find an unused request. */
 326         req = get_request(max_req, bh->b_dev);
 327 
 328 /* if no request available: if rw_ahead, forget it; otherwise try again. */
 329         if (! req) {
 330                 if (rw_ahead) {
 331                         sti();
 332                         unlock_buffer(bh);
 333                         return;
 334                 }
 335                 sleep_on(&wait_for_request);
 336                 sti();
 337                 goto repeat;
 338         }
 339 
 340 /* we found a request. */
 341         sti();
 342 
 343 /* fill up the request-info, and add it to the queue */
 344         req->cmd = rw;
 345         req->errors = 0;
 346         req->sector = sector;
 347         req->nr_sectors = count;
 348         req->current_nr_sectors = count;
 349         req->buffer = bh->b_data;
 350         req->sem = NULL;
 351         req->bh = bh;
 352         req->bhtail = bh;
 353         req->next = NULL;
 354         add_request(major+blk_dev,req);
 355 }
 356 
 357 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 358 {
 359         struct request * req;
 360         unsigned int major = MAJOR(dev);
 361         struct semaphore sem = MUTEX_LOCKED;
 362 
 363         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 364                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 365                 return;
 366         }
 367         if (rw!=READ && rw!=WRITE)
 368                 panic("Bad block dev command, must be R/W");
 369         if (rw == WRITE && is_read_only(dev)) {
 370                 printk("Can't page to read-only device 0x%X\n",dev);
 371                 return;
 372         }
 373         cli();
 374         req = get_request_wait(NR_REQUEST, dev);
 375         sti();
 376 /* fill up the request-info, and add it to the queue */
 377         req->cmd = rw;
 378         req->errors = 0;
 379         req->sector = page<<3;
 380         req->nr_sectors = 8;
 381         req->current_nr_sectors = 8;
 382         req->buffer = buffer;
 383         req->sem = &sem;
 384         req->bh = NULL;
 385         req->next = NULL;
 386         add_request(major+blk_dev,req);
 387         down(&sem);
 388 }
 389 
 390 /* This function can be used to request a number of buffers from a block
 391    device. Currently the only restriction is that all buffers must belong to
 392    the same device */
 393 
  394 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
  395 {
  396         unsigned int major;
  397         struct request plug;
  398         int plugged;
  399         int correct_size;
  400         struct blk_dev_struct * dev;
  401         int i;
  402 
  403         /* Make sure that the first block contains something reasonable */
  404         while (!*bh) {
  405                 bh++;
  406                 if (--nr <= 0)
  407                         return;
  408         };
  409 
  410         dev = NULL;
  411         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
  412                 dev = blk_dev + major;
  413         if (!dev || !dev->request_fn) {
  414                 printk(
  415         "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
  416                        (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
  417                 goto sorry;
  418         }
  419 
  420         /* Determine correct block size for this device.  */
  421         correct_size = BLOCK_SIZE;
  422         if (blksize_size[major]) {
  423                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
  424                 if (i)
  425                         correct_size = i;
  426         }
  427 
  428         /* Verify requested block sizes.  */
  429         for (i = 0; i < nr; i++) {
  430                 if (bh[i] && bh[i]->b_size != correct_size) {
  431                         printk(
  432                         "ll_rw_block: only %d-char blocks implemented (%lu)\n",
  433                                correct_size, bh[i]->b_size);
  434                         goto sorry;
  435                 }
  436         }
  437 
  438         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
  439                 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
  440                 goto sorry;
  441         }
  442 
  443         /* If there are no pending requests for this device, then we insert
  444            a dummy request for that device.  This will prevent the request
  445            from starting until we have shoved all of the blocks into the
  446            queue, and then we let it rip.  */
  447 
              /* "plugging": only worth doing for multi-buffer submissions
               * (nr > 1); plug.dev = -1 marks the on-stack entry as not a
               * real transfer.  cli/sti ordering here is load-bearing: the
               * queue head must not change between the test and the store. */
  448         plugged = 0;
  449         cli();
  450         if (!dev->current_request && nr > 1) {
  451                 dev->current_request = &plug;
  452                 plug.dev = -1;
  453                 plug.next = NULL;
  454                 plugged = 1;
  455         }
  456         sti();
              /* queue every non-NULL buffer and account page-in/page-out */
  457         for (i = 0; i < nr; i++) {
  458                 if (bh[i]) {
  459                         bh[i]->b_req = 1;
  460                         make_request(major, rw, bh[i]);
  461                         if (rw == READ || rw == READA)
  462                                 kstat.pgpgin++;
  463                         else
  464                                 kstat.pgpgout++;
  465                 }
  466         }
              /* unplug: pop the dummy off the head and start the driver on
               * the real requests queued behind it */
  467         if (plugged) {
  468                 cli();
  469                 dev->current_request = plug.next;
  470                 (dev->request_fn)();
  471                 sti();
  472         }
  473         return;
  474 
              /* error exit: invalidate all supplied buffers */
  475       sorry:
  476         for (i = 0; i < nr; i++) {
  477                 if (bh[i])
  478                         bh[i]->b_dirt = bh[i]->b_uptodate = 0;
  479         }
  480         return;
  481 }
 482 
 483 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 484 {
 485         int i;
 486         int buffersize;
 487         struct request * req;
 488         unsigned int major = MAJOR(dev);
 489         struct semaphore sem = MUTEX_LOCKED;
 490 
 491         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 492                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 493                 return;
 494         }
 495 
 496         if (rw!=READ && rw!=WRITE) {
 497                 printk("ll_rw_swap: bad block dev command, must be R/W");
 498                 return;
 499         }
 500         if (rw == WRITE && is_read_only(dev)) {
 501                 printk("Can't swap to read-only device 0x%X\n",dev);
 502                 return;
 503         }
 504         
 505         buffersize = PAGE_SIZE / nb;
 506 
 507         for (i=0; i<nb; i++, buf += buffersize)
 508         {
 509                 cli();
 510                 req = get_request_wait(NR_REQUEST, dev);
 511                 sti();
 512                 req->cmd = rw;
 513                 req->errors = 0;
 514                 req->sector = (b[i] * buffersize) >> 9;
 515                 req->nr_sectors = buffersize >> 9;
 516                 req->current_nr_sectors = buffersize >> 9;
 517                 req->buffer = buf;
 518                 req->sem = &sem;
 519                 req->bh = NULL;
 520                 req->next = NULL;
 521                 add_request(major+blk_dev,req);
 522                 down(&sem);
 523         }
 524 }
 525 
 526 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 527 {
 528         struct request * req;
 529 
 530         req = all_requests + NR_REQUEST;
 531         while (--req >= all_requests) {
 532                 req->dev = -1;
 533                 req->next = NULL;
 534         }
 535         memset(ro_bits,0,sizeof(ro_bits));
 536 #ifdef CONFIG_BLK_DEV_HD
 537         mem_start = hd_init(mem_start,mem_end);
 538 #endif
 539 #ifdef CONFIG_BLK_DEV_IDE
 540         mem_start = ide_init(mem_start,mem_end);
 541 #endif
 542 #ifdef CONFIG_BLK_DEV_XD
 543         mem_start = xd_init(mem_start,mem_end);
 544 #endif
 545 #ifdef CONFIG_CDU31A
 546         mem_start = cdu31a_init(mem_start,mem_end);
 547 #endif
 548 #ifdef CONFIG_MCD
 549         mem_start = mcd_init(mem_start,mem_end);
 550 #endif
 551 #ifdef CONFIG_AZTCD
 552         mem_start = aztcd_init(mem_start,mem_end);
 553 #endif
 554 #ifdef CONFIG_BLK_DEV_FD
 555         floppy_init();
 556 #else
 557         outb_p(0xc, 0x3f2);
 558 #endif
 559 #ifdef CONFIG_SBPCD
 560         mem_start = sbpcd_init(mem_start, mem_end);
 561 #endif CONFIG_SBPCD
 562         if (ramdisk_size)
 563                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 564         return mem_start;
 565 }

/* [previous][next][first][last][top][bottom][index][help] */