root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 
  19 #include <asm/system.h>
  20 #include <asm/io.h>
  21 #include "blk.h"
  22 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 * (see get_request_wait; woken by drivers when a request completes)
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* 0 no_dev */
        { NULL, NULL },         /* 1 dev mem */
        { NULL, NULL },         /* 2 dev fd */
        { NULL, NULL },         /* 3 dev ide0 or hd */
        { NULL, NULL },         /* 4 dev ttyx */
        { NULL, NULL },         /* 5 dev tty */
        { NULL, NULL },         /* 6 dev lp */
        { NULL, NULL },         /* 7 dev pipes */
        { NULL, NULL },         /* 8 dev sd */
        { NULL, NULL },         /* 9 dev st */
        { NULL, NULL },         /* 10 */
        { NULL, NULL },         /* 11 */
        { NULL, NULL },         /* 12 */
        { NULL, NULL },         /* 13 */
        { NULL, NULL },         /* 14 */
        { NULL, NULL },         /* 15 */
        { NULL, NULL },         /* 16 */
        { NULL, NULL },         /* 17 */
        { NULL, NULL },         /* 18 */
        { NULL, NULL },         /* 19 */
        { NULL, NULL },         /* 20 */
        { NULL, NULL },         /* 21 */
        { NULL, NULL }          /* 22 dev ide1 */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs
 * driver; it might find further uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 100 
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 *
 * A free slot is one whose dev field is negative (set up by
 * blk_dev_init and restored by the drivers on completion).
 * Returns NULL when all of the first n slots are in use.
 */
static inline struct request * get_request(int n, int dev)
{
        /* Remember where the previous search stopped, so successive calls
         * rotate through the table instead of always hammering one end. */
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                /* Caller changed n: restart the rotation from a known point. */
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                /* Step backwards, wrapping from the table start to limit-1. */
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                if (req == prev_found)
                        return NULL;    /* came full circle: nothing free */
        }
        prev_found = req;
        req->dev = dev;         /* claim the slot for this device */
        return req;
}
 131 
 132 /*
 133  * wait until a free request in the first N entries is available.
 134  * NOTE: interrupts must be disabled on the way in, and will still
 135  *       be disabled on the way out.
 136  */
 137 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 138 {
 139         register struct request *req;
 140 
 141         while ((req = get_request(n, dev)) == NULL)
 142                 sleep_on(&wait_for_request);
 143         return req;
 144 }
 145 
 146 /* RO fail safe mechanism */
 147 
 148 static long ro_bits[MAX_BLKDEV][8];
 149 
 150 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 151 {
 152         int minor,major;
 153 
 154         major = MAJOR(dev);
 155         minor = MINOR(dev);
 156         if (major < 0 || major >= MAX_BLKDEV) return 0;
 157         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 158 }
 159 
 160 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 161 {
 162         int minor,major;
 163 
 164         major = MAJOR(dev);
 165         minor = MINOR(dev);
 166         if (major < 0 || major >= MAX_BLKDEV) return;
 167         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 168         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 169 }
 170 
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * If the device queue was empty the driver's request_fn is started
 * immediately; otherwise the request is inserted in elevator order
 * (IN_ORDER) behind the currently active head request.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        /* Per-disk I/O accounting: map the request's device onto one of
         * the kstat.dk_drive counters. */
        switch (MAJOR(req->dev)) {
                case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                case HD_MAJOR:          /* fallthrough: HD uses XT's indexing */
                case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
                                        kstat.dk_drive[disk_index]++;
                                        break;
                case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
                                        kstat.dk_drive[disk_index]++;
                                        /* falls through to default, which only breaks */
                default:                break;
        }

        req->next = NULL;
        cli();          /* the request list is also touched from interrupts */
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                /* Queue was empty: install the request and kick the driver. */
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        /* Elevator insertion: stop at the first position where req fits in
         * sorted order, or where the existing list itself wraps around. */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
 220 
 221 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 222 {
 223         unsigned int sector, count;
 224         struct request * req;
 225         int rw_ahead, max_req;
 226 
 227 /* WRITEA/READA is special case - it is not really needed, so if the */
 228 /* buffer is locked, we just forget about it, else it's a normal read */
 229         rw_ahead = (rw == READA || rw == WRITEA);
 230         if (rw_ahead) {
 231                 if (bh->b_lock)
 232                         return;
 233                 if (rw == READA)
 234                         rw = READ;
 235                 else
 236                         rw = WRITE;
 237         }
 238         if (rw!=READ && rw!=WRITE) {
 239                 printk("Bad block dev command, must be R/W/RA/WA\n");
 240                 return;
 241         }
 242         count = bh->b_size >> 9;
 243         sector = bh->b_blocknr * count;
 244         if (blk_size[major])
 245                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 246                         bh->b_dirt = bh->b_uptodate = 0;
 247                         bh->b_req = 0;
 248                         return;
 249                 }
 250         /* Uhhuh.. Nasty dead-lock possible here.. */
 251         if (bh->b_lock)
 252                 return;
 253         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 254         lock_buffer(bh);
 255         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 256                 unlock_buffer(bh);
 257                 return;
 258         }
 259 
 260 /* we don't allow the write-requests to fill up the queue completely:
 261  * we want some room for reads: they take precedence. The last third
 262  * of the requests are only for reads.
 263  */
 264         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 265 
 266 /* big loop: look for a free request. */
 267 
 268 repeat:
 269         cli();
 270 
 271 /* The scsi disk drivers and the IDE driver completely remove the request
 272  * from the queue when they start processing an entry.  For this reason
 273  * it is safe to continue to add links to the top entry for those devices.
 274  */
 275         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 276              || major == IDE1_MAJOR
 277              || major == FLOPPY_MAJOR
 278              || major == SCSI_DISK_MAJOR
 279              || major == SCSI_CDROM_MAJOR)
 280             && (req = blk_dev[major].current_request))
 281         {
 282 #ifdef CONFIG_BLK_DEV_HD
 283                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 284 #else
 285                 if (major == FLOPPY_MAJOR)
 286 #endif CONFIG_BLK_DEV_HD
 287                         req = req->next;
 288                 while (req) {
 289                         if (req->dev == bh->b_dev &&
 290                             !req->sem &&
 291                             req->cmd == rw &&
 292                             req->sector + req->nr_sectors == sector &&
 293                             req->nr_sectors < 244)
 294                         {
 295                                 req->bhtail->b_reqnext = bh;
 296                                 req->bhtail = bh;
 297                                 req->nr_sectors += count;
 298                                 mark_buffer_clean(bh);
 299                                 sti();
 300                                 return;
 301                         }
 302 
 303                         if (req->dev == bh->b_dev &&
 304                             !req->sem &&
 305                             req->cmd == rw &&
 306                             req->sector - count == sector &&
 307                             req->nr_sectors < 244)
 308                         {
 309                                 req->nr_sectors += count;
 310                                 bh->b_reqnext = req->bh;
 311                                 req->buffer = bh->b_data;
 312                                 req->current_nr_sectors = count;
 313                                 req->sector = sector;
 314                                 mark_buffer_clean(bh);
 315                                 req->bh = bh;
 316                                 sti();
 317                                 return;
 318                         }    
 319 
 320                         req = req->next;
 321                 }
 322         }
 323 
 324 /* find an unused request. */
 325         req = get_request(max_req, bh->b_dev);
 326 
 327 /* if no request available: if rw_ahead, forget it; otherwise try again. */
 328         if (! req) {
 329                 if (rw_ahead) {
 330                         sti();
 331                         unlock_buffer(bh);
 332                         return;
 333                 }
 334                 sleep_on(&wait_for_request);
 335                 sti();
 336                 goto repeat;
 337         }
 338 
 339 /* we found a request. */
 340         sti();
 341 
 342 /* fill up the request-info, and add it to the queue */
 343         req->cmd = rw;
 344         req->errors = 0;
 345         req->sector = sector;
 346         req->nr_sectors = count;
 347         req->current_nr_sectors = count;
 348         req->buffer = bh->b_data;
 349         req->sem = NULL;
 350         req->bh = bh;
 351         req->bhtail = bh;
 352         req->next = NULL;
 353         add_request(major+blk_dev,req);
 354 }
 355 
 356 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 357 {
 358         struct request * req;
 359         unsigned int major = MAJOR(dev);
 360         struct semaphore sem = MUTEX_LOCKED;
 361 
 362         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 363                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 364                 return;
 365         }
 366         if (rw!=READ && rw!=WRITE)
 367                 panic("Bad block dev command, must be R/W");
 368         if (rw == WRITE && is_read_only(dev)) {
 369                 printk("Can't page to read-only device 0x%X\n",dev);
 370                 return;
 371         }
 372         cli();
 373         req = get_request_wait(NR_REQUEST, dev);
 374         sti();
 375 /* fill up the request-info, and add it to the queue */
 376         req->cmd = rw;
 377         req->errors = 0;
 378         req->sector = page<<3;
 379         req->nr_sectors = 8;
 380         req->current_nr_sectors = 8;
 381         req->buffer = buffer;
 382         req->sem = &sem;
 383         req->bh = NULL;
 384         req->next = NULL;
 385         add_request(major+blk_dev,req);
 386         down(&sem);
 387 }
 388 
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;    /* stack-allocated dummy used to plug the queue */
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        };

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                /* dev == -1 marks the plug as not a real transfer. */
                dev->current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        /* paging statistics */
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                /* Unplug: drop the dummy and start the driver on the
                 * requests queued behind it. */
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

      sorry:
        /* Error path: invalidate every buffer so callers see the failure. */
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
 481 
 482 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 483 {
 484         int i;
 485         int buffersize;
 486         struct request * req;
 487         unsigned int major = MAJOR(dev);
 488         struct semaphore sem = MUTEX_LOCKED;
 489 
 490         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 491                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 492                 return;
 493         }
 494 
 495         if (rw!=READ && rw!=WRITE) {
 496                 printk("ll_rw_swap: bad block dev command, must be R/W");
 497                 return;
 498         }
 499         if (rw == WRITE && is_read_only(dev)) {
 500                 printk("Can't swap to read-only device 0x%X\n",dev);
 501                 return;
 502         }
 503         
 504         buffersize = PAGE_SIZE / nb;
 505 
 506         for (i=0; i<nb; i++, buf += buffersize)
 507         {
 508                 cli();
 509                 req = get_request_wait(NR_REQUEST, dev);
 510                 sti();
 511                 req->cmd = rw;
 512                 req->errors = 0;
 513                 req->sector = (b[i] * buffersize) >> 9;
 514                 req->nr_sectors = buffersize >> 9;
 515                 req->current_nr_sectors = buffersize >> 9;
 516                 req->buffer = buf;
 517                 req->sem = &sem;
 518                 req->bh = NULL;
 519                 req->next = NULL;
 520                 add_request(major+blk_dev,req);
 521                 down(&sem);
 522         }
 523 }
 524 
 525 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 526 {
 527         struct request * req;
 528 
 529         req = all_requests + NR_REQUEST;
 530         while (--req >= all_requests) {
 531                 req->dev = -1;
 532                 req->next = NULL;
 533         }
 534         memset(ro_bits,0,sizeof(ro_bits));
 535 #ifdef CONFIG_BLK_DEV_HD
 536         mem_start = hd_init(mem_start,mem_end);
 537 #endif
 538 #ifdef CONFIG_BLK_DEV_IDE
 539         mem_start = ide_init(mem_start,mem_end);
 540 #endif
 541 #ifdef CONFIG_BLK_DEV_XD
 542         mem_start = xd_init(mem_start,mem_end);
 543 #endif
 544 #ifdef CONFIG_CDU31A
 545         mem_start = cdu31a_init(mem_start,mem_end);
 546 #endif
 547 #ifdef CONFIG_MCD
 548         mem_start = mcd_init(mem_start,mem_end);
 549 #endif
 550 #ifdef CONFIG_BLK_DEV_FD
 551         floppy_init();
 552 #else
 553         outb_p(0xc, 0x3f2);
 554 #endif
 555 #ifdef CONFIG_SBPCD
 556         mem_start = sbpcd_init(mem_start, mem_end);
 557 #endif CONFIG_SBPCD
 558         if (ramdisk_size)
 559                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 560         return mem_start;
 561 }

/* [previous][next][first][last][top][bottom][index][help] */