root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. add_request
  9. make_request
  10. ll_rw_page
  11. ll_rw_block
  12. ll_rw_swap_file
  13. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include "blk.h"
  23 
  24 /*
  25  * The request-struct contains all necessary data
  26  * to load a nr of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      do_request-address
  41  *      next-request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
  44         { NULL, NULL },         /* 0 no_dev */
  45         { NULL, NULL },         /* 1 dev mem */
  46         { NULL, NULL },         /* 2 dev fd */
  47         { NULL, NULL },         /* 3 dev ide0 or hd */
  48         { NULL, NULL },         /* 4 dev ttyx */
  49         { NULL, NULL },         /* 5 dev tty */
  50         { NULL, NULL },         /* 6 dev lp */
  51         { NULL, NULL },         /* 7 dev pipes */
  52         { NULL, NULL },         /* 8 dev sd */
  53         { NULL, NULL },         /* 9 dev st */
  54         { NULL, NULL },         /* 10 */
  55         { NULL, NULL },         /* 11 */
  56         { NULL, NULL },         /* 12 */
  57         { NULL, NULL },         /* 13 */
  58         { NULL, NULL },         /* 14 */
  59         { NULL, NULL },         /* 15 */
  60         { NULL, NULL },         /* 16 */
  61         { NULL, NULL },         /* 17 */
  62         { NULL, NULL },         /* 18 */
  63         { NULL, NULL },         /* 19 */
  64         { NULL, NULL },         /* 20 */
  65         { NULL, NULL },         /* 21 */
  66         { NULL, NULL }          /* 22 dev ide1 */
  67 };
  68 
  69 /*
  70  * blk_size contains the size of all block-devices in units of 1024 byte
  71  * sectors:
  72  *
  73  * blk_size[MAJOR][MINOR]
  74  *
  75  * if (!blk_size[MAJOR]) then no minor size checking is done.
  76  */
  77 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  78 
  79 /*
  80  * blksize_size contains the size of all block-devices:
  81  *
  82  * blksize_size[MAJOR][MINOR]
  83  *
  84  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  85  */
  86 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  87 
  88 /*
  89  * hardsect_size contains the size of the hardware sector of a device.
  90  *
  91  * hardsect_size[MAJOR][MINOR]
  92  *
  93  * if (!hardsect_size[MAJOR])
  94  *              then 512 bytes is assumed.
  95  * else
  96  *              sector_size is hardsect_size[MAJOR][MINOR]
  97  * This is currently set by some scsi device and read by the msdos fs driver
  98  * This might be a some uses later.
  99  */
 100 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * "plug" the device if there are no outstanding requests: this will
 104  * force the transfer to start only after we have put all the requests
 105  * on the list.
 106  */
 107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         unsigned long flags;
 110 
 111         plug->dev = -1;
 112         plug->cmd = -1;
 113         plug->next = NULL;
 114         save_flags(flags);
 115         cli();
 116         if (!dev->current_request)
 117                 dev->current_request = plug;
 118         restore_flags(flags);
 119 }
 120 
 121 /*
 122  * remove the plug and let it rip..
 123  */
 124 static void unplug_device(struct blk_dev_struct * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         struct request * req;
 127         unsigned long flags;
 128 
 129         save_flags(flags);
 130         cli();
 131         req = dev->current_request;
 132         if (req && req->dev == -1 && req->cmd == -1) {
 133                 dev->current_request = req->next;
 134                 (dev->request_fn)();
 135         }
 136         restore_flags(flags);
 137 }
 138 
 139 /*
 140  * look for a free request in the first N entries.
 141  * NOTE: interrupts must be disabled on the way in, and will still
 142  *       be disabled on the way out.
 143  */
 144 static inline struct request * get_request(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 145 {
 146         static struct request *prev_found = NULL, *prev_limit = NULL;
 147         register struct request *req, *limit;
 148 
 149         if (n <= 0)
 150                 panic("get_request(%d): impossible!\n", n);
 151 
 152         limit = all_requests + n;
 153         if (limit != prev_limit) {
 154                 prev_limit = limit;
 155                 prev_found = all_requests;
 156         }
 157         req = prev_found;
 158         for (;;) {
 159                 req = ((req > all_requests) ? req : limit) - 1;
 160                 if (req->dev < 0)
 161                         break;
 162                 if (req == prev_found)
 163                         return NULL;
 164         }
 165         prev_found = req;
 166         req->dev = dev;
 167         return req;
 168 }
 169 
 170 /*
 171  * wait until a free request in the first N entries is available.
 172  */
 173 static struct request * __get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 174 {
 175         register struct request *req;
 176         struct wait_queue wait = { current, NULL };
 177 
 178         add_wait_queue(&wait_for_request, &wait);
 179         for (;;) {
 180                 unplug_device(MAJOR(dev)+blk_dev);
 181                 current->state = TASK_UNINTERRUPTIBLE;
 182                 cli();
 183                 req = get_request(n, dev);
 184                 sti();
 185                 if (req)
 186                         break;
 187                 schedule();
 188         }
 189         remove_wait_queue(&wait_for_request, &wait);
 190         current->state = TASK_RUNNING;
 191         return req;
 192 }
 193 
 194 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 195 {
 196         register struct request *req;
 197 
 198         cli();
 199         req = get_request(n, dev);
 200         sti();
 201         if (req)
 202                 return req;
 203         return __get_request_wait(n, dev);
 204 }
 205 
 206 /* RO fail safe mechanism */
 207 
 208 static long ro_bits[MAX_BLKDEV][8];
 209 
 210 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 211 {
 212         int minor,major;
 213 
 214         major = MAJOR(dev);
 215         minor = MINOR(dev);
 216         if (major < 0 || major >= MAX_BLKDEV) return 0;
 217         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 218 }
 219 
 220 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 221 {
 222         int minor,major;
 223 
 224         major = MAJOR(dev);
 225         minor = MINOR(dev);
 226         if (major < 0 || major >= MAX_BLKDEV) return;
 227         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 228         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 229 }
 230 
 231 /*
 232  * add-request adds a request to the linked list.
 233  * It disables interrupts so that it can muck with the
 234  * request-lists in peace.
 235  */
 236 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 237 {
 238         struct request * tmp;
 239         short            disk_index;
 240 
 241         switch (MAJOR(req->dev)) {
 242                 case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
 243                                         if (disk_index < 4)
 244                                                 kstat.dk_drive[disk_index]++;
 245                                         break;
 246                 case HD_MAJOR:
 247                 case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
 248                                         kstat.dk_drive[disk_index]++;
 249                                         break;
 250                 case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
 251                                         kstat.dk_drive[disk_index]++;
 252                 default:                break;
 253         }
 254 
 255         req->next = NULL;
 256         cli();
 257         if (req->bh)
 258                 mark_buffer_clean(req->bh);
 259         if (!(tmp = dev->current_request)) {
 260                 dev->current_request = req;
 261                 (dev->request_fn)();
 262                 sti();
 263                 return;
 264         }
 265         for ( ; tmp->next ; tmp = tmp->next) {
 266                 if ((IN_ORDER(tmp,req) ||
 267                     !IN_ORDER(tmp,tmp->next)) &&
 268                     IN_ORDER(req,tmp->next))
 269                         break;
 270         }
 271         req->next = tmp->next;
 272         tmp->next = req;
 273 
 274 /* for SCSI devices, call request_fn unconditionally */
 275         if (scsi_major(MAJOR(req->dev)))
 276                 (dev->request_fn)();
 277 
 278         sti();
 279 }
 280 
 281 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 282 {
 283         unsigned int sector, count;
 284         struct request * req;
 285         int rw_ahead, max_req;
 286 
 287 /* WRITEA/READA is special case - it is not really needed, so if the */
 288 /* buffer is locked, we just forget about it, else it's a normal read */
 289         rw_ahead = (rw == READA || rw == WRITEA);
 290         if (rw_ahead) {
 291                 if (bh->b_lock)
 292                         return;
 293                 if (rw == READA)
 294                         rw = READ;
 295                 else
 296                         rw = WRITE;
 297         }
 298         if (rw!=READ && rw!=WRITE) {
 299                 printk("Bad block dev command, must be R/W/RA/WA\n");
 300                 return;
 301         }
 302         count = bh->b_size >> 9;
 303         sector = bh->b_blocknr * count;
 304         if (blk_size[major])
 305                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 306                         bh->b_dirt = bh->b_uptodate = 0;
 307                         bh->b_req = 0;
 308                         return;
 309                 }
 310         /* Uhhuh.. Nasty dead-lock possible here.. */
 311         if (bh->b_lock)
 312                 return;
 313         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 314         lock_buffer(bh);
 315         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 316                 unlock_buffer(bh);
 317                 return;
 318         }
 319 
 320 /* we don't allow the write-requests to fill up the queue completely:
 321  * we want some room for reads: they take precedence. The last third
 322  * of the requests are only for reads.
 323  */
 324         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 325 
 326 /* look for a free request. */
 327         cli();
 328 
 329 /* The scsi disk drivers and the IDE driver completely remove the request
 330  * from the queue when they start processing an entry.  For this reason
 331  * it is safe to continue to add links to the top entry for those devices.
 332  */
 333         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 334              || major == IDE1_MAJOR
 335              || major == FLOPPY_MAJOR
 336              || major == SCSI_DISK_MAJOR
 337              || major == SCSI_CDROM_MAJOR)
 338             && (req = blk_dev[major].current_request))
 339         {
 340 #ifdef CONFIG_BLK_DEV_HD
 341                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 342 #else
 343                 if (major == FLOPPY_MAJOR)
 344 #endif CONFIG_BLK_DEV_HD
 345                         req = req->next;
 346                 while (req) {
 347                         if (req->dev == bh->b_dev &&
 348                             !req->sem &&
 349                             req->cmd == rw &&
 350                             req->sector + req->nr_sectors == sector &&
 351                             req->nr_sectors < 244)
 352                         {
 353                                 req->bhtail->b_reqnext = bh;
 354                                 req->bhtail = bh;
 355                                 req->nr_sectors += count;
 356                                 mark_buffer_clean(bh);
 357                                 sti();
 358                                 return;
 359                         }
 360 
 361                         if (req->dev == bh->b_dev &&
 362                             !req->sem &&
 363                             req->cmd == rw &&
 364                             req->sector - count == sector &&
 365                             req->nr_sectors < 244)
 366                         {
 367                                 req->nr_sectors += count;
 368                                 bh->b_reqnext = req->bh;
 369                                 req->buffer = bh->b_data;
 370                                 req->current_nr_sectors = count;
 371                                 req->sector = sector;
 372                                 mark_buffer_clean(bh);
 373                                 req->bh = bh;
 374                                 sti();
 375                                 return;
 376                         }    
 377 
 378                         req = req->next;
 379                 }
 380         }
 381 
 382 /* find an unused request. */
 383         req = get_request(max_req, bh->b_dev);
 384         sti();
 385 
 386 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 387         if (!req) {
 388                 if (rw_ahead) {
 389                         unlock_buffer(bh);
 390                         return;
 391                 }
 392                 req = __get_request_wait(max_req, bh->b_dev);
 393         }
 394 
 395 /* fill up the request-info, and add it to the queue */
 396         req->cmd = rw;
 397         req->errors = 0;
 398         req->sector = sector;
 399         req->nr_sectors = count;
 400         req->current_nr_sectors = count;
 401         req->buffer = bh->b_data;
 402         req->sem = NULL;
 403         req->bh = bh;
 404         req->bhtail = bh;
 405         req->next = NULL;
 406         add_request(major+blk_dev,req);
 407 }
 408 
 409 void ll_rw_page(int rw, int dev, unsigned long page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 410 {
 411         struct request * req;
 412         unsigned int major = MAJOR(dev);
 413         unsigned long sector = page * (PAGE_SIZE / 512);
 414         struct semaphore sem = MUTEX_LOCKED;
 415 
 416         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 417                 printk("Trying to read nonexistent block-device %04x (%ld)\n",dev,sector);
 418                 return;
 419         }
 420         if (rw!=READ && rw!=WRITE)
 421                 panic("Bad block dev command, must be R/W");
 422         if (rw == WRITE && is_read_only(dev)) {
 423                 printk("Can't page to read-only device 0x%X\n",dev);
 424                 return;
 425         }
 426         req = get_request_wait(NR_REQUEST, dev);
 427 /* fill up the request-info, and add it to the queue */
 428         req->cmd = rw;
 429         req->errors = 0;
 430         req->sector = sector;
 431         req->nr_sectors = PAGE_SIZE / 512;
 432         req->current_nr_sectors = PAGE_SIZE / 512;
 433         req->buffer = buffer;
 434         req->sem = &sem;
 435         req->bh = NULL;
 436         req->next = NULL;
 437         add_request(major+blk_dev,req);
 438         down(&sem);
 439 }
 440 
 441 /* This function can be used to request a number of buffers from a block
 442    device. Currently the only restriction is that all buffers must belong to
 443    the same device */
 444 
 445 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
 446 {
 447         unsigned int major;
 448         struct request plug;
 449         int correct_size;
 450         struct blk_dev_struct * dev;
 451         int i;
 452 
 453         /* Make sure that the first block contains something reasonable */
 454         while (!*bh) {
 455                 bh++;
 456                 if (--nr <= 0)
 457                         return;
 458         };
 459 
 460         dev = NULL;
 461         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 462                 dev = blk_dev + major;
 463         if (!dev || !dev->request_fn) {
 464                 printk(
 465         "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
 466                        (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
 467                 goto sorry;
 468         }
 469 
 470         /* Determine correct block size for this device.  */
 471         correct_size = BLOCK_SIZE;
 472         if (blksize_size[major]) {
 473                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 474                 if (i)
 475                         correct_size = i;
 476         }
 477 
 478         /* Verify requested block sizes.  */
 479         for (i = 0; i < nr; i++) {
 480                 if (bh[i] && bh[i]->b_size != correct_size) {
 481                         printk(
 482                         "ll_rw_block: only %d-char blocks implemented (%lu)\n",
 483                                correct_size, bh[i]->b_size);
 484                         goto sorry;
 485                 }
 486         }
 487 
 488         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 489                 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
 490                 goto sorry;
 491         }
 492 
 493         /* If there are no pending requests for this device, then we insert
 494            a dummy request for that device.  This will prevent the request
 495            from starting until we have shoved all of the blocks into the
 496            queue, and then we let it rip.  */
 497 
 498         if (nr > 1)
 499                 plug_device(dev, &plug);
 500         for (i = 0; i < nr; i++) {
 501                 if (bh[i]) {
 502                         bh[i]->b_req = 1;
 503                         make_request(major, rw, bh[i]);
 504                         if (rw == READ || rw == READA)
 505                                 kstat.pgpgin++;
 506                         else
 507                                 kstat.pgpgout++;
 508                 }
 509         }
 510         unplug_device(dev);
 511         return;
 512 
 513       sorry:
 514         for (i = 0; i < nr; i++) {
 515                 if (bh[i])
 516                         bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 517         }
 518         return;
 519 }
 520 
 521 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 522 {
 523         int i, j;
 524         int buffersize;
 525         struct request * req[8];
 526         unsigned int major = MAJOR(dev);
 527         struct semaphore sem = MUTEX_LOCKED;
 528 
 529         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 530                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 531                 return;
 532         }
 533 
 534         if (rw!=READ && rw!=WRITE) {
 535                 printk("ll_rw_swap: bad block dev command, must be R/W");
 536                 return;
 537         }
 538         if (rw == WRITE && is_read_only(dev)) {
 539                 printk("Can't swap to read-only device 0x%X\n",dev);
 540                 return;
 541         }
 542         
 543         buffersize = PAGE_SIZE / nb;
 544 
 545         for (j=0, i=0; i<nb;)
 546         {
 547                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 548                 {
 549                         if (j == 0) {
 550                                 req[j] = get_request_wait(NR_REQUEST, dev);
 551                         } else {
 552                                 cli();
 553                                 req[j] = get_request(NR_REQUEST, dev);
 554                                 sti();
 555                                 if (req[j] == NULL)
 556                                         break;
 557                         }
 558                         req[j]->cmd = rw;
 559                         req[j]->errors = 0;
 560                         req[j]->sector = (b[i] * buffersize) >> 9;
 561                         req[j]->nr_sectors = buffersize >> 9;
 562                         req[j]->current_nr_sectors = buffersize >> 9;
 563                         req[j]->buffer = buf;
 564                         req[j]->sem = &sem;
 565                         req[j]->bh = NULL;
 566                         req[j]->next = NULL;
 567                         add_request(major+blk_dev,req[j]);
 568                 }
 569                 while (j > 0) {
 570                         j--;
 571                         down(&sem);
 572                 }
 573         }
 574 }
 575 
 576 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 577 {
 578         struct request * req;
 579 
 580         req = all_requests + NR_REQUEST;
 581         while (--req >= all_requests) {
 582                 req->dev = -1;
 583                 req->next = NULL;
 584         }
 585         memset(ro_bits,0,sizeof(ro_bits));
 586 #ifdef CONFIG_BLK_DEV_HD
 587         mem_start = hd_init(mem_start,mem_end);
 588 #endif
 589 #ifdef CONFIG_BLK_DEV_IDE
 590         mem_start = ide_init(mem_start,mem_end);
 591 #endif
 592 #ifdef CONFIG_BLK_DEV_XD
 593         mem_start = xd_init(mem_start,mem_end);
 594 #endif
 595 #ifdef CONFIG_BLK_DEV_FD
 596         floppy_init();
 597 #else
 598         outb_p(0xc, 0x3f2);
 599 #endif
 600 #ifdef CONFIG_CDU31A
 601         mem_start = cdu31a_init(mem_start,mem_end);
 602 #endif CONFIG_CDU31A
 603 #ifdef CONFIG_MCD
 604         mem_start = mcd_init(mem_start,mem_end);
 605 #endif CONFIG_MCD
 606 #ifdef CONFIG_MCDX
 607         mem_start = mcdx_init(mem_start,mem_end);
 608 #endif CONFIG_MCDX
 609 #ifdef CONFIG_SBPCD
 610         mem_start = sbpcd_init(mem_start, mem_end);
 611 #endif CONFIG_SBPCD
 612 #ifdef CONFIG_AZTCD
 613         mem_start = aztcd_init(mem_start,mem_end);
 614 #endif CONFIG_AZTCD
 615 #ifdef CONFIG_CDU535
 616         mem_start = sony535_init(mem_start,mem_end);
 617 #endif CONFIG_CDU535
 618 #ifdef CONFIG_GSCD
 619         mem_start = gscd_init(mem_start, mem_end);
 620 #endif CONFIG_GSCD
 621 #ifdef CONFIG_CM206
 622         mem_start = cm206_init(mem_start, mem_end);
 623 #endif
 624 #ifdef CONFIG_OPTCD
 625         mem_start = optcd_init(mem_start,mem_end);
 626 #endif CONFIG_OPTCD
 627 #ifdef CONFIG_SJCD
 628         mem_start = sjcd_init(mem_start,mem_end);
 629 #endif CONFIG_SJCD
 630         if (ramdisk_size)
 631                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 632         return mem_start;
 633 }

/* [previous][next][first][last][top][bottom][index][help] */