root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. add_request
  9. make_request
  10. ll_rw_page
  11. ll_rw_block
  12. ll_rw_swap_file
  13. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include "blk.h"
  23 
   24 /*
   25  * The request-struct contains all necessary data
   26  * to load a nr of sectors into memory
   27  */
   28 static struct request all_requests[NR_REQUEST];
   29 
   30 /*
   31  * used to wait on when there are no free requests
   32  */
   33 struct wait_queue * wait_for_request = NULL;
   34 
   35 /* This specifies how many sectors to read ahead on the disk.  */
   36 
   37 int read_ahead[MAX_BLKDEV] = {0, };
   38 
/* blk_dev_struct holds, per major number:
 *      request_fn      - the driver's request-handling routine
 *      current_request - head of that driver's pending request queue
 */
   43 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
   44         { NULL, NULL },         /* 0 no_dev */
   45         { NULL, NULL },         /* 1 dev mem */
   46         { NULL, NULL },         /* 2 dev fd */
   47         { NULL, NULL },         /* 3 dev ide0 or hd */
   48         { NULL, NULL },         /* 4 dev ttyx */
   49         { NULL, NULL },         /* 5 dev tty */
   50         { NULL, NULL },         /* 6 dev lp */
   51         { NULL, NULL },         /* 7 dev pipes */
   52         { NULL, NULL },         /* 8 dev sd */
   53         { NULL, NULL },         /* 9 dev st */
   54         { NULL, NULL },         /* 10 */
   55         { NULL, NULL },         /* 11 */
   56         { NULL, NULL },         /* 12 */
   57         { NULL, NULL },         /* 13 */
   58         { NULL, NULL },         /* 14 */
   59         { NULL, NULL },         /* 15 */
   60         { NULL, NULL },         /* 16 */
   61         { NULL, NULL },         /* 17 */
   62         { NULL, NULL },         /* 18 */
   63         { NULL, NULL },         /* 19 */
   64         { NULL, NULL },         /* 20 */
   65         { NULL, NULL },         /* 21 */
   66         { NULL, NULL }          /* 22 dev ide1 */
   67 };
   68 
   69 /*
   70  * blk_size contains the size of all block-devices in units of 1024 byte
   71  * sectors:
   72  *
   73  * blk_size[MAJOR][MINOR]
   74  *
   75  * if (!blk_size[MAJOR]) then no minor size checking is done.
   76  */
   77 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
   78 
   79 /*
   80  * blksize_size contains the size of all block-devices:
   81  *
   82  * blksize_size[MAJOR][MINOR]
   83  *
   84  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
   85  */
   86 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
   87 
   88 /*
   89  * hardsect_size contains the size of the hardware sector of a device.
   90  *
   91  * hardsect_size[MAJOR][MINOR]
   92  *
   93  * if (!hardsect_size[MAJOR])
   94  *              then 512 bytes is assumed.
   95  * else
   96  *              sector_size is hardsect_size[MAJOR][MINOR]
   97  * This is currently set by some scsi device and read by the msdos fs driver
   98  * It may find more uses later.
   99  */
  100 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * "plug" the device if there are no outstanding requests: this will
 104  * force the transfer to start only after we have put all the requests
 105  * on the list.
 106  */
 107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         unsigned long flags;
 110 
 111         plug->dev = -1;
 112         plug->cmd = -1;
 113         plug->next = NULL;
 114         save_flags(flags);
 115         cli();
 116         if (!dev->current_request)
 117                 dev->current_request = plug;
 118         restore_flags(flags);
 119 }
 120 
 121 /*
 122  * remove the plug and let it rip..
 123  */
 124 static void unplug_device(struct blk_dev_struct * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         struct request * req;
 127         unsigned long flags;
 128 
 129         save_flags(flags);
 130         cli();
 131         req = dev->current_request;
 132         if (req && req->dev == -1 && req->cmd == -1) {
 133                 dev->current_request = req->next;
 134                 (dev->request_fn)();
 135         }
 136         restore_flags(flags);
 137 }
 138 
 139 /*
 140  * look for a free request in the first N entries.
 141  * NOTE: interrupts must be disabled on the way in, and will still
 142  *       be disabled on the way out.
 143  */
 144 static inline struct request * get_request(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 145 {
 146         static struct request *prev_found = NULL, *prev_limit = NULL;
 147         register struct request *req, *limit;
 148 
 149         if (n <= 0)
 150                 panic("get_request(%d): impossible!\n", n);
 151 
 152         limit = all_requests + n;
 153         if (limit != prev_limit) {
 154                 prev_limit = limit;
 155                 prev_found = all_requests;
 156         }
 157         req = prev_found;
 158         for (;;) {
 159                 req = ((req > all_requests) ? req : limit) - 1;
 160                 if (req->dev < 0)
 161                         break;
 162                 if (req == prev_found)
 163                         return NULL;
 164         }
 165         prev_found = req;
 166         req->dev = dev;
 167         return req;
 168 }
 169 
 170 /*
 171  * wait until a free request in the first N entries is available.
 172  */
 173 static struct request * __get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 174 {
 175         register struct request *req;
 176         struct wait_queue wait = { current, NULL };
 177 
 178         add_wait_queue(&wait_for_request, &wait);
 179         for (;;) {
 180                 unplug_device(MAJOR(dev)+blk_dev);
 181                 current->state = TASK_UNINTERRUPTIBLE;
 182                 cli();
 183                 req = get_request(n, dev);
 184                 sti();
 185                 if (req)
 186                         break;
 187                 schedule();
 188         }
 189         remove_wait_queue(&wait_for_request, &wait);
 190         current->state = TASK_RUNNING;
 191         return req;
 192 }
 193 
 194 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 195 {
 196         register struct request *req;
 197 
 198         cli();
 199         req = get_request(n, dev);
 200         sti();
 201         if (req)
 202                 return req;
 203         return __get_request_wait(n, dev);
 204 }
 205 
/* RO fail safe mechanism: one bit per minor device marks it read-only.
   Indexed as ro_bits[major][minor >> 5], bit (minor & 31) -- see
   is_read_only() / set_device_ro(). */
  207 
  208 static long ro_bits[MAX_BLKDEV][8];
 209 
 210 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 211 {
 212         int minor,major;
 213 
 214         major = MAJOR(dev);
 215         minor = MINOR(dev);
 216         if (major < 0 || major >= MAX_BLKDEV) return 0;
 217         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 218 }
 219 
 220 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 221 {
 222         int minor,major;
 223 
 224         major = MAJOR(dev);
 225         minor = MINOR(dev);
 226         if (major < 0 || major >= MAX_BLKDEV) return;
 227         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 228         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 229 }
 230 
 231 /*
 232  * add-request adds a request to the linked list.
 233  * It disables interrupts so that it can muck with the
 234  * request-lists in peace.
 235  */
 236 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 237 {
 238         struct request * tmp;
 239         short            disk_index;
 240 
 241         switch (MAJOR(req->dev)) {
 242                 case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
 243                                         if (disk_index < 4)
 244                                                 kstat.dk_drive[disk_index]++;
 245                                         break;
 246                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 247                 case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
 248                                         kstat.dk_drive[disk_index]++;
 249                                         break;
 250                 case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
 251                                         kstat.dk_drive[disk_index]++;
 252                 default:                break;
 253         }
 254 
 255         req->next = NULL;
 256         cli();
 257         if (req->bh)
 258                 mark_buffer_clean(req->bh);
 259         if (!(tmp = dev->current_request)) {
 260                 dev->current_request = req;
 261                 (dev->request_fn)();
 262                 sti();
 263                 return;
 264         }
 265         for ( ; tmp->next ; tmp = tmp->next) {
 266                 if ((IN_ORDER(tmp,req) ||
 267                     !IN_ORDER(tmp,tmp->next)) &&
 268                     IN_ORDER(req,tmp->next))
 269                         break;
 270         }
 271         req->next = tmp->next;
 272         tmp->next = req;
 273 
 274 /* for SCSI devices, call request_fn unconditionally */
 275         if (scsi_major(MAJOR(req->dev)))
 276                 (dev->request_fn)();
 277 
 278         sti();
 279 }
 280 
 281 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 282 {
 283         unsigned int sector, count;
 284         struct request * req;
 285         int rw_ahead, max_req;
 286 
 287 /* WRITEA/READA is special case - it is not really needed, so if the */
 288 /* buffer is locked, we just forget about it, else it's a normal read */
 289         rw_ahead = (rw == READA || rw == WRITEA);
 290         if (rw_ahead) {
 291                 if (bh->b_lock)
 292                         return;
 293                 if (rw == READA)
 294                         rw = READ;
 295                 else
 296                         rw = WRITE;
 297         }
 298         if (rw!=READ && rw!=WRITE) {
 299                 printk("Bad block dev command, must be R/W/RA/WA\n");
 300                 return;
 301         }
 302         count = bh->b_size >> 9;
 303         sector = bh->b_blocknr * count;
 304         if (blk_size[major])
 305                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 306                         bh->b_dirt = bh->b_uptodate = 0;
 307                         bh->b_req = 0;
 308                         printk("attempt to access beyond end of device\n");
 309                         return;
 310                 }
 311         /* Uhhuh.. Nasty dead-lock possible here.. */
 312         if (bh->b_lock)
 313                 return;
 314         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 315         lock_buffer(bh);
 316         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 317                 unlock_buffer(bh);
 318                 return;
 319         }
 320 
 321 /* we don't allow the write-requests to fill up the queue completely:
 322  * we want some room for reads: they take precedence. The last third
 323  * of the requests are only for reads.
 324  */
 325         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 326 
 327 /* look for a free request. */
 328         cli();
 329 
 330 /* The scsi disk drivers and the IDE driver completely remove the request
 331  * from the queue when they start processing an entry.  For this reason
 332  * it is safe to continue to add links to the top entry for those devices.
 333  */
 334         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 335              || major == IDE1_MAJOR
 336              || major == FLOPPY_MAJOR
 337              || major == SCSI_DISK_MAJOR
 338              || major == SCSI_CDROM_MAJOR
 339              || major == IDE2_MAJOR
 340              || major == IDE3_MAJOR)
 341             && (req = blk_dev[major].current_request))
 342         {
 343 #ifdef CONFIG_BLK_DEV_HD
 344                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 345 #else
 346                 if (major == FLOPPY_MAJOR)
 347 #endif CONFIG_BLK_DEV_HD
 348                         req = req->next;
 349                 while (req) {
 350                         if (req->dev == bh->b_dev &&
 351                             !req->sem &&
 352                             req->cmd == rw &&
 353                             req->sector + req->nr_sectors == sector &&
 354                             req->nr_sectors < 244)
 355                         {
 356                                 req->bhtail->b_reqnext = bh;
 357                                 req->bhtail = bh;
 358                                 req->nr_sectors += count;
 359                                 mark_buffer_clean(bh);
 360                                 sti();
 361                                 return;
 362                         }
 363 
 364                         if (req->dev == bh->b_dev &&
 365                             !req->sem &&
 366                             req->cmd == rw &&
 367                             req->sector - count == sector &&
 368                             req->nr_sectors < 244)
 369                         {
 370                                 req->nr_sectors += count;
 371                                 bh->b_reqnext = req->bh;
 372                                 req->buffer = bh->b_data;
 373                                 req->current_nr_sectors = count;
 374                                 req->sector = sector;
 375                                 mark_buffer_clean(bh);
 376                                 req->bh = bh;
 377                                 sti();
 378                                 return;
 379                         }    
 380 
 381                         req = req->next;
 382                 }
 383         }
 384 
 385 /* find an unused request. */
 386         req = get_request(max_req, bh->b_dev);
 387         sti();
 388 
 389 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 390         if (!req) {
 391                 if (rw_ahead) {
 392                         unlock_buffer(bh);
 393                         return;
 394                 }
 395                 req = __get_request_wait(max_req, bh->b_dev);
 396         }
 397 
 398 /* fill up the request-info, and add it to the queue */
 399         req->cmd = rw;
 400         req->errors = 0;
 401         req->sector = sector;
 402         req->nr_sectors = count;
 403         req->current_nr_sectors = count;
 404         req->buffer = bh->b_data;
 405         req->sem = NULL;
 406         req->bh = bh;
 407         req->bhtail = bh;
 408         req->next = NULL;
 409         add_request(major+blk_dev,req);
 410 }
 411 
 412 void ll_rw_page(int rw, int dev, unsigned long page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 413 {
 414         struct request * req;
 415         unsigned int major = MAJOR(dev);
 416         unsigned long sector = page * (PAGE_SIZE / 512);
 417         struct semaphore sem = MUTEX_LOCKED;
 418 
 419         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 420                 printk("Trying to read nonexistent block-device %04x (%ld)\n",dev,sector);
 421                 return;
 422         }
 423         if (rw!=READ && rw!=WRITE)
 424                 panic("Bad block dev command, must be R/W");
 425         if (rw == WRITE && is_read_only(dev)) {
 426                 printk("Can't page to read-only device 0x%X\n",dev);
 427                 return;
 428         }
 429         req = get_request_wait(NR_REQUEST, dev);
 430 /* fill up the request-info, and add it to the queue */
 431         req->cmd = rw;
 432         req->errors = 0;
 433         req->sector = sector;
 434         req->nr_sectors = PAGE_SIZE / 512;
 435         req->current_nr_sectors = PAGE_SIZE / 512;
 436         req->buffer = buffer;
 437         req->sem = &sem;
 438         req->bh = NULL;
 439         req->next = NULL;
 440         add_request(major+blk_dev,req);
 441         down(&sem);
 442 }
 443 
 444 /* This function can be used to request a number of buffers from a block
 445    device. Currently the only restriction is that all buffers must belong to
 446    the same device */
 447 
 448 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
 449 {
 450         unsigned int major;
 451         struct request plug;
 452         int correct_size;
 453         struct blk_dev_struct * dev;
 454         int i;
 455 
 456         /* Make sure that the first block contains something reasonable */
 457         while (!*bh) {
 458                 bh++;
 459                 if (--nr <= 0)
 460                         return;
 461         };
 462 
 463         dev = NULL;
 464         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 465                 dev = blk_dev + major;
 466         if (!dev || !dev->request_fn) {
 467                 printk(
 468         "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
 469                        (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
 470                 goto sorry;
 471         }
 472 
 473         /* Determine correct block size for this device.  */
 474         correct_size = BLOCK_SIZE;
 475         if (blksize_size[major]) {
 476                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 477                 if (i)
 478                         correct_size = i;
 479         }
 480 
 481         /* Verify requested block sizes.  */
 482         for (i = 0; i < nr; i++) {
 483                 if (bh[i] && bh[i]->b_size != correct_size) {
 484                         printk(
 485                         "ll_rw_block: only %d-char blocks implemented (%lu)\n",
 486                                correct_size, bh[i]->b_size);
 487                         goto sorry;
 488                 }
 489         }
 490 
 491         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 492                 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
 493                 goto sorry;
 494         }
 495 
 496         /* If there are no pending requests for this device, then we insert
 497            a dummy request for that device.  This will prevent the request
 498            from starting until we have shoved all of the blocks into the
 499            queue, and then we let it rip.  */
 500 
 501         if (nr > 1)
 502                 plug_device(dev, &plug);
 503         for (i = 0; i < nr; i++) {
 504                 if (bh[i]) {
 505                         bh[i]->b_req = 1;
 506                         make_request(major, rw, bh[i]);
 507                         if (rw == READ || rw == READA)
 508                                 kstat.pgpgin++;
 509                         else
 510                                 kstat.pgpgout++;
 511                 }
 512         }
 513         unplug_device(dev);
 514         return;
 515 
 516       sorry:
 517         for (i = 0; i < nr; i++) {
 518                 if (bh[i])
 519                         bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 520         }
 521         return;
 522 }
 523 
 524 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 525 {
 526         int i, j;
 527         int buffersize;
 528         struct request * req[8];
 529         unsigned int major = MAJOR(dev);
 530         struct semaphore sem = MUTEX_LOCKED;
 531 
 532         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 533                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 534                 return;
 535         }
 536 
 537         if (rw!=READ && rw!=WRITE) {
 538                 printk("ll_rw_swap: bad block dev command, must be R/W");
 539                 return;
 540         }
 541         if (rw == WRITE && is_read_only(dev)) {
 542                 printk("Can't swap to read-only device 0x%X\n",dev);
 543                 return;
 544         }
 545         
 546         buffersize = PAGE_SIZE / nb;
 547 
 548         for (j=0, i=0; i<nb;)
 549         {
 550                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 551                 {
 552                         if (j == 0) {
 553                                 req[j] = get_request_wait(NR_REQUEST, dev);
 554                         } else {
 555                                 cli();
 556                                 req[j] = get_request(NR_REQUEST, dev);
 557                                 sti();
 558                                 if (req[j] == NULL)
 559                                         break;
 560                         }
 561                         req[j]->cmd = rw;
 562                         req[j]->errors = 0;
 563                         req[j]->sector = (b[i] * buffersize) >> 9;
 564                         req[j]->nr_sectors = buffersize >> 9;
 565                         req[j]->current_nr_sectors = buffersize >> 9;
 566                         req[j]->buffer = buf;
 567                         req[j]->sem = &sem;
 568                         req[j]->bh = NULL;
 569                         req[j]->next = NULL;
 570                         add_request(major+blk_dev,req[j]);
 571                 }
 572                 while (j > 0) {
 573                         j--;
 574                         down(&sem);
 575                 }
 576         }
 577 }
 578 
 579 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 580 {
 581         struct request * req;
 582 
 583         req = all_requests + NR_REQUEST;
 584         while (--req >= all_requests) {
 585                 req->dev = -1;
 586                 req->next = NULL;
 587         }
 588         memset(ro_bits,0,sizeof(ro_bits));
 589 #ifdef CONFIG_BLK_DEV_IDE
 590         mem_start = ide_init(mem_start,mem_end); /* this MUST preceed hd_init */
 591 #endif
 592 #ifdef CONFIG_BLK_DEV_HD
 593         mem_start = hd_init(mem_start,mem_end);
 594 #endif
 595 #ifdef CONFIG_BLK_DEV_XD
 596         mem_start = xd_init(mem_start,mem_end);
 597 #endif
 598 #ifdef CONFIG_BLK_DEV_FD
 599         floppy_init();
 600 #else
 601         outb_p(0xc, 0x3f2);
 602 #endif
 603 #ifdef CONFIG_CDU31A
 604         mem_start = cdu31a_init(mem_start,mem_end);
 605 #endif CONFIG_CDU31A
 606 #ifdef CONFIG_MCD
 607         mem_start = mcd_init(mem_start,mem_end);
 608 #endif CONFIG_MCD
 609 #ifdef CONFIG_MCDX
 610         mem_start = mcdx_init(mem_start,mem_end);
 611 #endif CONFIG_MCDX
 612 #ifdef CONFIG_SBPCD
 613         mem_start = sbpcd_init(mem_start, mem_end);
 614 #endif CONFIG_SBPCD
 615 #ifdef CONFIG_AZTCD
 616         mem_start = aztcd_init(mem_start,mem_end);
 617 #endif CONFIG_AZTCD
 618 #ifdef CONFIG_CDU535
 619         mem_start = sony535_init(mem_start,mem_end);
 620 #endif CONFIG_CDU535
 621 #ifdef CONFIG_GSCD
 622         mem_start = gscd_init(mem_start, mem_end);
 623 #endif CONFIG_GSCD
 624 #ifdef CONFIG_CM206
 625         mem_start = cm206_init(mem_start, mem_end);
 626 #endif
 627 #ifdef CONFIG_OPTCD
 628         mem_start = optcd_init(mem_start,mem_end);
 629 #endif CONFIG_OPTCD
 630 #ifdef CONFIG_SJCD
 631         mem_start = sjcd_init(mem_start,mem_end);
 632 #endif CONFIG_SJCD
 633         if (ramdisk_size)
 634                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 635         return mem_start;
 636 }

/* [previous][next][first][last][top][bottom][index][help] */