root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. plug_device
  2. unplug_device
  3. get_request
  4. get_request_wait
  5. is_read_only
  6. set_device_ro
  7. add_request
  8. make_request
  9. ll_rw_page
  10. ll_rw_block
  11. ll_rw_swap_file
  12. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include "blk.h"
  23 
  24 /*
  25  * The request-struct contains all necessary data
  26  * to load a nr of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      do_request-address
  41  *      next-request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
  44         { NULL, NULL },         /* 0 no_dev */
  45         { NULL, NULL },         /* 1 dev mem */
  46         { NULL, NULL },         /* 2 dev fd */
  47         { NULL, NULL },         /* 3 dev ide0 or hd */
  48         { NULL, NULL },         /* 4 dev ttyx */
  49         { NULL, NULL },         /* 5 dev tty */
  50         { NULL, NULL },         /* 6 dev lp */
  51         { NULL, NULL },         /* 7 dev pipes */
  52         { NULL, NULL },         /* 8 dev sd */
  53         { NULL, NULL },         /* 9 dev st */
  54         { NULL, NULL },         /* 10 */
  55         { NULL, NULL },         /* 11 */
  56         { NULL, NULL },         /* 12 */
  57         { NULL, NULL },         /* 13 */
  58         { NULL, NULL },         /* 14 */
  59         { NULL, NULL },         /* 15 */
  60         { NULL, NULL },         /* 16 */
  61         { NULL, NULL },         /* 17 */
  62         { NULL, NULL },         /* 18 */
  63         { NULL, NULL },         /* 19 */
  64         { NULL, NULL },         /* 20 */
  65         { NULL, NULL },         /* 21 */
  66         { NULL, NULL }          /* 22 dev ide1 */
  67 };
  68 
  69 /*
  70  * blk_size contains the size of all block-devices in units of 1024 byte
  71  * sectors:
  72  *
  73  * blk_size[MAJOR][MINOR]
  74  *
  75  * if (!blk_size[MAJOR]) then no minor size checking is done.
  76  */
  77 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  78 
  79 /*
  80  * blksize_size contains the size of all block-devices:
  81  *
  82  * blksize_size[MAJOR][MINOR]
  83  *
  84  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  85  */
  86 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  87 
  88 /*
  89  * hardsect_size contains the size of the hardware sector of a device.
  90  *
  91  * hardsect_size[MAJOR][MINOR]
  92  *
  93  * if (!hardsect_size[MAJOR])
  94  *              then 512 bytes is assumed.
  95  * else
  96  *              sector_size is hardsect_size[MAJOR][MINOR]
  97  * This is currently set by some scsi device and read by the msdos fs driver
  98  * This might be a some uses later.
  99  */
 100 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * "plug" the device if there are no outstanding requests: this will
 104  * force the transfer to start only after we have put all the requests
 105  * on the list.
 106  */
 107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         unsigned long flags;
 110 
 111         plug->dev = -1;
 112         plug->cmd = -1;
 113         plug->next = NULL;
 114         save_flags(flags);
 115         cli();
 116         if (!dev->current_request)
 117                 dev->current_request = plug;
 118         restore_flags(flags);
 119 }
 120 
 121 /*
 122  * remove the plug and let it rip..
 123  */
 124 static void unplug_device(struct blk_dev_struct * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         struct request * req;
 127         unsigned long flags;
 128 
 129         save_flags(flags);
 130         cli();
 131         req = dev->current_request;
 132         if (req && req->dev == -1 && req->cmd == -1) {
 133                 dev->current_request = req->next;
 134                 (dev->request_fn)();
 135         }
 136         restore_flags(flags);
 137 }
 138 
 139 /*
 140  * look for a free request in the first N entries.
 141  * NOTE: interrupts must be disabled on the way in, and will still
 142  *       be disabled on the way out.
 143  */
 144 static inline struct request * get_request(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 145 {
 146         static struct request *prev_found = NULL, *prev_limit = NULL;
 147         register struct request *req, *limit;
 148 
 149         if (n <= 0)
 150                 panic("get_request(%d): impossible!\n", n);
 151 
 152         limit = all_requests + n;
 153         if (limit != prev_limit) {
 154                 prev_limit = limit;
 155                 prev_found = all_requests;
 156         }
 157         req = prev_found;
 158         for (;;) {
 159                 req = ((req > all_requests) ? req : limit) - 1;
 160                 if (req->dev < 0)
 161                         break;
 162                 if (req == prev_found)
 163                         return NULL;
 164         }
 165         prev_found = req;
 166         req->dev = dev;
 167         return req;
 168 }
 169 
 170 /*
 171  * wait until a free request in the first N entries is available.
 172  * NOTE: interrupts must be disabled on the way in, and will still
 173  *       be disabled on the way out.
 174  */
 175 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 176 {
 177         register struct request *req;
 178 
 179         while ((req = get_request(n, dev)) == NULL) {
 180                 unplug_device(MAJOR(dev)+blk_dev);
 181                 sleep_on(&wait_for_request);
 182         }
 183         return req;
 184 }
 185 
 186 /* RO fail safe mechanism */
 187 
 188 static long ro_bits[MAX_BLKDEV][8];
 189 
 190 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 191 {
 192         int minor,major;
 193 
 194         major = MAJOR(dev);
 195         minor = MINOR(dev);
 196         if (major < 0 || major >= MAX_BLKDEV) return 0;
 197         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 198 }
 199 
 200 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 201 {
 202         int minor,major;
 203 
 204         major = MAJOR(dev);
 205         minor = MINOR(dev);
 206         if (major < 0 || major >= MAX_BLKDEV) return;
 207         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 208         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 209 }
 210 
 211 /*
 212  * add-request adds a request to the linked list.
 213  * It disables interrupts so that it can muck with the
 214  * request-lists in peace.
 215  */
 216 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 217 {
 218         struct request * tmp;
 219         short            disk_index;
 220 
 221         switch (MAJOR(req->dev)) {
 222                 case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
 223                                         if (disk_index < 4)
 224                                                 kstat.dk_drive[disk_index]++;
 225                                         break;
 226                 case HD_MAJOR:
 227                 case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
 228                                         kstat.dk_drive[disk_index]++;
 229                                         break;
 230                 case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
 231                                         kstat.dk_drive[disk_index]++;
 232                 default:                break;
 233         }
 234 
 235         req->next = NULL;
 236         cli();
 237         if (req->bh)
 238                 mark_buffer_clean(req->bh);
 239         if (!(tmp = dev->current_request)) {
 240                 dev->current_request = req;
 241                 (dev->request_fn)();
 242                 sti();
 243                 return;
 244         }
 245         for ( ; tmp->next ; tmp = tmp->next) {
 246                 if ((IN_ORDER(tmp,req) ||
 247                     !IN_ORDER(tmp,tmp->next)) &&
 248                     IN_ORDER(req,tmp->next))
 249                         break;
 250         }
 251         req->next = tmp->next;
 252         tmp->next = req;
 253 
 254 /* for SCSI devices, call request_fn unconditionally */
 255         if (scsi_major(MAJOR(req->dev)))
 256                 (dev->request_fn)();
 257 
 258         sti();
 259 }
 260 
 261 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 262 {
 263         unsigned int sector, count;
 264         struct request * req;
 265         int rw_ahead, max_req;
 266 
 267 /* WRITEA/READA is special case - it is not really needed, so if the */
 268 /* buffer is locked, we just forget about it, else it's a normal read */
 269         rw_ahead = (rw == READA || rw == WRITEA);
 270         if (rw_ahead) {
 271                 if (bh->b_lock)
 272                         return;
 273                 if (rw == READA)
 274                         rw = READ;
 275                 else
 276                         rw = WRITE;
 277         }
 278         if (rw!=READ && rw!=WRITE) {
 279                 printk("Bad block dev command, must be R/W/RA/WA\n");
 280                 return;
 281         }
 282         count = bh->b_size >> 9;
 283         sector = bh->b_blocknr * count;
 284         if (blk_size[major])
 285                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 286                         bh->b_dirt = bh->b_uptodate = 0;
 287                         bh->b_req = 0;
 288                         return;
 289                 }
 290         /* Uhhuh.. Nasty dead-lock possible here.. */
 291         if (bh->b_lock)
 292                 return;
 293         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 294         lock_buffer(bh);
 295         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 296                 unlock_buffer(bh);
 297                 return;
 298         }
 299 
 300 /* we don't allow the write-requests to fill up the queue completely:
 301  * we want some room for reads: they take precedence. The last third
 302  * of the requests are only for reads.
 303  */
 304         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 305 
 306 /* big loop: look for a free request. */
 307 
 308 repeat:
 309         cli();
 310 
 311 /* The scsi disk drivers and the IDE driver completely remove the request
 312  * from the queue when they start processing an entry.  For this reason
 313  * it is safe to continue to add links to the top entry for those devices.
 314  */
 315         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 316              || major == IDE1_MAJOR
 317              || major == FLOPPY_MAJOR
 318              || major == SCSI_DISK_MAJOR
 319              || major == SCSI_CDROM_MAJOR)
 320             && (req = blk_dev[major].current_request))
 321         {
 322 #ifdef CONFIG_BLK_DEV_HD
 323                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 324 #else
 325                 if (major == FLOPPY_MAJOR)
 326 #endif CONFIG_BLK_DEV_HD
 327                         req = req->next;
 328                 while (req) {
 329                         if (req->dev == bh->b_dev &&
 330                             !req->sem &&
 331                             req->cmd == rw &&
 332                             req->sector + req->nr_sectors == sector &&
 333                             req->nr_sectors < 244)
 334                         {
 335                                 req->bhtail->b_reqnext = bh;
 336                                 req->bhtail = bh;
 337                                 req->nr_sectors += count;
 338                                 mark_buffer_clean(bh);
 339                                 sti();
 340                                 return;
 341                         }
 342 
 343                         if (req->dev == bh->b_dev &&
 344                             !req->sem &&
 345                             req->cmd == rw &&
 346                             req->sector - count == sector &&
 347                             req->nr_sectors < 244)
 348                         {
 349                                 req->nr_sectors += count;
 350                                 bh->b_reqnext = req->bh;
 351                                 req->buffer = bh->b_data;
 352                                 req->current_nr_sectors = count;
 353                                 req->sector = sector;
 354                                 mark_buffer_clean(bh);
 355                                 req->bh = bh;
 356                                 sti();
 357                                 return;
 358                         }    
 359 
 360                         req = req->next;
 361                 }
 362         }
 363 
 364 /* find an unused request. */
 365         req = get_request(max_req, bh->b_dev);
 366 
 367 /* if no request available: if rw_ahead, forget it; otherwise try again. */
 368         if (! req) {
 369                 if (rw_ahead) {
 370                         sti();
 371                         unlock_buffer(bh);
 372                         return;
 373                 }
 374                 unplug_device(major+blk_dev);
 375                 sleep_on(&wait_for_request);
 376                 sti();
 377                 goto repeat;
 378         }
 379 
 380 /* we found a request. */
 381         sti();
 382 
 383 /* fill up the request-info, and add it to the queue */
 384         req->cmd = rw;
 385         req->errors = 0;
 386         req->sector = sector;
 387         req->nr_sectors = count;
 388         req->current_nr_sectors = count;
 389         req->buffer = bh->b_data;
 390         req->sem = NULL;
 391         req->bh = bh;
 392         req->bhtail = bh;
 393         req->next = NULL;
 394         add_request(major+blk_dev,req);
 395 }
 396 
 397 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 398 {
 399         struct request * req;
 400         unsigned int major = MAJOR(dev);
 401         struct semaphore sem = MUTEX_LOCKED;
 402 
 403         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 404                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 405                 return;
 406         }
 407         if (rw!=READ && rw!=WRITE)
 408                 panic("Bad block dev command, must be R/W");
 409         if (rw == WRITE && is_read_only(dev)) {
 410                 printk("Can't page to read-only device 0x%X\n",dev);
 411                 return;
 412         }
 413         cli();
 414         req = get_request_wait(NR_REQUEST, dev);
 415         sti();
 416 /* fill up the request-info, and add it to the queue */
 417         req->cmd = rw;
 418         req->errors = 0;
 419         req->sector = page<<3;
 420         req->nr_sectors = 8;
 421         req->current_nr_sectors = 8;
 422         req->buffer = buffer;
 423         req->sem = &sem;
 424         req->bh = NULL;
 425         req->next = NULL;
 426         add_request(major+blk_dev,req);
 427         down(&sem);
 428 }
 429 
 430 /* This function can be used to request a number of buffers from a block
 431    device. Currently the only restriction is that all buffers must belong to
 432    the same device */
 433 
 434 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
 435 {
 436         unsigned int major;
 437         struct request plug;
 438         int correct_size;
 439         struct blk_dev_struct * dev;
 440         int i;
 441 
 442         /* Make sure that the first block contains something reasonable */
 443         while (!*bh) {
 444                 bh++;
 445                 if (--nr <= 0)
 446                         return;
 447         };
 448 
 449         dev = NULL;
 450         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 451                 dev = blk_dev + major;
 452         if (!dev || !dev->request_fn) {
 453                 printk(
 454         "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
 455                        (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
 456                 goto sorry;
 457         }
 458 
 459         /* Determine correct block size for this device.  */
 460         correct_size = BLOCK_SIZE;
 461         if (blksize_size[major]) {
 462                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 463                 if (i)
 464                         correct_size = i;
 465         }
 466 
 467         /* Verify requested block sizes.  */
 468         for (i = 0; i < nr; i++) {
 469                 if (bh[i] && bh[i]->b_size != correct_size) {
 470                         printk(
 471                         "ll_rw_block: only %d-char blocks implemented (%lu)\n",
 472                                correct_size, bh[i]->b_size);
 473                         goto sorry;
 474                 }
 475         }
 476 
 477         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 478                 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
 479                 goto sorry;
 480         }
 481 
 482         /* If there are no pending requests for this device, then we insert
 483            a dummy request for that device.  This will prevent the request
 484            from starting until we have shoved all of the blocks into the
 485            queue, and then we let it rip.  */
 486 
 487         if (nr > 1)
 488                 plug_device(dev, &plug);
 489         for (i = 0; i < nr; i++) {
 490                 if (bh[i]) {
 491                         bh[i]->b_req = 1;
 492                         make_request(major, rw, bh[i]);
 493                         if (rw == READ || rw == READA)
 494                                 kstat.pgpgin++;
 495                         else
 496                                 kstat.pgpgout++;
 497                 }
 498         }
 499         unplug_device(dev);
 500         return;
 501 
 502       sorry:
 503         for (i = 0; i < nr; i++) {
 504                 if (bh[i])
 505                         bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 506         }
 507         return;
 508 }
 509 
 510 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 511 {
 512         int i;
 513         int buffersize;
 514         struct request * req;
 515         unsigned int major = MAJOR(dev);
 516         struct semaphore sem = MUTEX_LOCKED;
 517 
 518         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 519                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 520                 return;
 521         }
 522 
 523         if (rw!=READ && rw!=WRITE) {
 524                 printk("ll_rw_swap: bad block dev command, must be R/W");
 525                 return;
 526         }
 527         if (rw == WRITE && is_read_only(dev)) {
 528                 printk("Can't swap to read-only device 0x%X\n",dev);
 529                 return;
 530         }
 531         
 532         buffersize = PAGE_SIZE / nb;
 533 
 534         for (i=0; i<nb; i++, buf += buffersize)
 535         {
 536                 cli();
 537                 req = get_request_wait(NR_REQUEST, dev);
 538                 sti();
 539                 req->cmd = rw;
 540                 req->errors = 0;
 541                 req->sector = (b[i] * buffersize) >> 9;
 542                 req->nr_sectors = buffersize >> 9;
 543                 req->current_nr_sectors = buffersize >> 9;
 544                 req->buffer = buf;
 545                 req->sem = &sem;
 546                 req->bh = NULL;
 547                 req->next = NULL;
 548                 add_request(major+blk_dev,req);
 549                 down(&sem);
 550         }
 551 }
 552 
 553 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 554 {
 555         struct request * req;
 556 
 557         req = all_requests + NR_REQUEST;
 558         while (--req >= all_requests) {
 559                 req->dev = -1;
 560                 req->next = NULL;
 561         }
 562         memset(ro_bits,0,sizeof(ro_bits));
 563 #ifdef CONFIG_BLK_DEV_HD
 564         mem_start = hd_init(mem_start,mem_end);
 565 #endif
 566 #ifdef CONFIG_BLK_DEV_IDE
 567         mem_start = ide_init(mem_start,mem_end);
 568 #endif
 569 #ifdef CONFIG_BLK_DEV_XD
 570         mem_start = xd_init(mem_start,mem_end);
 571 #endif
 572 #ifdef CONFIG_CDU31A
 573         mem_start = cdu31a_init(mem_start,mem_end);
 574 #endif
 575 #ifdef CONFIG_CDU535
 576         mem_start = sony535_init(mem_start,mem_end);
 577 #endif
 578 #ifdef CONFIG_MCD
 579         mem_start = mcd_init(mem_start,mem_end);
 580 #endif
 581 #ifdef CONFIG_AZTCD
 582         mem_start = aztcd_init(mem_start,mem_end);
 583 #endif
 584 #ifdef CONFIG_BLK_DEV_FD
 585         floppy_init();
 586 #else
 587         outb_p(0xc, 0x3f2);
 588 #endif
 589 #ifdef CONFIG_SBPCD
 590         mem_start = sbpcd_init(mem_start, mem_end);
 591 #endif CONFIG_SBPCD
 592         if (ramdisk_size)
 593                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 594         return mem_start;
 595 }

/* [previous][next][first][last][top][bottom][index][help] */