root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. add_request
  9. make_request
  10. ll_rw_page
  11. ll_rw_block
  12. ll_rw_swap_file
  13. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include "blk.h"
  23 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory.
 * NR_REQUEST entries are shared by all block devices; a slot with
 * dev < 0 is free (see get_request() and blk_dev_init()).
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 * (see __get_request_wait() below)
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };
  38 
/* blk_dev_struct is:
 *      do_request-address      (the driver's strategy/request function)
 *      next-request            (head of this device's request queue)
 * One entry per major number; a NULL request_fn means "no driver".
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* 0 no_dev */
        { NULL, NULL },         /* 1 dev mem */
        { NULL, NULL },         /* 2 dev fd */
        { NULL, NULL },         /* 3 dev ide0 or hd */
        { NULL, NULL },         /* 4 dev ttyx */
        { NULL, NULL },         /* 5 dev tty */
        { NULL, NULL },         /* 6 dev lp */
        { NULL, NULL },         /* 7 dev pipes */
        { NULL, NULL },         /* 8 dev sd */
        { NULL, NULL },         /* 9 dev st */
        { NULL, NULL },         /* 10 */
        { NULL, NULL },         /* 11 */
        { NULL, NULL },         /* 12 */
        { NULL, NULL },         /* 13 */
        { NULL, NULL },         /* 14 */
        { NULL, NULL },         /* 15 */
        { NULL, NULL },         /* 16 */
        { NULL, NULL },         /* 17 */
        { NULL, NULL },         /* 18 */
        { NULL, NULL },         /* 19 */
        { NULL, NULL },         /* 20 */
        { NULL, NULL },         /* 21 */
        { NULL, NULL }          /* 22 dev ide1 */
};
  68 
/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs
 * driver.  It might find further uses later on.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * "plug" the device if there are no outstanding requests: this will
 104  * force the transfer to start only after we have put all the requests
 105  * on the list.
 106  */
 107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         unsigned long flags;
 110 
 111         plug->dev = -1;
 112         plug->cmd = -1;
 113         plug->next = NULL;
 114         save_flags(flags);
 115         cli();
 116         if (!dev->current_request)
 117                 dev->current_request = plug;
 118         restore_flags(flags);
 119 }
 120 
 121 /*
 122  * remove the plug and let it rip..
 123  */
 124 static void unplug_device(struct blk_dev_struct * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         struct request * req;
 127         unsigned long flags;
 128 
 129         save_flags(flags);
 130         cli();
 131         req = dev->current_request;
 132         if (req && req->dev == -1 && req->cmd == -1) {
 133                 dev->current_request = req->next;
 134                 (dev->request_fn)();
 135         }
 136         restore_flags(flags);
 137 }
 138 
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 * Returns NULL if all n slots are busy.
 */
static inline struct request * get_request(int n, int dev)
{
        /* Cache of the last successful allocation so a new scan resumes
         * where the previous one left off, instead of always restarting
         * at the front of the table. */
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        /* A different n invalidates the cached scan position. */
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                /* Step backwards, wrapping from all_requests[0] to
                 * limit - 1: a circular scan over the first n entries. */
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)       /* dev < 0 marks a free slot */
                        break;
                if (req == prev_found)  /* full circle: nothing free */
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;                 /* claim the slot */
        return req;
}
 169 
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, int dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                /* Kick the device so in-flight requests complete and
                 * free up their slots. */
                unplug_device(MAJOR(dev)+blk_dev);
                /* Mark ourselves asleep BEFORE testing the condition,
                 * so a wakeup between the test and schedule() is not
                 * lost. */
                current->state = TASK_UNINTERRUPTIBLE;
                cli();                  /* get_request() requires irqs off */
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}
 193 
 194 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 195 {
 196         register struct request *req;
 197 
 198         cli();
 199         req = get_request(n, dev);
 200         sti();
 201         if (req)
 202                 return req;
 203         return __get_request_wait(n, dev);
 204 }
 205 
/* RO fail safe mechanism: per-minor read-only bitmap, addressed as
   ro_bits[major][minor >> 5] with bit (minor & 31) -- see
   is_read_only() / set_device_ro() below. */

static long ro_bits[MAX_BLKDEV][8];
 209 
 210 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 211 {
 212         int minor,major;
 213 
 214         major = MAJOR(dev);
 215         minor = MINOR(dev);
 216         if (major < 0 || major >= MAX_BLKDEV) return 0;
 217         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 218 }
 219 
 220 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 221 {
 222         int minor,major;
 223 
 224         major = MAJOR(dev);
 225         minor = MINOR(dev);
 226         if (major < 0 || major >= MAX_BLKDEV) return;
 227         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 228         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 229 }
 230 
 231 /*
 232  * add-request adds a request to the linked list.
 233  * It disables interrupts so that it can muck with the
 234  * request-lists in peace.
 235  */
 236 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 237 {
 238         struct request * tmp;
 239         short            disk_index;
 240 
 241         switch (MAJOR(req->dev)) {
 242                 case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
 243                                         if (disk_index < 4)
 244                                                 kstat.dk_drive[disk_index]++;
 245                                         break;
 246                 case HD_MAJOR:
 247                 case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x0040) >> 6;
 248                                         kstat.dk_drive[disk_index]++;
 249                                         break;
 250                 case IDE1_MAJOR:        disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
 251                                         kstat.dk_drive[disk_index]++;
 252                 default:                break;
 253         }
 254 
 255         req->next = NULL;
 256         cli();
 257         if (req->bh)
 258                 mark_buffer_clean(req->bh);
 259         if (!(tmp = dev->current_request)) {
 260                 dev->current_request = req;
 261                 (dev->request_fn)();
 262                 sti();
 263                 return;
 264         }
 265         for ( ; tmp->next ; tmp = tmp->next) {
 266                 if ((IN_ORDER(tmp,req) ||
 267                     !IN_ORDER(tmp,tmp->next)) &&
 268                     IN_ORDER(req,tmp->next))
 269                         break;
 270         }
 271         req->next = tmp->next;
 272         tmp->next = req;
 273 
 274 /* for SCSI devices, call request_fn unconditionally */
 275         if (scsi_major(MAJOR(req->dev)))
 276                 (dev->request_fn)();
 277 
 278         sti();
 279 }
 280 
 281 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 282 {
 283         unsigned int sector, count;
 284         struct request * req;
 285         int rw_ahead, max_req;
 286 
 287 /* WRITEA/READA is special case - it is not really needed, so if the */
 288 /* buffer is locked, we just forget about it, else it's a normal read */
 289         rw_ahead = (rw == READA || rw == WRITEA);
 290         if (rw_ahead) {
 291                 if (bh->b_lock)
 292                         return;
 293                 if (rw == READA)
 294                         rw = READ;
 295                 else
 296                         rw = WRITE;
 297         }
 298         if (rw!=READ && rw!=WRITE) {
 299                 printk("Bad block dev command, must be R/W/RA/WA\n");
 300                 return;
 301         }
 302         count = bh->b_size >> 9;
 303         sector = bh->b_blocknr * count;
 304         if (blk_size[major])
 305                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 306                         bh->b_dirt = bh->b_uptodate = 0;
 307                         bh->b_req = 0;
 308                         return;
 309                 }
 310         /* Uhhuh.. Nasty dead-lock possible here.. */
 311         if (bh->b_lock)
 312                 return;
 313         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 314         lock_buffer(bh);
 315         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 316                 unlock_buffer(bh);
 317                 return;
 318         }
 319 
 320 /* we don't allow the write-requests to fill up the queue completely:
 321  * we want some room for reads: they take precedence. The last third
 322  * of the requests are only for reads.
 323  */
 324         max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
 325 
 326 /* look for a free request. */
 327         cli();
 328 
 329 /* The scsi disk drivers and the IDE driver completely remove the request
 330  * from the queue when they start processing an entry.  For this reason
 331  * it is safe to continue to add links to the top entry for those devices.
 332  */
 333         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 334              || major == IDE1_MAJOR
 335              || major == FLOPPY_MAJOR
 336              || major == SCSI_DISK_MAJOR
 337              || major == SCSI_CDROM_MAJOR)
 338             && (req = blk_dev[major].current_request))
 339         {
 340 #ifdef CONFIG_BLK_DEV_HD
 341                 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
 342 #else
 343                 if (major == FLOPPY_MAJOR)
 344 #endif CONFIG_BLK_DEV_HD
 345                         req = req->next;
 346                 while (req) {
 347                         if (req->dev == bh->b_dev &&
 348                             !req->sem &&
 349                             req->cmd == rw &&
 350                             req->sector + req->nr_sectors == sector &&
 351                             req->nr_sectors < 244)
 352                         {
 353                                 req->bhtail->b_reqnext = bh;
 354                                 req->bhtail = bh;
 355                                 req->nr_sectors += count;
 356                                 mark_buffer_clean(bh);
 357                                 sti();
 358                                 return;
 359                         }
 360 
 361                         if (req->dev == bh->b_dev &&
 362                             !req->sem &&
 363                             req->cmd == rw &&
 364                             req->sector - count == sector &&
 365                             req->nr_sectors < 244)
 366                         {
 367                                 req->nr_sectors += count;
 368                                 bh->b_reqnext = req->bh;
 369                                 req->buffer = bh->b_data;
 370                                 req->current_nr_sectors = count;
 371                                 req->sector = sector;
 372                                 mark_buffer_clean(bh);
 373                                 req->bh = bh;
 374                                 sti();
 375                                 return;
 376                         }    
 377 
 378                         req = req->next;
 379                 }
 380         }
 381 
 382 /* find an unused request. */
 383         req = get_request(max_req, bh->b_dev);
 384         sti();
 385 
 386 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 387         if (!req) {
 388                 if (rw_ahead) {
 389                         unlock_buffer(bh);
 390                         return;
 391                 }
 392                 req = __get_request_wait(max_req, bh->b_dev);
 393         }
 394 
 395 /* fill up the request-info, and add it to the queue */
 396         req->cmd = rw;
 397         req->errors = 0;
 398         req->sector = sector;
 399         req->nr_sectors = count;
 400         req->current_nr_sectors = count;
 401         req->buffer = bh->b_data;
 402         req->sem = NULL;
 403         req->bh = bh;
 404         req->bhtail = bh;
 405         req->next = NULL;
 406         add_request(major+blk_dev,req);
 407 }
 408 
/*
 * Synchronously read or write one page (8 sectors, sector = page << 3)
 * of a block device into/from buffer.  Blocks until the transfer
 * completes.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        /* On-stack semaphore: down() below blocks until the driver's
         * completion path up()s req->sem, making this call synchronous. */
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        /* May sleep until a request slot is free. */
        req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;          /* 8 sectors per page */
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);                     /* wait for the I/O to finish */
}
 439 
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device.  NULL entries in bh[] are skipped.  On error every
   buffer is marked neither dirty nor up to date. */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;    /* dummy plug request, lives on our stack */
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        };

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;       /* buffer now has I/O pending */
                        make_request(major, rw, bh[i]);
                        /* paging statistics */
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        unplug_device(dev);     /* remove the plug (if any) and start I/O */
        return;

        /* Error path: invalidate the buffers so no one trusts or
           re-queues their contents. */
      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
 519 
 520 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 521 {
 522         int i;
 523         int buffersize;
 524         struct request * req;
 525         unsigned int major = MAJOR(dev);
 526         struct semaphore sem = MUTEX_LOCKED;
 527 
 528         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 529                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 530                 return;
 531         }
 532 
 533         if (rw!=READ && rw!=WRITE) {
 534                 printk("ll_rw_swap: bad block dev command, must be R/W");
 535                 return;
 536         }
 537         if (rw == WRITE && is_read_only(dev)) {
 538                 printk("Can't swap to read-only device 0x%X\n",dev);
 539                 return;
 540         }
 541         
 542         buffersize = PAGE_SIZE / nb;
 543 
 544         for (i=0; i<nb; i++, buf += buffersize)
 545         {
 546                 req = get_request_wait(NR_REQUEST, dev);
 547                 req->cmd = rw;
 548                 req->errors = 0;
 549                 req->sector = (b[i] * buffersize) >> 9;
 550                 req->nr_sectors = buffersize >> 9;
 551                 req->current_nr_sectors = buffersize >> 9;
 552                 req->buffer = buf;
 553                 req->sem = &sem;
 554                 req->bh = NULL;
 555                 req->next = NULL;
 556                 add_request(major+blk_dev,req);
 557                 down(&sem);
 558         }
 559 }
 560 
 561 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 562 {
 563         struct request * req;
 564 
 565         req = all_requests + NR_REQUEST;
 566         while (--req >= all_requests) {
 567                 req->dev = -1;
 568                 req->next = NULL;
 569         }
 570         memset(ro_bits,0,sizeof(ro_bits));
 571 #ifdef CONFIG_BLK_DEV_HD
 572         mem_start = hd_init(mem_start,mem_end);
 573 #endif
 574 #ifdef CONFIG_BLK_DEV_IDE
 575         mem_start = ide_init(mem_start,mem_end);
 576 #endif
 577 #ifdef CONFIG_BLK_DEV_XD
 578         mem_start = xd_init(mem_start,mem_end);
 579 #endif
 580 #ifdef CONFIG_CDU31A
 581         mem_start = cdu31a_init(mem_start,mem_end);
 582 #endif
 583 #ifdef CONFIG_CDU535
 584         mem_start = sony535_init(mem_start,mem_end);
 585 #endif
 586 #ifdef CONFIG_MCD
 587         mem_start = mcd_init(mem_start,mem_end);
 588 #endif
 589 #ifdef CONFIG_AZTCD
 590         mem_start = aztcd_init(mem_start,mem_end);
 591 #endif
 592 #ifdef CONFIG_BLK_DEV_FD
 593         floppy_init();
 594 #else
 595         outb_p(0xc, 0x3f2);
 596 #endif
 597 #ifdef CONFIG_SBPCD
 598         mem_start = sbpcd_init(mem_start, mem_end);
 599 #endif CONFIG_SBPCD
 600         if (ramdisk_size)
 601                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 602         return mem_start;
 603 }

/* [previous][next][first][last][top][bottom][index][help] */