root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 
  19 #include <asm/system.h>
  20 #include <asm/io.h>
  21 #include "blk.h"
  22 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk,
   indexed by major device number.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* no_dev */
	{ NULL, NULL },		/* dev mem */
	{ NULL, NULL },		/* dev fd */
	{ NULL, NULL },		/* dev hd */
	{ NULL, NULL },		/* dev ttyx */
	{ NULL, NULL },		/* dev tty */
	{ NULL, NULL },		/* dev lp */
	{ NULL, NULL },		/* dev pipes */
	{ NULL, NULL },		/* dev sd */
	{ NULL, NULL }		/* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * It might have more uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
  87 
  88 /*
  89  * look for a free request in the first N entries.
  90  * NOTE: interrupts must be disabled on the way in, and will still
  91  *       be disabled on the way out.
  92  */
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 *
 * The scan is circular and resumes where the previous successful
 * search left off, so allocations rotate through the table instead
 * of always hitting the low entries first.  A slot is free iff its
 * dev field is negative; claiming it stores the new dev number.
 * Returns NULL when all n slots are in use.
 */
static inline struct request * get_request(int n, int dev)
{
	/* rotor cache: prev_found is where the last search stopped;
	   prev_limit detects a change of n and resets the rotor. */
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step backwards, wrapping from the table start to limit */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;		/* found a free slot */
		if (req == prev_found)
			return NULL;	/* scanned the whole range: none free */
	}
	prev_found = req;
	req->dev = dev;		/* marks the slot as busy */
	return req;
}
 118 
 119 /*
 120  * wait until a free request in the first N entries is available.
 121  * NOTE: interrupts must be disabled on the way in, and will still
 122  *       be disabled on the way out.
 123  */
 124 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         register struct request *req;
 127 
 128         while ((req = get_request(n, dev)) == NULL)
 129                 sleep_on(&wait_for_request);
 130         return req;
 131 }
 132 
/* RO fail safe mechanism: one read-only bit per minor device,
   32 minors per long, 8 longs (256 minors) per major. */

static long ro_bits[MAX_BLKDEV][8];
 136 
 137 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 138 {
 139         int minor,major;
 140 
 141         major = MAJOR(dev);
 142         minor = MINOR(dev);
 143         if (major < 0 || major >= MAX_BLKDEV) return 0;
 144         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 145 }
 146 
 147 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 148 {
 149         int minor,major;
 150 
 151         major = MAJOR(dev);
 152         minor = MINOR(dev);
 153         if (major < 0 || major >= MAX_BLKDEV) return;
 154         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 155         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 156 }
 157 
 158 /*
 159  * add-request adds a request to the linked list.
 160  * It disables interrupts so that it can muck with the
 161  * request-lists in peace.
 162  */
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * The list is kept sorted according to IN_ORDER() (see blk.h); if the
 * queue is empty the driver's request_fn is started immediately.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short		 disk_index;

	/* per-drive I/O accounting for the statistics code */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		default:		break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);	/* buffer is committed to I/O from here on */
	if (!(tmp = dev->current_request)) {
		/* queue was idle: install the request and start the driver */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* insertion scan: stop where req sorts between tmp and tmp->next;
	   the !IN_ORDER(tmp,tmp->next) term handles the wrap point of the
	   circular ordering.  If no slot matches, req goes at the tail. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
 206 
/*
 * make_request: turn one buffer-head into block I/O on the given major.
 * Tries to merge the buffer into an already-queued request (front or
 * back) before falling back to allocating a fresh request slot.  May
 * sleep waiting for a free slot unless this is a read/write-ahead.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;
	/* reject I/O past the end of the device (blk_size[] is in 1K units,
	   hence the >>1 on the sector count) */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			return;
		}
	/* Uhhuh.. Nasty dead-lock possible here.. */
	if (bh->b_lock)
		return;
	/* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
	lock_buffer(bh);
	/* nothing to do if a write finds a clean buffer or a read finds an
	   up-to-date one */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
	cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* on hd/floppy the head request may be mid-transfer, so only
		   try to merge from the second entry onwards */
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
			req = req->next;
		while (req) {
			/* back-merge: bh starts right where req ends.  Skip
			   requests with a semaphore - those are synchronous
			   page/swap transfers with no buffer-head chain (see
			   ll_rw_page).  244 is the merge size cap - TODO
			   confirm where that limit comes from. */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* front-merge: bh ends right where req starts */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}    

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

/* we found a request. */
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
 336 
 337 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 338 {
 339         struct request * req;
 340         unsigned int major = MAJOR(dev);
 341         struct semaphore sem = MUTEX_LOCKED;
 342 
 343         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 344                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 345                 return;
 346         }
 347         if (rw!=READ && rw!=WRITE)
 348                 panic("Bad block dev command, must be R/W");
 349         if (rw == WRITE && is_read_only(dev)) {
 350                 printk("Can't page to read-only device 0x%X\n",dev);
 351                 return;
 352         }
 353         cli();
 354         req = get_request_wait(NR_REQUEST, dev);
 355         sti();
 356 /* fill up the request-info, and add it to the queue */
 357         req->cmd = rw;
 358         req->errors = 0;
 359         req->sector = page<<3;
 360         req->nr_sectors = 8;
 361         req->current_nr_sectors = 8;
 362         req->buffer = buffer;
 363         req->sem = &sem;
 364         req->bh = NULL;
 365         req->next = NULL;
 366         add_request(major+blk_dev,req);
 367         down(&sem);
 368 }
 369 
 370 /* This function can be used to request a number of buffers from a block
 371    device. Currently the only restriction is that all buffers must belong to
 372    the same device */
 373 
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* on-stack dummy used to plug the queue */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
	       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device.  */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes.  */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */

	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		/* dev == -1 marks the plug as a dummy so it is never treated
		   as a real transfer */
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer is now attached to a request */
			make_request(major, rw, bh[i]);
			/* paging statistics */
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		/* unplug: drop the dummy and kick the driver once for the
		   whole batch */
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

      sorry:
	/* error path: invalidate the buffers so stale data is never trusted */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
 462 
 463 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 464 {
 465         int i;
 466         int buffersize;
 467         struct request * req;
 468         unsigned int major = MAJOR(dev);
 469         struct semaphore sem = MUTEX_LOCKED;
 470 
 471         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 472                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 473                 return;
 474         }
 475 
 476         if (rw!=READ && rw!=WRITE) {
 477                 printk("ll_rw_swap: bad block dev command, must be R/W");
 478                 return;
 479         }
 480         if (rw == WRITE && is_read_only(dev)) {
 481                 printk("Can't swap to read-only device 0x%X\n",dev);
 482                 return;
 483         }
 484         
 485         buffersize = PAGE_SIZE / nb;
 486 
 487         for (i=0; i<nb; i++, buf += buffersize)
 488         {
 489                 cli();
 490                 req = get_request_wait(NR_REQUEST, dev);
 491                 sti();
 492                 req->cmd = rw;
 493                 req->errors = 0;
 494                 req->sector = (b[i] * buffersize) >> 9;
 495                 req->nr_sectors = buffersize >> 9;
 496                 req->current_nr_sectors = buffersize >> 9;
 497                 req->buffer = buf;
 498                 req->sem = &sem;
 499                 req->bh = NULL;
 500                 req->next = NULL;
 501                 add_request(major+blk_dev,req);
 502                 down(&sem);
 503         }
 504 }
 505 
 506 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 507 {
 508         struct request * req;
 509 
 510         req = all_requests + NR_REQUEST;
 511         while (--req >= all_requests) {
 512                 req->dev = -1;
 513                 req->next = NULL;
 514         }
 515         memset(ro_bits,0,sizeof(ro_bits));
 516 #ifdef CONFIG_BLK_DEV_HD
 517         mem_start = hd_init(mem_start,mem_end);
 518 #endif
 519 #ifdef CONFIG_BLK_DEV_XD
 520         mem_start = xd_init(mem_start,mem_end);
 521 #endif
 522 #ifdef CONFIG_CDU31A
 523         mem_start = cdu31a_init(mem_start,mem_end);
 524 #endif
 525 #ifdef CONFIG_MCD
 526         mem_start = mcd_init(mem_start,mem_end);
 527 #endif
 528 #ifdef CONFIG_BLK_DEV_FD
 529         floppy_init();
 530 #else
 531         outb_p(0xc, 0x3f2);
 532 #endif
 533 #ifdef CONFIG_SBPCD
 534         mem_start = sbpcd_init(mem_start, mem_end);
 535 #endif CONFIG_SBPCD
 536         if (ramdisk_size)
 537                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 538         return mem_start;
 539 }

/* [previous][next][first][last][top][bottom][index][help] */