root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>
#include <asm/io.h>
#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;
        return req;
}
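
The scan above caches its last success in prev_found, so the next allocation resumes there instead of rescanning the same busy slots. A minimal user-space sketch of the same backwards, wrapping scan (illustrative only, with hypothetical names; a negative value marks a free slot, as req->dev < 0 does above):

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

#define NSLOTS 8

static int slots[NSLOTS];               /* < 0 means "free", like req->dev */
static int *prev_found;

static int *find_free(void)
{
        int *p = prev_found;
        int *limit = slots + NSLOTS;

        for (;;) {
                p = ((p > slots) ? p : limit) - 1;  /* step back, wrapping */
                if (*p < 0)
                        break;                      /* found a free slot */
                if (p == prev_found)
                        return NULL;                /* full circle: none free */
        }
        prev_found = p;
        return p;
}

int main(void)
{
        int i, *s;

        for (i = 0; i < NSLOTS; i++)
                slots[i] = -1;                      /* all free initially */
        prev_found = slots;

        s = find_free();
        if (s) {
                *s = 3;                             /* claim for "device 3" */
                printf("claimed slot %d\n", (int)(s - slots));
        }
        return 0;
}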

/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
        register struct request *req;

        while ((req = get_request(n, dev)) == NULL)
                sleep_on(&wait_for_request);
        return req;
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
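
is_read_only() and set_device_ro() pack one flag per minor into an array of longs: minor >> 5 selects a 32-bit word and minor & 31 the bit inside it. A stand-alone sketch of the same bitmap for a single major (illustrative only, hypothetical names set_ro/is_ro):

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

static long ro_bits[8];                 /* 8 words * 32 bits = 256 minors */

static void set_ro(int minor, int flag)
{
        if (flag)
                ro_bits[minor >> 5] |= 1L << (minor & 31);
        else
                ro_bits[minor >> 5] &= ~(1L << (minor & 31));
}

static int is_ro(int minor)
{
        return (ro_bits[minor >> 5] >> (minor & 31)) & 1;
}

int main(void)
{
        set_ro(33, 1);                  /* word 1, bit 1 */
        printf("minor 33: %d, minor 34: %d\n", is_ro(33), is_ro(34));
        return 0;
}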

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->dev)) {
                case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                case HD_MAJOR:
                case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                default:                break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
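
The insertion loop above is a one-way elevator: the queue is kept as an ascending run (ordered by the IN_ORDER macro from blk.h, which compares command, device, and sector) that may wrap around at most once, and a new request goes in at the first spot that preserves that shape. A reduced user-space sketch of the same test on bare sector numbers (illustrative only; a hypothetical in_order() stands in for IN_ORDER()):

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

struct node { int sector; struct node *next; };

static int in_order(struct node *a, struct node *b)
{
        return a->sector <= b->sector;
}

/* Insert req after the head (never before it: the head is being
 * serviced), at the first point that keeps the list one ascending
 * run with at most one wrap-around. */
static void elevator_insert(struct node *head, struct node *req)
{
        struct node *tmp = head;

        for ( ; tmp->next ; tmp = tmp->next) {
                if ((in_order(tmp, req) ||
                     !in_order(tmp, tmp->next)) &&
                    in_order(req, tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;
}

int main(void)
{
        struct node a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
        struct node *p;

        a.next = &b;                    /* queue: 10 -> 30 */
        elevator_insert(&a, &c);        /* 20 lands between them */
        for (p = &a; p; p = p->next)
                printf("%d ", p->sector);
        printf("\n");
        return 0;
}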

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA is a special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it; otherwise it is treated */
/* as a normal read or write. */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (bh->b_lock)
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
        cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
        if ((major == HD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
                        req = req->next;
                while (req) {
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
        if (! req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

/* we found a request. */
        sti();

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
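
Before taking a fresh slot, make_request first tries to merge the buffer into an existing request: onto the tail of a request that ends where the buffer starts, or onto the head of one that starts where it ends (the req->sector - count == sector test). A sketch of the two merge tests on bare sector ranges (illustrative only, hypothetical names):

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

struct range { unsigned int sector, nr_sectors; };

static int try_merge(struct range *r, unsigned int sector, unsigned int count)
{
        if (r->sector + r->nr_sectors == sector) {   /* back merge */
                r->nr_sectors += count;
                return 1;
        }
        if (r->sector == sector + count) {           /* front merge */
                r->sector = sector;
                r->nr_sectors += count;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct range r = { 100, 8 };

        try_merge(&r, 108, 8);          /* back:  now [100, 116) */
        try_merge(&r, 96, 4);           /* front: now [96, 116)  */
        printf("sector %u, nr_sectors %u\n", r.sector, r.nr_sectors);
        return 0;
}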

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
        req = get_request_wait(NR_REQUEST, dev);
        sti();
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}
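
ll_rw_page() is synchronous: it attaches a locked semaphore to the request and sleeps in down() until the driver's completion path ups it. A user-space sketch of that hand-off using POSIX semaphores, with a thread standing in for the interrupt-side completion (illustrative only, hypothetical names):

/* Illustrative sketch only; not part of the kernel source. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct io { int page; sem_t done; };

static void *completer(void *arg)       /* stands in for the driver/irq side */
{
        struct io *io = arg;

        printf("page %d transferred\n", io->page);
        sem_post(&io->done);            /* like up(req->sem) on completion */
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct io io = { 5 };

        sem_init(&io.done, 0, 0);       /* starts locked, like MUTEX_LOCKED */
        pthread_create(&t, NULL, completer, &io);
        sem_wait(&io.done);             /* like down(&sem): sleep until done */
        pthread_join(t, NULL);
        sem_destroy(&io.done);
        return 0;
}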

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                dev->current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
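
The plug above is a dummy request with dev == -1 parked at the head of the queue: while it sits there the device looks busy, so queueing the individual buffers cannot start the hardware early; once everything is queued the plug is removed and request_fn is kicked once. A toy model of the idea (illustrative only, hypothetical names):

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

struct req { int sector; struct req *next; };

static struct req *current_request;

static void request_fn(void)            /* stand-in device strategy routine */
{
        for ( ; current_request ; current_request = current_request->next)
                printf("servicing sector %d\n", current_request->sector);
}

static void add_req(struct req *r)      /* queue; kick the device if idle */
{
        struct req **p = &current_request;

        r->next = NULL;
        if (!*p) {
                *p = r;
                request_fn();
                return;
        }
        while ((*p)->next)
                p = &(*p)->next;
        (*p)->next = r;
}

int main(void)
{
        struct req plug = { -1, NULL };         /* sentinel: device "busy" */
        struct req a = { 8, NULL }, b = { 16, NULL }, c = { 24, NULL };

        current_request = &plug;                /* plug the queue */
        add_req(&a);
        add_req(&b);
        add_req(&c);
        current_request = plug.next;            /* unplug... */
        request_fn();                           /* ...and let it rip */
        return 0;
}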

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (i=0; i<nb; i++, buf += buffersize)
        {
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->sem = &sem;
                req->bh = NULL;
                req->next = NULL;
                add_request(major+blk_dev,req);
                down(&sem);
        }
}
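
The arithmetic above splits one page across nb swap buffers, buffersize = PAGE_SIZE / nb bytes each, and converts the block number b[i] (counted in buffersize units) to 512-byte sectors with (b[i] * buffersize) >> 9. A worked example, assuming a hypothetical 4096-byte page and nb == 4:

/* Illustrative sketch only; not part of the kernel source. */
#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096, nb = 4;          /* assumed values */
        unsigned int buffersize = page_size / nb;       /* 1024 bytes */
        unsigned int b = 7;                             /* block number */

        printf("sector %u, nr_sectors %u\n",
               (b * buffersize) >> 9,                   /* 14 */
               buffersize >> 9);                        /* 2  */
        return 0;
}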

long blk_dev_init(long mem_start, long mem_end)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->dev = -1;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
        mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
        mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_SBPCD
        mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}
