root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

   1 /*
   2  *  linux/kernel/blk_dev/ll_rw.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/config.h>
  15 #include <linux/locks.h>
  16 
  17 #include <asm/system.h>
  18 
  19 #include "blk.h"
  20 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory.
 * A slot is free when its .dev field is < 0 (set up by blk_dev_init,
 * tested by get_request).
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 * (one entry per major number; a NULL request_fn means "no such device")
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  72 /*
  73  * look for a free request in the first N entries.
  74  * NOTE: interrupts must be disabled on the way in, and will still
  75  *       be disabled on the way out.
  76  */
static inline struct request * get_request(int n, int dev)
{
	/* Rotor: remember where the previous search succeeded (for a given
	 * limit) so repeated calls don't always rescan the same slots. */
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	/* a different 'n' than last time invalidates the cached position */
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* walk backwards, wrapping from the bottom back to limit-1 */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)	/* dev < 0 marks a free slot */
			break;
		if (req == prev_found)	/* came full circle: nothing free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;			/* claim the slot for this device */
	return req;
}
 102 
 103 /*
 104  * wait until a free request in the first N entries is available.
 105  * NOTE: interrupts must be disabled on the way in, and will still
 106  *       be disabled on the way out.
 107  */
 108 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 109 {
 110         register struct request *req;
 111 
 112         while ((req = get_request(n, dev)) == NULL)
 113                 sleep_on(&wait_for_request);
 114         return req;
 115 }
 116 
/* RO fail safe mechanism */

/* One read-only bit per minor device: indexed by [major][minor >> 5],
 * bit (minor & 31) — i.e. 32 minors per long, 8 longs per major. */
static long ro_bits[MAX_BLKDEV][8];
 120 
 121 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 122 {
 123         int minor,major;
 124 
 125         major = MAJOR(dev);
 126         minor = MINOR(dev);
 127         if (major < 0 || major >= MAX_BLKDEV) return 0;
 128         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 129 }
 130 
 131 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 132 {
 133         int minor,major;
 134 
 135         major = MAJOR(dev);
 136         minor = MINOR(dev);
 137         if (major < 0 || major >= MAX_BLKDEV) return;
 138         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 139         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 140 }
 141 
 142 /*
 143  * add-request adds a request to the linked list.
 144  * It disables interrupts so that it can muck with the
 145  * request-lists in peace.
 146  */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();			/* the queue is also touched at interrupt time */
	if (req->bh)
		req->bh->b_dirt = 0;	/* buffer is now scheduled for I/O */
	if (!(tmp = dev->current_request)) {
		/* queue was idle: install the request and kick the driver */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insert: find the first spot after 'tmp' where req fits
	 * in IN_ORDER sequence, or where the existing list itself wraps. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
 176 
/*
 * Build a request for one buffer head and queue it for the device,
 * merging with an adjacent pending request when possible.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	/* sector/count are in 512-byte units (b_size >> 9) */
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* bounds check against device size; blk_size[] is in kB, so the
	 * sector count is halved for the comparison */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	/* nothing to do: clean buffer on write, or up-to-date buffer on read */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
	cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* skip the head entry: the HD driver is working on it */
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			/* back-merge: bh starts right where 'req' ends */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;
				sti();
				return;
			}

			/* front-merge: bh ends right where 'req' starts */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 254)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				bh->b_dirt = 0;
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

/* we found a request. */
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;		/* async: nobody sleeps on this one */
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
 300 
/*
 * Synchronously read or write one page (8 sectors of 512 bytes) to/from
 * 'buffer'.  Sleeps in TASK_SWAPPING until the driver completes the request.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* get_request()/get_request_wait() require interrupts off */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* page number -> 512-byte sector */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;		/* driver wakes us on completion */
	req->bh = NULL;
	req->next = NULL;
	/* change state BEFORE queueing: the completion wakeup can arrive
	 * from interrupt context before we reach schedule() */
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();
}
 333 
 334 /* This function can be used to request a number of buffers from a block
 335    device. Currently the only restriction is that all buffers must belong to
 336    the same device */
 337 
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request used to hold off the driver */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
	       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device.  */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes.  */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */

	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		/* plug.dev = -1 so it can never match a real device
		 * (e.g. in make_request's merge scan) */
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* mark buffer as queued */
			make_request(major, rw, bh[i]);
		}
	}
	if (plugged) {
		/* unplug: point the driver at the real queue and start it */
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

	/* on error: invalidate the buffers so nobody trusts their contents */
      sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
 422 
 423 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 424 {
 425         int i;
 426         int buffersize;
 427         struct request * req;
 428         unsigned int major = MAJOR(dev);
 429 
 430         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 431                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 432                 return;
 433         }
 434 
 435         if (rw!=READ && rw!=WRITE) {
 436                 printk("ll_rw_swap: bad block dev command, must be R/W");
 437                 return;
 438         }
 439         if (rw == WRITE && is_read_only(dev)) {
 440                 printk("Can't swap to read-only device 0x%X\n",dev);
 441                 return;
 442         }
 443         
 444         buffersize = PAGE_SIZE / nb;
 445 
 446         for (i=0; i<nb; i++, buf += buffersize)
 447         {
 448                 cli();
 449                 req = get_request_wait(NR_REQUEST, dev);
 450                 sti();
 451                 req->cmd = rw;
 452                 req->errors = 0;
 453                 req->sector = (b[i] * buffersize) >> 9;
 454                 req->nr_sectors = buffersize >> 9;
 455                 req->current_nr_sectors = buffersize >> 9;
 456                 req->buffer = buf;
 457                 req->waiting = current;
 458                 req->bh = NULL;
 459                 req->next = NULL;
 460                 current->state = TASK_UNINTERRUPTIBLE;
 461                 add_request(major+blk_dev,req);
 462                 schedule();
 463         }
 464 }
 465 
 466 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 467 {
 468         struct request * req;
 469 
 470         req = all_requests + NR_REQUEST;
 471         while (--req >= all_requests) {
 472                 req->dev = -1;
 473                 req->next = NULL;
 474         }
 475         memset(ro_bits,0,sizeof(ro_bits));
 476 #ifdef CONFIG_BLK_DEV_HD
 477         mem_start = hd_init(mem_start,mem_end);
 478 #endif
 479 #ifdef CONFIG_BLK_DEV_XD
 480         mem_start = xd_init(mem_start,mem_end);
 481 #endif
 482 #ifdef CONFIG_CDU31A
 483         mem_start = cdu31a_init(mem_start,mem_end);
 484 #endif
 485 #ifdef CONFIG_MCD
 486         mem_start = mcd_init(mem_start,mem_end);
 487 #endif
 488         if (ramdisk_size)
 489                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 490         return mem_start;
 491 }

/* [previous][next][first][last][top][bottom][index][help] */