root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions.
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a number of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
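
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * point blk_size[] at its own per-minor table from its init code.  The
 * names below (MY_MAJOR, my_sizes) are hypothetical.
 */
#if 0
static int my_sizes[256];               /* per-minor size, in 1024-byte units */

static void my_dev_init(void)
{
        my_sizes[0] = 20480;            /* e.g. a 20 MB disk */
        blk_size[MY_MAJOR] = my_sizes;  /* enables the bounds check in make_request() */
}
#endif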

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
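
/*
 * Similarly, a hypothetical driver whose block size differs from the
 * default 1024 bytes would register a per-minor table of sizes in bytes:
 *
 *      blksize_size[MY_MAJOR] = my_blksizes;   (entries e.g. 512)
 */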

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                /* scan backwards, wrapping at the start, so successive
                   calls resume where the last search left off */
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                /* came full circle: no free entry */
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
        register struct request *req;

        while ((req = get_request(n, dev)) == NULL)
                sleep_on(&wait_for_request);
        return req;
}
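
/*
 * Illustrative fragment (not part of this file; `dev' is hypothetical):
 * the calling convention both helpers expect, as used by ll_rw_page()
 * and ll_rw_swap_file() below.
 */
#if 0
        struct request * req;

        cli();                                   /* the free-slot scan needs interrupts off */
        req = get_request_wait(NR_REQUEST, dev); /* may sleep on wait_for_request */
        sti();
#endif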

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
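
/*
 * Worked example of the bitmap layout: minor 37 lives in word
 * ro_bits[major][37 >> 5] == ro_bits[major][1], at bit (37 & 31) == 5.
 * Eight 32-bit words per major cover 256 minors.
 */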

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        /* elevator scan: insert req where it keeps the chain sorted,
           or at the point where the existing order already breaks */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
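
/*
 * Note on the ordering test above: IN_ORDER() (defined in blk.h) compares
 * two requests, roughly by command, device and start sector.  The insertion
 * condition thus implements a one-way elevator: req is placed at the first
 * point of the ascending sweep where it fits, and at the "wrap" point
 * (where the existing chain stops being ordered) anything that sorts below
 * the following entry may be inserted.
 */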

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA is a special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        return;
                }
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
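
/*
 * Worked example (assuming NR_REQUEST were 32): a write may only take one
 * of the first 32*2/3 = 21 slots, so at least 11 slots always stay free
 * for reads, which some process is usually waiting on synchronously.
 */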

/* big loop: look for a free request. */

repeat:
        cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
        if ((major == HD_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major == HD_MAJOR)
                        req = req->next;
                while (req) {
                        /* back merge: bh starts right where req ends */
                        if (req->dev == bh->b_dev &&
                            !req->waiting &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)     /* cap merged request size */
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* front merge: bh ends right where req starts */
                        if (req->dev == bh->b_dev &&
                            !req->waiting &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
        if (! req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

/* we found a request. */
        sti();

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->waiting = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
        req = get_request_wait(NR_REQUEST, dev);
        sti();
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;          /* 8 sectors of 512 bytes per 4K page */
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->waiting = current;
        req->bh = NULL;
        req->next = NULL;
        current->swapping = 1;
        current->state = TASK_SWAPPING;
        add_request(major+blk_dev,req);
        /* The I/O may have inadvertently changed the task state.
           Make sure we really wait until the I/O is done */
        if (current->swapping) current->state = TASK_SWAPPING;
        schedule();
}
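
/*
 * Illustrative call (hypothetical values): read 4K page number 12 of the
 * swap device into a page-sized buffer.  ll_rw_page() itself blocks in
 * schedule() above until the transfer completes.
 *
 *      ll_rw_page(READ, swap_dev, 12, buffer);
 */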

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                dev->current_request = &plug;
                plug.dev = -1;          /* dummy entry: never a real device */
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                /* unplug: drop the dummy and kick the driver at the
                   now fully-populated queue */
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
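
/*
 * Illustrative sketch (hypothetical caller, close to what bread() in
 * fs/buffer.c does): read a single block synchronously through this
 * interface.
 */
#if 0
        struct buffer_head * bh = getblk(dev, block, 1024);

        if (!bh->b_uptodate) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);     /* sleep until the driver unlocks bh */
        }
#endif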

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap_file: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        buffersize = PAGE_SIZE / nb;
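        /*
         * Worked example (assuming 4K pages): a swap page handed over in
         * nb = 4 pieces gives buffersize = 1024 bytes, i.e. 2 sectors per
         * piece in the request fields filled in below.
         */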

        for (i=0; i<nb; i++, buf += buffersize)
        {
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->waiting = current;
                req->bh = NULL;
                req->next = NULL;
                current->swapping = 1;
                current->state = TASK_UNINTERRUPTIBLE;
                add_request(major+blk_dev,req);
                /* The I/O may have inadvertently changed the task state.
                   Make sure we really wait until the I/O is done */
                if (current->swapping) current->state = TASK_UNINTERRUPTIBLE;
                schedule();
        }
}

long blk_dev_init(long mem_start, long mem_end)
{
        struct request * req;

        /* mark every request slot free (dev < 0 means unused) */
        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->dev = -1;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
        mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
        mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
        mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}
