root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

   1 /*
   2  *  linux/kernel/blk_dev/ll_rw.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/kernel_stat.h>
  13 #include <linux/errno.h>
  14 #include <linux/string.h>
  15 #include <linux/config.h>
  16 #include <linux/locks.h>
  17 
  18 #include <asm/system.h>
  19 
  20 #include "blk.h"
  21 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory.
 * A slot whose dev field is negative is free; see blk_dev_init()
 * (which marks them all -1) and get_request() (which claims one).
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 * (woken by end_request() in the drivers when a slot is released)
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 * One entry per major device number; a NULL request_fn means the
 * major is not present/registered.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  72 
  73 /*
  74  * look for a free request in the first N entries.
  75  * NOTE: interrupts must be disabled on the way in, and will still
  76  *       be disabled on the way out.
  77  */
  78 static inline struct request * get_request(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  79 {
  80         static struct request *prev_found = NULL, *prev_limit = NULL;
  81         register struct request *req, *limit;
  82 
  83         if (n <= 0)
  84                 panic("get_request(%d): impossible!\n", n);
  85 
  86         limit = all_requests + n;
  87         if (limit != prev_limit) {
  88                 prev_limit = limit;
  89                 prev_found = all_requests;
  90         }
  91         req = prev_found;
  92         for (;;) {
  93                 req = ((req > all_requests) ? req : limit) - 1;
  94                 if (req->dev < 0)
  95                         break;
  96                 if (req == prev_found)
  97                         return NULL;
  98         }
  99         prev_found = req;
 100         req->dev = dev;
 101         return req;
 102 }
 103 
 104 /*
 105  * wait until a free request in the first N entries is available.
 106  * NOTE: interrupts must be disabled on the way in, and will still
 107  *       be disabled on the way out.
 108  */
 109 static inline struct request * get_request_wait(int n, int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 110 {
 111         register struct request *req;
 112 
 113         while ((req = get_request(n, dev)) == NULL)
 114                 sleep_on(&wait_for_request);
 115         return req;
 116 }
 117 
/* RO fail safe mechanism */

/* One read-only bit per minor device:
 * ro_bits[major][minor >> 5], bit (minor & 31).
 * 8 longs per major cover 256 minors. */
static long ro_bits[MAX_BLKDEV][8];
 121 
 122 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 123 {
 124         int minor,major;
 125 
 126         major = MAJOR(dev);
 127         minor = MINOR(dev);
 128         if ( major == FLOPPY_MAJOR && floppy_is_wp( minor) ) return 1;
 129         if (major < 0 || major >= MAX_BLKDEV) return 0;
 130         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 131 }
 132 
 133 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 134 {
 135         int minor,major;
 136 
 137         major = MAJOR(dev);
 138         minor = MINOR(dev);
 139         if (major < 0 || major >= MAX_BLKDEV) return;
 140         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 141         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 142 }
 143 
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;

        req->next = NULL;
        cli();
        /* the block is committed to the queue now, so its buffer no
           longer counts as dirty */
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                /* queue was idle: install the request and kick the driver */
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        /* elevator insertion: keep the list IN_ORDER-sorted, allowing
           for the single wrap point of the circular ordering */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
 178 
/*
 * Turn one buffer into a block-device request: try to merge it onto
 * an existing queued request for the same device, otherwise allocate
 * a free request slot (sleeping if necessary, unless read-ahead) and
 * queue it via add_request().  The buffer must describe b_size bytes
 * at block b_blocknr of bh->b_dev.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        /* count/sector are in 512-byte sector units */
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        /* reject accesses past the end of the device
           (blk_size[] is in 1024-byte units, hence the >>1) */
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        return;
                }
        lock_buffer(bh);
        /* nothing to do: writing a clean buffer or reading an
           up-to-date one */
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
        cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
        if ((major == HD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                /* for hd/floppy the head request is being serviced:
                   never touch it, start merging at the second entry */
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
                        req = req->next;
                while (req) {
                        /* back merge: buffer continues req exactly
                           (skip synchronous requests with a waiter) */
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* front merge: buffer ends where req begins */
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
        if (! req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

/* we found a request. */
        sti();

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
 304 
 305 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 306 {
 307         struct request * req;
 308         unsigned int major = MAJOR(dev);
 309         struct semaphore sem = MUTEX_LOCKED;
 310 
 311         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 312                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 313                 return;
 314         }
 315         if (rw!=READ && rw!=WRITE)
 316                 panic("Bad block dev command, must be R/W");
 317         if (rw == WRITE && is_read_only(dev)) {
 318                 printk("Can't page to read-only device 0x%X\n",dev);
 319                 return;
 320         }
 321         cli();
 322         req = get_request_wait(NR_REQUEST, dev);
 323         sti();
 324 /* fill up the request-info, and add it to the queue */
 325         req->cmd = rw;
 326         req->errors = 0;
 327         req->sector = page<<3;
 328         req->nr_sectors = 8;
 329         req->current_nr_sectors = 8;
 330         req->buffer = buffer;
 331         req->sem = &sem;
 332         req->bh = NULL;
 333         req->next = NULL;
 334         add_request(major+blk_dev,req);
 335         down(&sem);
 336 }
 337 
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;    /* on-stack dummy that holds the queue */
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        };

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                /* dev == -1 marks the plug as not a real request */
                dev->current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        /* paging statistics */
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                /* unplug: drop the dummy and start the driver on the
                   accumulated queue */
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

        /* error exit: invalidate all the buffers */
      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
 430 
/*
 * Read or write the nb blocks listed in b[] of a swap file, one
 * synchronous request at a time.  Each block is PAGE_SIZE/nb bytes;
 * buf supplies (WRITE) or receives (READ) the data contiguously.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        /* nb blocks per page, so each block is PAGE_SIZE/nb bytes */
        buffersize = PAGE_SIZE / nb;

        for (i=0; i<nb; i++, buf += buffersize)
        {
                /* allocate a request slot (may sleep); interrupts off */
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9; /* -> 512-byte sectors */
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->sem = &sem;        /* end_request() ups this */
                req->bh = NULL;
                req->next = NULL;
                add_request(major+blk_dev,req);
                /* wait for this block before issuing the next */
                down(&sem);
        }
}
 473 
 474 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 475 {
 476         struct request * req;
 477 
 478         req = all_requests + NR_REQUEST;
 479         while (--req >= all_requests) {
 480                 req->dev = -1;
 481                 req->next = NULL;
 482         }
 483         memset(ro_bits,0,sizeof(ro_bits));
 484 #ifdef CONFIG_BLK_DEV_HD
 485         mem_start = hd_init(mem_start,mem_end);
 486 #endif
 487 #ifdef CONFIG_BLK_DEV_XD
 488         mem_start = xd_init(mem_start,mem_end);
 489 #endif
 490 #ifdef CONFIG_CDU31A
 491         mem_start = cdu31a_init(mem_start,mem_end);
 492 #endif
 493 #ifdef CONFIG_MCD
 494         mem_start = mcd_init(mem_start,mem_end);
 495 #endif
 496 #ifdef CONFIG_SBPCD
 497         mem_start = sbpcd_init(mem_start, mem_end);
 498 #endif CONFIG_SBPCD
 499         if (ramdisk_size)
 500                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 501         return mem_start;
 502 }

/* [previous][next][first][last][top][bottom][index][help] */