root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>
#include <asm/io.h>
#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a number of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of
 * 1024-byte blocks:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
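
/*
 * A sketch of how these two tables are used (the numbers are invented
 * for illustration): a driver for a disk with one 40 MB minor on
 * major 3 might set, at init time,
 *
 *      static int example_sizes[1] = { 40960 };
 *      blk_size[3] = example_sizes;
 *
 * with sizes counted in 1024-byte units.  make_request() below can then
 * reject any transfer that ends beyond blk_size[3][minor], and since
 * blksize_size[3] stays NULL a 1024-byte soft blocksize is assumed.
 */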

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;
        return req;
}
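
/*
 * Worked example for the scan above: with n == 4 and prev_found left
 * pointing at entry 2 by an earlier call, the loop probes entries
 * 1, 0, 3 and finally 2 itself (wrapping from the bottom of the table
 * back to limit - 1), returning NULL only if it comes full circle
 * without finding a slot whose dev is negative.  Resuming at the last
 * hit spreads allocations round-robin over the table instead of always
 * rescanning from entry 0.
 */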

/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
        register struct request *req;

        while ((req = get_request(n, dev)) == NULL)
                sleep_on(&wait_for_request);
        return req;
}
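
/*
 * The sleeper above is woken when a slot is freed again: the request
 * completion path (end_request() in blk.h) marks the request free and
 * does wake_up(&wait_for_request).  Note that sleep_on() lets other
 * processes run (and interrupts happen) while this one is asleep; the
 * cli() contract only covers the stretches in which the caller is
 * actually executing.
 */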

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
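
/*
 * ro_bits packs one read-only flag per minor into 32-bit words: eight
 * words per major cover 256 minors, minor >> 5 selects the word and
 * minor & 31 the bit.  For example, set_device_ro(dev, 1) on minor 37
 * sets bit 5 of ro_bits[major][1]; is_read_only() recomputes the same
 * word/bit pair, so the two always agree.
 */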

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->dev)) {
                case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                case HD_MAJOR:
                case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                default:                break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
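
/*
 * The insertion loop above is the elevator sort: IN_ORDER (defined in
 * blk.h) orders requests by command, then device, then sector, and the
 * new request goes in at the first point where it sorts after tmp (or
 * where the list itself wraps, i.e. tmp is out of order with its
 * successor) while still sorting before tmp->next.  As a sketch,
 * inserting sector 70 into a queue holding sectors 10 -> 50 -> 90 -> 20
 * places it between 50 and 90, so each sweep of the disk arm stays
 * monotonic.
 */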

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* READA/WRITEA are a special case - they are not strictly needed, so if
 * the buffer is locked we simply forget about it; otherwise the request
 * is handled as a normal read or write.
 */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count) >> 1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        return;
                }
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
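
/* With the usual NR_REQUEST of 64, a write may therefore only claim one
 * of the first 42 slots ((64*2)/3), while a read may search all 64, so
 * the last third of the table is in effect reserved for reads.
 */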

/* big loop: look for a free request. */

repeat:
        cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
        if ((major == HD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
                        req = req->next;
                while (req) {
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }
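
/* Of the two merge tests above, the first extends a request at its tail
 * (the new buffer starts exactly where the request ends), the second at
 * its head (the new buffer ends where the request starts), which is why
 * the second must also rewrite req->sector, req->buffer and the head of
 * the bh chain.  The 244-sector ceiling keeps a merged request within
 * what the low-level drivers are prepared to handle in one command, and
 * requests with a semaphore (req->sem) are never merged because someone
 * is sleeping on their exact completion.
 */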

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
        if (! req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

/* we found a request. */
        sti();

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
        req = get_request_wait(NR_REQUEST, dev);
        sti();
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}
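
/*
 * ll_rw_page() is synchronous: the on-stack MUTEX_LOCKED semaphore is
 * handed to the request in req->sem, the completion path up()s it when
 * the transfer finishes, and down(&sem) blocks until then.  A page is
 * taken to be 4096 bytes, hence the fixed eight 512-byte sectors and
 * the page << 3 start sector.  A swap-code caller would use it roughly
 * as
 *
 *      ll_rw_page(READ, swap_dev, page_nr, (char *) page_address);
 *
 * where page_nr and page_address stand for whatever the caller tracks
 * (illustrative names, not from this file).
 */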

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
               (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                dev->current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
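
/*
 * Typical use, as in the buffer cache: collect the buffer heads, issue
 * one call, then wait on each buffer.  A minimal sketch (getblk() and
 * wait_on_buffer() are the fs/buffer.c interfaces; the two-block shape
 * is illustrative):
 *
 *      struct buffer_head *bhs[2];
 *      bhs[0] = getblk(dev, block, 1024);
 *      bhs[1] = getblk(dev, block + 1, 1024);
 *      ll_rw_block(READ, 2, bhs);
 *      wait_on_buffer(bhs[0]);
 *      wait_on_buffer(bhs[1]);
 *
 * The "plug" above matters exactly here: for nr > 1 the dummy request
 * (dev == -1) holds the driver off until every block has been queued
 * and elevator-sorted, and request_fn is then kicked once.
 */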

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap_file: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (i=0; i<nb; i++, buf += buffersize)
        {
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->sem = &sem;
                req->bh = NULL;
                req->next = NULL;
                add_request(major+blk_dev,req);
                down(&sem);
        }
}
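
/*
 * The arithmetic above splits one PAGE_SIZE transfer into nb pieces:
 * with PAGE_SIZE == 4096 and nb == 4, buffersize is 1024 bytes, each
 * piece spans buffersize >> 9 == 2 sectors, and entry b[i] names the
 * i-th 1024-byte block on the swap device, giving the start sector
 * (b[i] * 1024) >> 9 == b[i] * 2.  Because down(&sem) sits inside the
 * loop, the pieces are issued strictly one at a time.
 */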

long blk_dev_init(long mem_start, long mem_end)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->dev = -1;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
        mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
        mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: take the FDC out of
                                   reset, motors off, DMA/IRQ enabled */
#endif
#ifdef CONFIG_SBPCD
        mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}
