drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions.
  1. get_request
  2. get_request_wait
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. ll_rw_swap_file
  10. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

/*
 * The request-struct contains all the necessary data
 * to load a number of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices, in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the block size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then a block size of 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
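        /*
         * Scan the table circularly, starting just below the last slot we
         * handed out: an entry is free when its dev field is negative (see
         * blk_dev_init).  Coming back around to prev_found means no entry
         * is free.
         */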
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request_wait(int n, int dev)
{
        register struct request *req;

        while ((req = get_request(n, dev)) == NULL)
                sleep_on(&wait_for_request);
        return req;
}

/* RO fail safe mechanism */

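/* one read-only bit per minor device: long index = minor >> 5, bit = minor & 31 */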
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor, major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major == FLOPPY_MAJOR && floppy_is_wp(minor)) return 1;
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev, int flag)
{
        int minor, major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

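        /*
         * Update the per-drive I/O statistics.  The drive number is encoded
         * in the high bits of the minor, since the low bits select the
         * partition: SCSI disks use bits 4-6, HD/XT disks use bits 6-7.
         */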
        switch (MAJOR(req->dev)) {
                case SCSI_DISK_MAJOR:   disk_index = (MINOR(req->dev) & 0x0070) >> 4;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                case HD_MAJOR:
                case XT_DISK_MAJOR:     disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
                                        if (disk_index < 4)
                                                kstat.dk_drive[disk_index]++;
                                        break;
                default:                break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
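        /*
         * Elevator insertion: walk the queue and put the request into the
         * first slot where it keeps the IN_ORDER sort, or where the
         * existing order is already broken anyway.
         */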
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}

static void make_request(int major, int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA is a special case - it is not really needed, so if the
 * buffer is locked, we just forget about it, else it is treated as a
 * normal read or write. */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw != READ && rw != WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count) >> 1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        return;
                }
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
        cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for scsi devices.
 */
        if ((major == HD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
                        req = req->next;
                while (req) {
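                        /*
                         * Back merge: the new buffer starts exactly where
                         * this request ends, so append it at the tail.
                         */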
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

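                        /*
                         * Front merge: the new buffer ends exactly where
                         * this request starts, so it becomes the new head.
                         */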
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again. */
        if (!req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

/* we found a request. */
        sti();

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(blk_dev + major, req);
}

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw != READ && rw != WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
        req = get_request_wait(NR_REQUEST, dev);
        sti();
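/* a page here is 8 sectors of 512 bytes (4 kB), hence page<<3 and nr_sectors = 8 */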
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(blk_dev + major, req);
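        /* sem starts locked and the driver is expected to up() it on
           completion, so this down() makes the page transfer synchronous */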
        down(&sem);
}

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device. */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine the correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-byte blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                dev->current_request = &plug;
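                /* dev = -1 can never match a real b_dev, so the plug is
                   never mistaken for a mergeable request in make_request() */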
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw != READ && rw != WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

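        /* split the page evenly: each of the nb blocks covers buffersize
           bytes, and b[i] is that block's number in buffersize units */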
        buffersize = PAGE_SIZE / nb;

        for (i = 0; i < nb; i++, buf += buffersize)
        {
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->sem = &sem;
                req->bh = NULL;
                req->next = NULL;
                add_request(blk_dev + major, req);
                down(&sem);
        }
}

long blk_dev_init(long mem_start, long mem_end)
{
        struct request * req;

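        /* mark every request slot free: get_request() treats dev < 0 as unused */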
        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->dev = -1;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
        mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
        mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
        mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}
