root/kernel/blk_drv/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. is_read_only
  2. set_device_ro
  3. add_request
  4. make_request
  5. ll_rw_page
  6. ll_rw_block
  7. ll_rw_swap_file
  8. blk_dev_init

   1 /*
   2  *  linux/kernel/blk_dev/ll_rw.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/config.h>
  15 #include <linux/locks.h>
  16 
  17 #include <asm/system.h>
  18 
  19 #include "blk.h"
  20 
  21 /*
  22  * The request-struct contains all necessary data
  23  * to load a nr of sectors into memory
  24  */
  25 struct request request[NR_REQUEST];
  26 
  27 /*
  28  * used to wait on when there are no free requests
  29  */
  30 struct wait_queue * wait_for_request = NULL;
  31 
  32 /* This specifies how many sectors to read ahead on the disk.  */
  33 
  34 int read_ahead[MAX_BLKDEV] = {0, };
  35 
  36 /* blk_dev_struct is:
  37  *      do_request-address
  38  *      next-request
  39  */
  40 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
  41         { NULL, NULL },         /* no_dev */
  42         { NULL, NULL },         /* dev mem */
  43         { NULL, NULL },         /* dev fd */
  44         { NULL, NULL },         /* dev hd */
  45         { NULL, NULL },         /* dev ttyx */
  46         { NULL, NULL },         /* dev tty */
  47         { NULL, NULL },         /* dev lp */
  48         { NULL, NULL },         /* dev pipes */
  49         { NULL, NULL },         /* dev sd */
  50         { NULL, NULL }          /* dev st */
  51 };
  52 
  53 /*
  54  * blk_size contains the size of all block-devices in units of 1024 byte
  55  * sectors:
  56  *
  57  * blk_size[MAJOR][MINOR]
  58  *
  59  * if (!blk_size[MAJOR]) then no minor size checking is done.
  60  */
  61 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  62 
  63 /*
  64  * blksize_size contains the size of all block-devices:
  65  *
  66  * blksize_size[MAJOR][MINOR]
  67  *
  68  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  69  */
  70 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  71 
  72 /* RO fail safe mechanism */
  73 
  74 static long ro_bits[MAX_BLKDEV][8];
  75 
  76 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  77 {
  78         int minor,major;
  79 
  80         major = MAJOR(dev);
  81         minor = MINOR(dev);
  82         if (major < 0 || major >= MAX_BLKDEV) return 0;
  83         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
  84 }
  85 
  86 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
  87 {
  88         int minor,major;
  89 
  90         major = MAJOR(dev);
  91         minor = MINOR(dev);
  92         if (major < 0 || major >= MAX_BLKDEV) return;
  93         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
  94         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
  95 }
  96 
  97 /*
  98  * add-request adds a request to the linked list.
  99  * It disables interrupts so that it can muck with the
 100  * request-lists in peace.
 101  */
  102 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
  103 {
  104         struct request * tmp;
  105 
  106         req->next = NULL;
  107         cli();
              /* The request owns the buffer's data from here on: clear the
                 dirty bit so the buffer is not written out a second time. */
  108         if (req->bh)
  109                 req->bh->b_dirt = 0;
              /* Empty queue: install the request and kick the driver now. */
  110         if (!(tmp = dev->current_request)) {
  111                 dev->current_request = req;
  112                 (dev->request_fn)();
  113                 sti();
  114                 return;
  115         }
              /* Non-empty queue: insert after 'tmp' at the position chosen by
                 the IN_ORDER elevator-ordering macro.  Never insert at the
                 head - the driver may be working on the head entry. */
  116         for ( ; tmp->next ; tmp = tmp->next) {
  117                 if ((IN_ORDER(tmp,req) ||
  118                     !IN_ORDER(tmp,tmp->next)) &&
  119                     IN_ORDER(req,tmp->next))
  120                         break;
  121         }
  122         req->next = tmp->next;
  123         tmp->next = req;
  124 
  125 /* Scsi devices are treated differently */
              /* These drivers dequeue a request before processing it (see the
                 comment above make_request's merge loop), so the driver must
                 be prodded on every insertion, not only when the queue was
                 empty.  NOTE(review): majors 8/9/11 are assumed to be the
                 scsi disk/tape/cd drivers - confirm against major.h. */
  126         if(MAJOR(req->dev) == 8 || 
  127            MAJOR(req->dev) == 9 ||
  128            MAJOR(req->dev) == 11)
  129           (dev->request_fn)();
  130 
  131         sti();
  132 }
 133 
  134 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
  135 {
              /* Turn buffer 'bh' into a block-device request: either merge it
                 into an adjacent pending request, or allocate a free slot in
                 the request[] table and hand it to add_request(). */
  136         unsigned int sector, count;
  137         struct request * req;
  138         int rw_ahead;
  139 
  140 /* WRITEA/READA is special case - it is not really needed, so if the */
  141 /* buffer is locked, we just forget about it, else it's a normal read */
  142         rw_ahead = (rw == READA || rw == WRITEA);
  143         if (rw_ahead) {
  144                 if (bh->b_lock)
  145                         return;
  146                 if (rw == READA)
  147                         rw = READ;
  148                 else
  149                         rw = WRITE;
  150         }
  151         if (rw!=READ && rw!=WRITE) {
  152                 printk("Bad block dev command, must be R/W/RA/WA\n");
  153                 return;
  154         }
              /* count = 512-byte sectors per buffer; sector = first sector
                 covered by this buffer on the device. */
  155         count = bh->b_size >> 9;
  156         sector = bh->b_blocknr * count;
              /* If the device's size (in 1K units) is known, refuse requests
                 past the end of the device and invalidate the buffer. */
  157         if (blk_size[major])
  158                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
  159                         bh->b_dirt = bh->b_uptodate = 0;
  160                         return;
  161                 }
  162         lock_buffer(bh);
              /* Writing a clean buffer or reading an up-to-date one would be
                 a no-op: skip the queueing entirely. */
  163         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
  164                 unlock_buffer(bh);
  165                 return;
  166         }
  167 /* The scsi disk drivers completely remove the request from the queue when
  168    they start processing an entry.  For this reason it is safe to continue
  169    to add links to the top entry for scsi devices */
  170 
  171 repeat:
  172         cli();
              /* Try to merge this buffer into a pending request (majors 3, 8
                 and 11 only).  For major 3 the head of the queue is skipped,
                 since that driver may already be working on the head entry.
                 NOTE(review): major 3 assumed to be the hd driver - confirm. */
  173         if ((major == 3 ||  major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
  174                 if(major == 3) req = req->next;
  175                 while (req) {
                              /* Back merge: buffer starts exactly where the
                                 request ends; append it to the bh chain. */
  176                         if (req->dev == bh->b_dev &&
  177                             !req->waiting &&
  178                             req->cmd == rw &&
  179                             req->sector + req->nr_sectors == sector &&
  180                             req->nr_sectors < 254) {
  181                                 req->bhtail->b_reqnext = bh;
  182                                 req->bhtail = bh;
  183                                 req->nr_sectors += count;
  184                                 bh->b_dirt = 0;
  185                                 sti();
  186                                 return;
  187                               }
                              /* Front merge: buffer ends exactly where the
                                 request starts; it becomes the new head. */
  188                         else if ( req->dev == bh->b_dev &&
  189                             !req->waiting &&
  190                             req->cmd == rw &&
  191                             req->sector - count == sector &&
  192                             req->nr_sectors < 254)
  193                                 {
  194                                 req->nr_sectors += count;
  195                                 bh->b_reqnext = req->bh;
  196                                 req->buffer = bh->b_data;
  197                                 req->current_nr_sectors = count;
  198                                 req->sector = sector;
  199                                 bh->b_dirt = 0;
  200                                 req->bh = bh;
  201                                 sti();
  202                                 return;
  203                             }    
  204                         req = req->next;
  205                       }
  206               }
  207 /* we don't allow the write-requests to fill up the queue completely:
  208  * we want some room for reads: they take precedence. The last half
  209  * of the requests (from NR_REQUEST/2 on) are only for reads.
  210  */
  211         if (rw == READ)
  212                 req = request+NR_REQUEST;
  213         else
  214                 req = request+(NR_REQUEST/2);
  215 /* find an empty request */
              /* A free slot is marked by dev < 0 (set in blk_dev_init() and
                 by the drivers when a request completes). */
  216         while (--req >= request)
  217                 if (req->dev < 0)
  218                         goto found;
  219 /* if none found, sleep on new requests: check for rw_ahead */
  220         if (rw_ahead) {
  221                 sti();
  222                 unlock_buffer(bh);
  223                 return;
  224         }
  225         sleep_on(&wait_for_request);
  226         sti();
  227         goto repeat;
  228 
  229 found:
  230 /* fill up the request-info, and add it to the queue */
              /* Claim the slot (dev >= 0) before re-enabling interrupts so
                 nobody else can grab it while we fill in the rest. */
  231         req->dev = bh->b_dev;
  232         sti();
  233         req->cmd = rw;
  234         req->errors = 0;
  235         req->sector = sector;
  236         req->nr_sectors = count;
  237         req->current_nr_sectors = count;
  238         req->buffer = bh->b_data;
  239         req->waiting = NULL;
  240         req->bh = bh;
  241         req->bhtail = bh;
  242         req->next = NULL;
  243         add_request(major+blk_dev,req);
  244 }
 245 
  246 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
  247 {
              /* Synchronously read or write one page (8 sectors of 512
                 bytes) of block device 'dev' at page index 'page', to/from
                 'buffer'.  Sleeps in TASK_SWAPPING until the transfer is
                 done - used by the swapping code. */
  248         struct request * req;
  249         unsigned int major = MAJOR(dev);
  250 
  251         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
  252                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
  253                 return;
  254         }
  255         if (rw!=READ && rw!=WRITE)
  256                 panic("Bad block dev command, must be R/W");
  257         if (rw == WRITE && is_read_only(dev)) {
  258                 printk("Can't page to read-only device 0x%X\n",dev);
  259                 return;
  260         }
  261         cli();
  262 repeat:
              /* Scan the request table backwards for a free slot (dev < 0);
                 if the table is full, sleep until a request completes and
                 try again. */
  263         req = request+NR_REQUEST;
  264         while (--req >= request)
  265                 if (req->dev<0)
  266                         break;
  267         if (req < request) {
  268                 sleep_on(&wait_for_request);
  269                 goto repeat;
  270         }
  271         sti();
  272 /* fill up the request-info, and add it to the queue */
  273         req->dev = dev;
  274         req->cmd = rw;
  275         req->errors = 0;
  276         req->sector = page<<3;
  277         req->nr_sectors = 8;
  278         req->current_nr_sectors = 8;
  279         req->buffer = buffer;
              /* req->waiting tells the driver to wake this task when the
                 request completes; we sleep until then. */
  280         req->waiting = current;
  281         req->bh = NULL;
  282         req->next = NULL;
  283         current->state = TASK_SWAPPING;
  284         add_request(major+blk_dev,req);
  285         schedule();
  286 }
 287 
 288 /* This function can be used to request a number of buffers from a block
 289    device. Currently the only restriction is that all buffers must belong to
 290    the same device */
 291 
  292 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
  293 {
              /* Queue read/write requests for up to 'nr' buffers; NULL
                 entries in bh[] are skipped.  All buffers must be on the
                 same device and have the device's block size. */
  294         unsigned int major;
  295 
  296         struct request plug;
  297         int plugged;
  298         int correct_size;
  299         struct blk_dev_struct * dev;
  300         int i, j;
  301 
  302         /* Make sure that the first block contains something reasonable */
  303         while(!bh[0]){
  304           bh++;
  305           nr--;
  306           if (nr <= 0) return;
  307         };
  308 
  309         if ((major=MAJOR(bh[0]->b_dev)) >= MAX_BLKDEV ||
  310         !(blk_dev[major].request_fn)) {
  311                 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh[0]->b_dev,bh[0]->b_blocknr);
  312                 for (i=0;i<nr; i++)
  313                   if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
  314                 return;
  315         }
  316 
              /* Every buffer must match the device's soft block size
                 (1024 bytes unless blksize_size[] says otherwise); on any
                 mismatch the whole batch is invalidated and dropped. */
  317         for(j=0;j<nr; j++){
  318           if(!bh[j]) continue;
  319           /* Determine correct block size for this device */
  320           correct_size = BLOCK_SIZE;
  321           if(blksize_size[major] && blksize_size[major][MINOR(bh[j]->b_dev)])
  322             correct_size = blksize_size[major][MINOR(bh[j]->b_dev)];
  323           
  324           if(bh[j]->b_size != correct_size) {
  325             
  326             printk("ll_rw_block: only %d-char blocks implemented (%d)\n",
  327                    correct_size, bh[j]->b_size);
  328             
  329             for (i=0;i<nr; i++)
  330               if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
  331             return;
  332           }
  333         };
  334 
  335         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
  336                 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
  337                 for (i=0;i<nr; i++)
  338                   if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
  339                 return;
  340         }
  341 /* If there are no pending requests for this device, then we insert a dummy
  342    request for that device.  This will prevent the request from starting until
  343    we have shoved all of the blocks into the queue, and then we let it rip */
  344 
              /* "Plugging": while the on-stack dummy 'plug' sits at the head,
                 add_request() sees a non-empty queue and will not start the
                 driver.  plug.dev = -1 so no driver treats it as real work. */
  345         plugged = 0;
  346         cli();
  347         if (!blk_dev[major].current_request && nr > 1) {
  348           blk_dev[major].current_request = &plug;
  349           plug.dev = -1;
  350           plug.next = NULL;
  351           plugged = 1;
  352         };
  353         sti();
  354         for (i=0;i<nr; i++)
  355           if (bh[i]) {
  356             bh[i]->b_req = 1;
  357             make_request(major, rw, bh[i]);
  358         }
              /* Unplug: remove the dummy and finally kick the driver with
                 the fully assembled queue. */
  359         if(plugged){
  360           cli();
  361           blk_dev[major].current_request = plug.next;
  362           dev = major+blk_dev;
  363           (dev->request_fn)();
  364           sti();
  365         };
  366 }
 367 
  368 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
  369 {
              /* Read or write one page of swap-file data split into 'nb'
                 pieces: b[i] is the block number of piece i, and 'buf'
                 advances through the page in steps of PAGE_SIZE/nb bytes.
                 Each piece is queued as its own request and waited for
                 synchronously. */
  370         int i;
  371         int buffersize;
  372         struct request * req;
  373         unsigned int major = MAJOR(dev);
  374 
  375         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
  376                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
  377                 return;
  378         }
  379 
  380         if (rw!=READ && rw!=WRITE) {
  381                 printk("ll_rw_swap: bad block dev command, must be R/W");
  382                 return;
  383         }
  384         if (rw == WRITE && is_read_only(dev)) {
  385                 printk("Can't swap to read-only device 0x%X\n",dev);
  386                 return;
  387         }
  388         
              /* Size in bytes of each piece of the page. */
  389         buffersize = PAGE_SIZE / nb;
  390 
  391         for (i=0; i<nb; i++, buf += buffersize)
  392         {
  393 repeat:
                      /* Find a free request slot (dev < 0), sleeping if the
                         table is full.  NOTE(review): unlike ll_rw_page(),
                         this scan runs with interrupts enabled (no cli()) -
                         looks racy against completion handlers; confirm. */
  394                 req = request+NR_REQUEST;
  395                 while (--req >= request)
  396                         if (req->dev<0)
  397                                 break;
  398                 if (req < request) {
  399                         sleep_on(&wait_for_request);
  400                         goto repeat;
  401                 }
  402 
  403                 req->dev = dev;
  404                 req->cmd = rw;
  405                 req->errors = 0;
                      /* Convert block number and byte size to 512-byte
                         sector units. */
  406                 req->sector = (b[i] * buffersize) >> 9;
  407                 req->nr_sectors = buffersize >> 9;
  408                 req->current_nr_sectors = buffersize >> 9;
  409                 req->buffer = buf;
                      /* Wait for this piece to complete before queueing the
                         next one. */
  410                 req->waiting = current;
  411                 req->bh = NULL;
  412                 req->next = NULL;
  413                 current->state = TASK_UNINTERRUPTIBLE;
  414                 add_request(major+blk_dev,req);
  415                 schedule();
  416         }
  417 }
 418 
  419 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
  420 {
              /* Initialise the request table and run the init routine of
                 every configured block driver.  Each *_init() receives the
                 current first-free-memory address and returns the new one;
                 rd_init() instead returns the amount of memory it consumed,
                 hence the '+=' below.  Returns the updated mem_start. */
  421         int i;
  422 
              /* dev = -1 marks a request slot as free (see make_request). */
  423         for (i=0 ; i<NR_REQUEST ; i++) {
  424                 request[i].dev = -1;
  425                 request[i].next = NULL;
  426         }
              /* All devices start out writable. */
  427         memset(ro_bits,0,sizeof(ro_bits));
  428 #ifdef CONFIG_BLK_DEV_HD
  429         mem_start = hd_init(mem_start,mem_end);
  430 #endif
  431 #ifdef CONFIG_BLK_DEV_XD
  432         mem_start = xd_init(mem_start,mem_end);
  433 #endif
  434 #ifdef CONFIG_CDU31A
  435         mem_start = cdu31a_init(mem_start,mem_end);
  436 #endif
  437 #ifdef CONFIG_MCD
  438         mem_start = mcd_init(mem_start,mem_end);
  439 #endif
  440         if (ramdisk_size)
  441                 mem_start += rd_init(mem_start, ramdisk_size*1024);
  442         return mem_start;
  443 }

/* [previous][next][first][last][top][bottom][index][help] */