root/kernel/blk_drv/ll_rw_blk.c

DEFINITIONS

This source file includes the following definitions:
  1. is_read_only
  2. set_device_ro
  3. add_request
  4. make_request
  5. ll_rw_page
  6. ll_rw_block
  7. ll_rw_swap_file
  8. blk_dev_init

/*
 *  linux/kernel/blk_drv/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
struct request request[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of
 * 1024-byte blocks:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the block size of each block-device:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/* RO fail safe mechanism */

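/* one bit per minor device: ro_bits[major][minor >> 5] holds the bit
   (1 << (minor & 31)), so eight 32-bit words cover 256 possible minors */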
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;

        req->next = NULL;
        cli();
        if (req->bh)
                req->bh->b_dirt = 0;
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
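/* elevator-style insertion: scan for the first place where the ordering
   (IN_ORDER, defined in blk.h: by command, then device, then sector) is
   preserved, so that disk head movement is kept to a minimum */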
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* Scsi devices are treated differently */
        if (MAJOR(req->dev) == 8 ||
            MAJOR(req->dev) == 9 ||
            MAJOR(req->dev) == 11)
                (dev->request_fn)();

        sti();
}

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
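/* count is in 512-byte sectors (b_size >> 9); blk_size[] is kept in
   1024-byte blocks, so the device-size check below halves the sector
   figure with >>1 before comparing */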
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        return;
                }
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }
/* The scsi disk drivers completely remove the request from the queue when
   they start processing an entry.  For this reason it is safe to continue
   to add links to the top entry for scsi devices */

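/* try to merge this buffer onto the tail of an existing contiguous
   request: major 3 is the IDE disk, 8 and 11 the SCSI disk and CD-ROM;
   on the IDE disk the head of the queue may already be active, so it
   is skipped */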
repeat:
        cli();
        if ((major == 3 || major == 8 || major == 11) &&
            (req = blk_dev[major].current_request)) {
                if (major == 3)
                        req = req->next;
                while (req) {
                        if (req->dev == bh->b_dev &&
                            !req->waiting &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 254) {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                bh->b_dirt = 0;
                                sti();
                                return;
                        }
                        req = req->next;
                }
        }
/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last half
 * of the requests are only for reads.
 */
        if (rw == READ)
                req = request+NR_REQUEST;
        else
                req = request+(NR_REQUEST/2);
/* find an empty request */
        while (--req >= request)
                if (req->dev < 0)
                        goto found;
/* if none found, sleep on new requests: check for rw_ahead */
        if (rw_ahead) {
                sti();
                unlock_buffer(bh);
                return;
        }
        sleep_on(&wait_for_request);
        sti();
        goto repeat;

found:
/* fill up the request-info, and add it to the queue */
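/* setting req->dev marks this slot as taken (it was < 0 while free),
   so interrupts can be re-enabled before the rest is filled in */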
        req->dev = bh->b_dev;
        sti();
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->waiting = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
repeat:
        req = request+NR_REQUEST;
        while (--req >= request)
                if (req->dev<0)
                        break;
        if (req < request) {
                sleep_on(&wait_for_request);
                goto repeat;
        }
        sti();
/* fill up the request-info, and add it to the queue */
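/* a page is PAGE_SIZE (4096) bytes, i.e. eight 512-byte sectors,
   hence sector = page<<3 and nr_sectors = 8 */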
        req->dev = dev;
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->waiting = current;
        req->bh = NULL;
        req->next = NULL;
        current->state = TASK_SWAPPING;
        add_request(major+blk_dev,req);
        schedule();
}

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i, j;

        /* Make sure that the first block contains something reasonable */
        while (!bh[0]) {
                bh++;
                nr--;
                if (nr <= 0)
                        return;
        }

        if ((major = MAJOR(bh[0]->b_dev)) >= MAX_BLKDEV ||
            !(blk_dev[major].request_fn)) {
                printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",
                       bh[0]->b_dev, bh[0]->b_blocknr);
                for (i = 0; i < nr; i++)
                        if (bh[i])
                                bh[i]->b_dirt = bh[i]->b_uptodate = 0;
                return;
        }

        for (j = 0; j < nr; j++) {
                if (!bh[j])
                        continue;
                /* Determine correct block size for this device */
                correct_size = BLOCK_SIZE;
                if (blksize_size[major] && blksize_size[major][MINOR(bh[j]->b_dev)])
                        correct_size = blksize_size[major][MINOR(bh[j]->b_dev)];

                if (bh[j]->b_size != correct_size) {
                        printk("ll_rw_block: only %d-char blocks implemented (%d)\n",
                               correct_size, bh[j]->b_size);
                        for (i = 0; i < nr; i++)
                                if (bh[i])
                                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
                        return;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                for (i = 0; i < nr; i++)
                        if (bh[i])
                                bh[i]->b_dirt = bh[i]->b_uptodate = 0;
                return;
        }
/* If there are no pending requests for this device, then we insert a dummy
   request for that device.  This will prevent the request from starting until
   we have shoved all of the blocks into the queue, and then we let it rip */

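/* the dummy lives on the stack: plug.dev = -1 keeps it from being taken
   for a real request, and it is unlinked again below before request_fn
   is finally called */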
        plugged = 0;
        cli();
        if (!blk_dev[major].current_request && nr > 1) {
                blk_dev[major].current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++)
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                }
        if (plugged) {
                cli();
                blk_dev[major].current_request = plug.next;
                dev = major+blk_dev;
                (dev->request_fn)();
                sti();
        }
}

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        buffersize = PAGE_SIZE / nb;

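/* b[i] gives the block number of piece i; multiplying by buffersize
   yields a byte offset, and >>9 converts that to a 512-byte sector */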
        for (i=0; i<nb; i++, buf += buffersize)
        {
repeat:
                req = request+NR_REQUEST;
                while (--req >= request)
                        if (req->dev<0)
                                break;
                if (req < request) {
                        sleep_on(&wait_for_request);
                        goto repeat;
                }

                req->dev = dev;
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->waiting = current;
                req->bh = NULL;
                req->next = NULL;
                current->state = TASK_UNINTERRUPTIBLE;
                add_request(major+blk_dev,req);
                schedule();
        }
}

long blk_dev_init(long mem_start, long mem_end)
{
        int i;

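        /* a request slot is marked free by dev == -1; make_request()
           and ll_rw_page() look for req->dev < 0 when allocating */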
        for (i=0 ; i<NR_REQUEST ; i++) {
                request[i].dev = -1;
                request[i].next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}
