root/kernel/blk_drv/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. is_read_only
  2. set_device_ro
  3. add_request
  4. make_request
  5. ll_rw_page
  6. ll_rw_block
  7. ll_rw_swap_file
  8. blk_dev_init

/*
 *  linux/kernel/blk_drv/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/config.h>
  15 #include <linux/locks.h>
  16 
  17 #include <asm/system.h>
  18 
  19 #include "blk.h"
  20 
  21 /*
  22  * The request-struct contains all necessary data
  23  * to load a nr of sectors into memory
  24  */
  25 struct request request[NR_REQUEST];
  26 
  27 /*
  28  * used to wait on when there are no free requests
  29  */
  30 struct wait_queue * wait_for_request = NULL;
  31 
  32 /* This specifies how many sectors to read ahead on the disk.  */
  33 
  34 int read_ahead[NR_BLK_DEV] = {0, };
  35 
  36 /* blk_dev_struct is:
  37  *      do_request-address
  38  *      next-request
  39  */
  40 struct blk_dev_struct blk_dev[NR_BLK_DEV] = {
  41         { NULL, NULL },         /* no_dev */
  42         { NULL, NULL },         /* dev mem */
  43         { NULL, NULL },         /* dev fd */
  44         { NULL, NULL },         /* dev hd */
  45         { NULL, NULL },         /* dev ttyx */
  46         { NULL, NULL },         /* dev tty */
  47         { NULL, NULL },         /* dev lp */
  48         { NULL, NULL },         /* dev pipes */
  49         { NULL, NULL },         /* dev sd */
  50         { NULL, NULL }          /* dev st */
  51 };
  52 
  53 /*
  54  * blk_size contains the size of all block-devices:
  55  *
  56  * blk_size[MAJOR][MINOR]
  57  *
  58  * if (!blk_size[MAJOR]) then no minor size checking is done.
  59  */
  60 int * blk_size[NR_BLK_DEV] = { NULL, NULL, };
  61 
  62 /* RO fail safe mechanism */
  63 
  64 static long ro_bits[NR_BLK_DEV][8];
  65 
  66 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  67 {
  68         int minor,major;
  69 
  70         major = MAJOR(dev);
  71         minor = MINOR(dev);
  72         if (major < 0 || major >= NR_BLK_DEV) return 0;
  73         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
  74 }
  75 
  76 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
  77 {
  78         int minor,major;
  79 
  80         major = MAJOR(dev);
  81         minor = MINOR(dev);
  82         if (major < 0 || major >= NR_BLK_DEV) return;
  83         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
  84         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
  85 }
  86 
  87 /*
  88  * add-request adds a request to the linked list.
  89  * It disables interrupts so that it can muck with the
  90  * request-lists in peace.
  91  */
  92 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
  93 {
  94         struct request * tmp;
  95 
  96         req->next = NULL;
  97         cli();
  98         if (req->bh)
  99                 req->bh->b_dirt = 0;
 100         if (!(tmp = dev->current_request)) {
 101                 dev->current_request = req;
 102                 (dev->request_fn)();
 103                 sti();
 104                 return;
 105         }
 106         for ( ; tmp->next ; tmp = tmp->next) {
 107                 if ((IN_ORDER(tmp,req) ||
 108                     !IN_ORDER(tmp,tmp->next)) &&
 109                     IN_ORDER(req,tmp->next))
 110                         break;
 111         }
 112         req->next = tmp->next;
 113         tmp->next = req;
 114 
 115 /* Scsi devices are treated differently */
 116         if(MAJOR(req->dev) == 8 || 
 117            MAJOR(req->dev) == 9 ||
 118            MAJOR(req->dev) == 11)
 119           (dev->request_fn)();
 120 
 121         sti();
 122 }
 123 
 124 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         unsigned int sector, count;
 127         struct request * req;
 128         int rw_ahead;
 129 
 130 /* WRITEA/READA is special case - it is not really needed, so if the */
 131 /* buffer is locked, we just forget about it, else it's a normal read */
 132         rw_ahead = (rw == READA || rw == WRITEA);
 133         if (rw_ahead) {
 134                 if (bh->b_lock)
 135                         return;
 136                 if (rw == READA)
 137                         rw = READ;
 138                 else
 139                         rw = WRITE;
 140         }
 141         if (rw!=READ && rw!=WRITE) {
 142                 printk("Bad block dev command, must be R/W/RA/WA\n");
 143                 return;
 144         }
 145         count = bh->b_size >> 9;
 146         sector = bh->b_blocknr * count;
 147         if (blk_size[major])
 148                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 149                         bh->b_dirt = bh->b_uptodate = 0;
 150                         return;
 151                 }
 152         lock_buffer(bh);
 153         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 154                 unlock_buffer(bh);
 155                 return;
 156         }
 157 /* The scsi disk drivers completely remove the request from the queue when
 158    they start processing an entry.  For this reason it is safe to continue
 159    to add links to the top entry for scsi devices */
 160 
 161 repeat:
 162         cli();
 163         if ((major == 3 ||  major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
 164                 if(major == 3) req = req->next;
 165                 while (req) {
 166                         if (req->dev == bh->b_dev &&
 167                             !req->waiting &&
 168                             req->cmd == rw &&
 169                             req->sector + req->nr_sectors == sector &&
 170                             req->nr_sectors < 254) {
 171                                 req->bhtail->b_reqnext = bh;
 172                                 req->bhtail = bh;
 173                                 req->nr_sectors += count;
 174                                 bh->b_dirt = 0;
 175                                 sti();
 176                                 return;
 177                               }
 178                         req = req->next;
 179                       }
 180               }
 181 /* we don't allow the write-requests to fill up the queue completely:
 182  * we want some room for reads: they take precedence. The last third
 183  * of the requests are only for reads.
 184  */
 185         if (rw == READ)
 186                 req = request+NR_REQUEST;
 187         else
 188                 req = request+(NR_REQUEST/2);
 189 /* find an empty request */
 190         while (--req >= request)
 191                 if (req->dev < 0)
 192                         goto found;
 193 /* if none found, sleep on new requests: check for rw_ahead */
 194         if (rw_ahead) {
 195                 sti();
 196                 unlock_buffer(bh);
 197                 return;
 198         }
 199         sleep_on(&wait_for_request);
 200         sti();
 201         goto repeat;
 202 
 203 found:
 204 /* fill up the request-info, and add it to the queue */
 205         req->dev = bh->b_dev;
 206         sti();
 207         req->cmd = rw;
 208         req->errors = 0;
 209         req->sector = sector;
 210         req->nr_sectors = count;
 211         req->current_nr_sectors = count;
 212         req->buffer = bh->b_data;
 213         req->waiting = NULL;
 214         req->bh = bh;
 215         req->bhtail = bh;
 216         req->next = NULL;
 217         add_request(major+blk_dev,req);
 218 }
 219 
 220 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 221 {
 222         struct request * req;
 223         unsigned int major = MAJOR(dev);
 224 
 225         if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
 226                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 227                 return;
 228         }
 229         if (rw!=READ && rw!=WRITE)
 230                 panic("Bad block dev command, must be R/W");
 231         if (rw == WRITE && is_read_only(dev)) {
 232                 printk("Can't page to read-only device 0x%X\n\r",dev);
 233                 return;
 234         }
 235         cli();
 236 repeat:
 237         req = request+NR_REQUEST;
 238         while (--req >= request)
 239                 if (req->dev<0)
 240                         break;
 241         if (req < request) {
 242                 sleep_on(&wait_for_request);
 243                 goto repeat;
 244         }
 245         sti();
 246 /* fill up the request-info, and add it to the queue */
 247         req->dev = dev;
 248         req->cmd = rw;
 249         req->errors = 0;
 250         req->sector = page<<3;
 251         req->nr_sectors = 8;
 252         req->current_nr_sectors = 8;
 253         req->buffer = buffer;
 254         req->waiting = current;
 255         req->bh = NULL;
 256         req->next = NULL;
 257         current->state = TASK_SWAPPING;
 258         add_request(major+blk_dev,req);
 259         schedule();
 260 }
 261 
 262 /* This function can be used to request a number of buffers from a block
 263    device. Currently the only restriction is that all buffers must belong to
 264    the same device */
 265 
 266 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
 267 {
 268         unsigned int major;
 269 
 270         struct request plug;
 271         int plugged;
 272         struct blk_dev_struct * dev;
 273         int i, j;
 274 
 275         /* Make sure that the first block contains something reasonable */
 276         while(!bh[0]){
 277           bh++;
 278           nr--;
 279           if (nr <= 0) return;
 280         };
 281 
 282         for(j=0;j<nr; j++){
 283           if(!bh[j]) continue;
 284           if (bh[j]->b_size != 1024) {
 285             printk("ll_rw_block: only 1024-char blocks implemented (%d)\n",bh[0]->b_size);
 286             for (i=0;i<nr; i++)
 287               if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 288             return;
 289           }
 290         };
 291 
 292         if ((major=MAJOR(bh[0]->b_dev)) >= NR_BLK_DEV ||
 293         !(blk_dev[major].request_fn)) {
 294                 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh[0]->b_dev,bh[0]->b_blocknr);
 295                 for (i=0;i<nr; i++)
 296                   if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 297                 return;
 298         }
 299         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 300                 printk("Can't write to read-only device 0x%X\n\r",bh[0]->b_dev);
 301                 for (i=0;i<nr; i++)
 302                   if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
 303                 return;
 304         }
 305 /* If there are no pending requests for this device, then we insert a dummy
 306    request for that device.  This will prevent the request from starting until
 307    we have shoved all of the blocks into the queue, and then we let it rip */
 308 
 309         plugged = 0;
 310         cli();
 311         if (!blk_dev[major].current_request && nr > 1) {
 312           blk_dev[major].current_request = &plug;
 313           plug.dev = -1;
 314           plug.next = NULL;
 315           plugged = 1;
 316         };
 317         sti();
 318         for (i=0;i<nr; i++)
 319           if (bh[i]) make_request(major, rw, bh[i]);
 320         if(plugged){
 321           cli();
 322           blk_dev[major].current_request = plug.next;
 323           dev = major+blk_dev;
 324           (dev->request_fn)();
 325           sti();
 326         };
 327 }
 328 
 329 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 330 {
 331         int i;
 332         struct request * req;
 333         unsigned int major = MAJOR(dev);
 334 
 335         if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
 336                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n\r");
 337                 return;
 338         }
 339 
 340         if (rw!=READ && rw!=WRITE) {
 341                 printk("ll_rw_swap: bad block dev command, must be R/W");
 342                 return;
 343         }
 344         if (rw == WRITE && is_read_only(dev)) {
 345                 printk("Can't swap to read-only device 0x%X\n\r",dev);
 346                 return;
 347         }
 348         
 349         for (i=0; i<nb; i++, buf += BLOCK_SIZE)
 350         {
 351 repeat:
 352                 req = request+NR_REQUEST;
 353                 while (--req >= request)
 354                         if (req->dev<0)
 355                                 break;
 356                 if (req < request) {
 357                         sleep_on(&wait_for_request);
 358                         goto repeat;
 359                 }
 360 
 361                 req->dev = dev;
 362                 req->cmd = rw;
 363                 req->errors = 0;
 364                 req->sector = b[i] << 1;
 365                 req->nr_sectors = 2;
 366                 req->current_nr_sectors = 2;
 367                 req->buffer = buf;
 368                 req->waiting = current;
 369                 req->bh = NULL;
 370                 req->next = NULL;
 371                 current->state = TASK_UNINTERRUPTIBLE;
 372                 add_request(major+blk_dev,req);
 373                 schedule();
 374         }
 375 }
 376 
 377 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 378 {
 379         int i;
 380 
 381         for (i=0 ; i<NR_REQUEST ; i++) {
 382                 request[i].dev = -1;
 383                 request[i].next = NULL;
 384         }
 385         memset(ro_bits,0,sizeof(ro_bits));
 386 #ifdef CONFIG_BLK_DEV_HD
 387         mem_start = hd_init(mem_start,mem_end);
 388 #endif
 389         if (ramdisk_size)
 390                 mem_start += rd_init(mem_start, ramdisk_size*1024);
 391         return mem_start;
 392 }

/* [previous][next][first][last][top][bottom][index][help] */