root/kernel/blk_drv/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. lock_buffer
  2. unlock_buffer
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. blk_dev_init
  10. ll_rw_swap_file

   1 /*
   2  *  linux/kernel/blk_drv/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/config.h>
  15 
  16 #include <asm/system.h>
  17 
  18 #include "blk.h"
  19 
  20 extern long rd_init(long mem_start, int length);
  21 
  22 /*
  23  * The request-struct contains all necessary data
  24  * to load a nr of sectors into memory
  25  */
  26 struct request request[NR_REQUEST];
  27 
  28 /*
  29  * used to wait on when there are no free requests
  30  */
  31 struct wait_queue * wait_for_request = NULL;
  32 
  33 /* blk_dev_struct is:
  34  *      do_request-address
  35  *      next-request
  36  */
  37 struct blk_dev_struct blk_dev[NR_BLK_DEV] = {
  38         { NULL, NULL },         /* no_dev */
  39         { NULL, NULL },         /* dev mem */
  40         { NULL, NULL },         /* dev fd */
  41         { NULL, NULL },         /* dev hd */
  42         { NULL, NULL },         /* dev ttyx */
  43         { NULL, NULL },         /* dev tty */
  44         { NULL, NULL },         /* dev lp */
  45         { NULL, NULL },         /* dev pipes */
  46         { NULL, NULL },         /* dev sd */
  47         { NULL, NULL }          /* dev st */
  48 };
  49 
  50 /*
  51  * blk_size contains the size of all block-devices:
  52  *
  53  * blk_size[MAJOR][MINOR]
  54  *
  55  * if (!blk_size[MAJOR]) then no minor size checking is done.
  56  */
  57 int * blk_size[NR_BLK_DEV] = { NULL, NULL, };
  58 
     /*
      * Lock a buffer head for exclusive use, sleeping until any current
      * holder releases it.  Interrupts are disabled across the test-and-set
      * so a wakeup from unlock_buffer() (which may run from a completion
      * path) cannot be lost between the b_lock test and the sleep.
      */
  59 static inline void lock_buffer(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
  60 {
  61         cli();
  62         while (bh->b_lock)
  63                 sleep_on(&bh->b_wait);  /* b_lock is re-tested on every wakeup */
  64         bh->b_lock=1;
  65         sti();
  66 }
  67 
  68 static inline void unlock_buffer(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
  69 {
  70         if (!bh->b_lock)
  71                 printk("ll_rw_block.c: buffer not locked\n\r");
  72         bh->b_lock = 0;
  73         wake_up(&bh->b_wait);
  74 }
  75 
  76 /* RO fail safe mechanism */
  77 
  78 static long ro_bits[NR_BLK_DEV][8];
  79 
  80 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  81 {
  82         int minor,major;
  83 
  84         major = MAJOR(dev);
  85         minor = MINOR(dev);
  86         if (major < 0 || major >= NR_BLK_DEV) return 0;
  87         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
  88 }
  89 
  90 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
  91 {
  92         int minor,major;
  93 
  94         major = MAJOR(dev);
  95         minor = MINOR(dev);
  96         if (major < 0 || major >= NR_BLK_DEV) return;
  97         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
  98         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
  99 }
 100 
 101 /*
 102  * add-request adds a request to the linked list.
 103  * It disables interrupts so that it can muck with the
 104  * request-lists in peace.
 105  */
 106 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 107 {
 108         struct request * tmp;
 109 
 110         req->next = NULL;
 111         cli();
             /* The buffer is committed to the queue now; clear b_dirt so
              * nothing tries to queue it a second time. */
 112         if (req->bh)
 113                 req->bh->b_dirt = 0;
             /* Empty queue: install the request and kick the driver's
              * request function directly, still under cli(). */
 114         if (!(tmp = dev->current_request)) {
 115                 dev->current_request = req;
 116                 (dev->request_fn)();
 117                 sti();
 118                 return;
 119         }
             /* Elevator-style insertion: never displace the in-flight head;
              * insert where IN_ORDER keeps the list sorted, or at the point
              * where the existing ordering already wraps around. */
 120         for ( ; tmp->next ; tmp = tmp->next) {
 121                 if ((IN_ORDER(tmp,req) ||
 122                     !IN_ORDER(tmp,tmp->next)) &&
 123                     IN_ORDER(req,tmp->next))
 124                         break;
 125         }
 126         req->next = tmp->next;
 127         tmp->next = req;
 128         sti();
 129 }
 130 
     /*
      * Build (or merge) a block-I/O request for a single buffer and queue
      * it via add_request().  May sleep waiting for a free request slot;
      * read-ahead/write-ahead requests are dropped instead of waiting.
      */
 131 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 132 {
 133         unsigned int sector, count;
 134         struct request * req;
 135         int rw_ahead;
 136 
 137 /* WRITEA/READA is special case - it is not really needed, so if the */
 138 /* buffer is locked, we just forget about it, else it's a normal read */
 139         if (rw_ahead = (rw == READA || rw == WRITEA)) {  /* assignment intended */
 140                 if (bh->b_lock)
 141                         return;
 142                 if (rw == READA)
 143                         rw = READ;
 144                 else
 145                         rw = WRITE;
 146         }
 147         if (rw!=READ && rw!=WRITE) {
 148                 printk("Bad block dev command, must be R/W/RA/WA\n");
 149                 return;
 150         }
             /* Buffer size in 512-byte sectors, and first sector on device. */
 151         count = bh->b_size >> 9;
 152         sector = bh->b_blocknr * count;
             /* Bounds check: blk_size[] is in kB, hence >>1 to convert the
              * end sector to kB before comparing. */
 153         if (blk_size[major])
 154                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 155                         bh->b_dirt = bh->b_uptodate = 0;
 156                         return;
 157                 }
 158         lock_buffer(bh);
             /* Nothing to do: writing a clean buffer or reading a valid one. */
 159         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 160                 unlock_buffer(bh);
 161                 return;
 162         }
 163 repeat:
 164         cli();
             /* Try to merge with a queued request on disk devices (majors
              * 3, 8, 11 -- presumably hd and the SCSI disks; TODO(review):
              * confirm against the major-number assignments). */
 165         if ((major == 3 ||  major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
                     /* Advance before testing: the in-flight head request is
                      * never merged into. */
 166                 while (req = req->next) {
                             /* Merge only if same device, nobody waiting on the
                              * request, same direction, physically contiguous,
                              * and the request stays under 254 sectors. */
 167                         if (req->dev == bh->b_dev &&
 168                             !req->waiting &&
 169                             req->cmd == rw &&
 170                             req->sector + req->nr_sectors == sector &&
 171                             req->nr_sectors < 254) {
 172                                 req->bhtail->b_reqnext = bh;
 173                                 req->bhtail = bh;
 174                                 req->nr_sectors += count;
 175                                 bh->b_dirt = 0;
 176                                 sti();
 177                                 return;
 178                         }
 179                 }
 180         }
 181 /* we don't allow the write-requests to fill up the queue completely:
 182  * we want some room for reads: they take precedence. The last half
 183  * of the requests are only for reads (writes scan only the first
 184  * NR_REQUEST/2 slots below).
 185  */
 185         if (rw == READ)
 186                 req = request+NR_REQUEST;
 187         else
 188                 req = request+(NR_REQUEST/2);
 189 /* find an empty request (dev < 0 marks a free slot) */
 190         while (--req >= request)
 191                 if (req->dev < 0)
 192                         goto found;
 193 /* if none found, sleep on new requests: check for rw_ahead */
 194         if (rw_ahead) {
 195                 sti();
 196                 unlock_buffer(bh);
 197                 return;
 198         }
 199         sleep_on(&wait_for_request);
 200         sti();
 201         goto repeat;
 202 
 203 found:
 204 /* fill up the request-info, and add it to the queue */
             /* Claim the slot (dev >= 0) before re-enabling interrupts. */
 205         req->dev = bh->b_dev;
 206         sti();
 207         req->cmd = rw;
 208         req->errors = 0;
 209         req->sector = sector;
 210         req->nr_sectors = count;
 211         req->current_nr_sectors = count;
 212         req->buffer = bh->b_data;
 213         req->waiting = NULL;
 214         req->bh = bh;
 215         req->bhtail = bh;
 216         req->next = NULL;
 217         add_request(major+blk_dev,req);
 218 }
 219 
     /*
      * Synchronously read or write one page of swap (8 sectors = 4kB) at
      * page*8 sectors into the given buffer.  Sleeps in
      * TASK_UNINTERRUPTIBLE until the request completes.
      */
 220 void ll_rw_page(int rw, int dev, int page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 221 {
 222         struct request * req;
 223         unsigned int major = MAJOR(dev);
 224 
 225         if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
 226                 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
 227                 return;
 228         }
 229         if (rw!=READ && rw!=WRITE)
 230                 panic("Bad block dev command, must be R/W");
 231         if (rw == WRITE && is_read_only(dev)) {
 232                 printk("Can't page to read-only device 0x%X\n\r",dev);
 233                 return;
 234         }
             /* Scan for a free slot with interrupts off so a completion
              * freeing a slot cannot be missed before sleep_on(). */
 235         cli();
 236 repeat:
 237         req = request+NR_REQUEST;
 238         while (--req >= request)
 239                 if (req->dev<0)             /* dev < 0 marks a free slot */
 240                         break;
 241         if (req < request) {
 242                 sleep_on(&wait_for_request);  /* woken when a request frees */
 243                 goto repeat;
 244         }
 245         sti();
 246 /* fill up the request-info, and add it to the queue */
 247         req->dev = dev;
 248         req->cmd = rw;
 249         req->errors = 0;
 250         req->sector = page<<3;              /* page number -> sector (x8) */
 251         req->nr_sectors = 8;
 252         req->current_nr_sectors = 8;
 253         req->buffer = buffer;
             /* Completion will wake this task through req->waiting. */
 254         req->waiting = &current->wait;
 255         req->bh = NULL;
 256         req->next = NULL;
             /* Set the sleep state BEFORE queueing so the completion wakeup
              * cannot be lost if the request finishes immediately. */
 257         current->state = TASK_UNINTERRUPTIBLE;
 258         add_request(major+blk_dev,req);
 259         schedule();
 260 }
 261 
 262 void ll_rw_block(int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 263 {
 264         unsigned int major;
 265 
 266         if (!bh)
 267                 return;
 268         if (bh->b_size != 1024) {
 269                 printk("ll_rw_block: only 1024-char blocks implemented (%d)\n",bh->b_size);
 270                 bh->b_dirt = bh->b_uptodate = 0;
 271                 return;
 272         }
 273         if ((major=MAJOR(bh->b_dev)) >= NR_BLK_DEV ||
 274         !(blk_dev[major].request_fn)) {
 275                 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh->b_dev,bh->b_blocknr);
 276                 bh->b_dirt = bh->b_uptodate = 0;
 277                 return;
 278         }
 279         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh->b_dev)) {
 280                 printk("Can't write to read-only device 0x%X\n\r",bh->b_dev);
 281                 bh->b_dirt = bh->b_uptodate = 0;
 282                 return;
 283         }
 284         make_request(major,rw,bh);
 285 }
 286 
 287 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 288 {
 289         int i;
 290 
 291         for (i=0 ; i<NR_REQUEST ; i++) {
 292                 request[i].dev = -1;
 293                 request[i].next = NULL;
 294         }
 295         memset(ro_bits,0,sizeof(ro_bits));
 296 #ifdef CONFIG_BLK_DEV_HD
 297         mem_start = hd_init(mem_start,mem_end);
 298 #endif
 299 #ifdef RAMDISK
 300         mem_start += rd_init(mem_start, RAMDISK*1024);
 301 #endif
 302         return mem_start;
 303 }
 304 
     /*
      * Synchronously read or write nb swap-file blocks.  b[] holds the
      * 1kB block numbers; buf advances BLOCK_SIZE per block.  Each block
      * becomes its own 2-sector request, and the task sleeps in
      * TASK_UNINTERRUPTIBLE until that request completes before issuing
      * the next one.
      */
 305 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 306 {
 307         int i;
 308         struct request * req;
 309         unsigned int major = MAJOR(dev);
 310 
 311         if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
 312                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n\r");
 313                 return;
 314         }
 315 
 316         if (rw!=READ && rw!=WRITE) {
 317                 printk("ll_rw_swap: bad block dev command, must be R/W");
 318                 return;
 319         }
 320         if (rw == WRITE && is_read_only(dev)) {
 321                 printk("Can't swap to read-only device 0x%X\n\r",dev);
 322                 return;
 323         }
 324         
 325         for (i=0; i<nb; i++, buf += BLOCK_SIZE)
 326         {
 327 repeat:
                     /* NOTE(review): unlike ll_rw_page(), this free-slot scan
                      * and the sleep_on() below run with interrupts enabled;
                      * a completion wakeup between the failed scan and
                      * sleep_on() looks like it could be lost -- confirm. */
 328                 req = request+NR_REQUEST;
 329                 while (--req >= request)
 330                         if (req->dev<0)     /* dev < 0 marks a free slot */
 331                                 break;
 332                 if (req < request) {
 333                         sleep_on(&wait_for_request);
 334                         goto repeat;
 335                 }
 336 
 337                 req->dev = dev;
 338                 req->cmd = rw;
 339                 req->errors = 0;
                     /* b[i] is a 1kB block number -> 512-byte sector (x2) */
 340                 req->sector = b[i] << 1;
 341                 req->nr_sectors = 2;
 342                 req->current_nr_sectors = 2;
 343                 req->buffer = buf;
                     /* completion wakes this task through req->waiting */
 344                 req->waiting = &current->wait;
 345                 req->bh = NULL;
 346                 req->next = NULL;
                     /* set sleep state before queueing so the completion
                      * wakeup cannot be missed */
 347                 current->state = TASK_UNINTERRUPTIBLE;
 348                 add_request(major+blk_dev,req);
 349                 schedule();
 350         }
 351 }

/* [previous][next][first][last][top][bottom][index][help] */