root/kernel/blk_drv/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. lock_buffer
  2. unlock_buffer
  3. is_read_only
  4. set_device_ro
  5. add_request
  6. make_request
  7. ll_rw_page
  8. ll_rw_block
  9. blk_dev_init
  10. ll_rw_swap_file

   1 /*
   2  *  linux/kernel/blk_drv/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This handles all read/write requests to block devices
   9  */
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 
  15 #include <asm/system.h>
  16 
  17 #include "blk.h"
  18 
  19 extern long rd_init(long mem_start, int length);
  20 
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory.  All NR_REQUEST slots live in
 * this one static pool; a free slot is marked by dev < 0 (see
 * blk_dev_init() and the scans in make_request()/ll_rw_page()).
 */
struct request request[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 * One entry per major device number; request_fn == NULL means the
 * driver for that major is absent.
 */
struct blk_dev_struct blk_dev[NR_BLK_DEV] = {
        { NULL, NULL },         /* no_dev */
        { NULL, NULL },         /* dev mem */
        { NULL, NULL },         /* dev fd */
        { NULL, NULL },         /* dev hd */
        { NULL, NULL },         /* dev ttyx */
        { NULL, NULL },         /* dev tty */
        { NULL, NULL },         /* dev lp */
        { NULL, NULL },         /* dev pipes */
        { NULL, NULL },         /* dev sd */
        { NULL, NULL }          /* dev st */
};

/*
 * blk_size contains the size of all block-devices:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 * (sizes are in kB: make_request() compares against sectors >> 1)
 */
int * blk_size[NR_BLK_DEV] = { NULL, NULL, };
  57 
/*
 * Sleep until the buffer is unlocked, then lock it for the caller.
 * The test-and-sleep runs with interrupts disabled so a wakeup from
 * an interrupt handler cannot slip in between the b_lock check and
 * the sleep_on() call.
 */
static inline void lock_buffer(struct buffer_head * bh)
{
        cli();
        while (bh->b_lock)
                sleep_on(&bh->b_wait);
        bh->b_lock=1;
        sti();
}
  66 
  67 static inline void unlock_buffer(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
  68 {
  69         if (!bh->b_lock)
  70                 printk("ll_rw_block.c: buffer not locked\n\r");
  71         bh->b_lock = 0;
  72         wake_up(&bh->b_wait);
  73 }
  74 
/* RO fail safe mechanism */

/*
 * One read-only bit per minor device: ro_bits[major][minor >> 5]
 * holds bit (minor & 31), i.e. 8 * 32 = 256 minors per major.
 */
static long ro_bits[NR_BLK_DEV][8];
  78 
  79 int is_read_only(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  80 {
  81         int minor,major;
  82 
  83         major = MAJOR(dev);
  84         minor = MINOR(dev);
  85         if (major < 0 || major >= NR_BLK_DEV) return 0;
  86         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
  87 }
  88 
  89 void set_device_ro(int dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
  90 {
  91         int minor,major;
  92 
  93         major = MAJOR(dev);
  94         minor = MINOR(dev);
  95         if (major < 0 || major >= NR_BLK_DEV) return;
  96         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
  97         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
  98 }
  99 
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * Note that swapping requests (req->bh == NULL) always go before
 * other requests, and are done in the order they appear.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;

        req->next = NULL;
        cli();
        /* the request now owns the buffer: mark it clean so nobody
         * tries to re-queue it while the I/O is pending */
        if (req->bh)
                req->bh->b_dirt = 0;
        if (!(tmp = dev->current_request)) {
                /* idle device: install the request and kick the driver */
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                /* swap requests sort before the first buffered request */
                if (!req->bh)
                        if (tmp->next->bh)
                                break;
                        else
                                continue;
                /* elevator insert: keep the queue sorted per IN_ORDER,
                 * allowing one "wrap" point where the order resets */
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;
        sti();
}
 137 
 138 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 139 {
 140         unsigned int sector, count;
 141         struct request * req;
 142         int rw_ahead;
 143 
 144 /* WRITEA/READA is special case - it is not really needed, so if the */
 145 /* buffer is locked, we just forget about it, else it's a normal read */
 146         if (rw_ahead = (rw == READA || rw == WRITEA)) {
 147                 if (bh->b_lock)
 148                         return;
 149                 if (rw == READA)
 150                         rw = READ;
 151                 else
 152                         rw = WRITE;
 153         }
 154         if (rw!=READ && rw!=WRITE) {
 155                 printk("Bad block dev command, must be R/W/RA/WA\n");
 156                 return;
 157         }
 158         count = bh->b_size >> 9;
 159         sector = bh->b_blocknr * count;
 160         if (blk_size[major])
 161                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 162                         bh->b_dirt = bh->b_uptodate = 0;
 163                         return;
 164                 }
 165         lock_buffer(bh);
 166         if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 167                 unlock_buffer(bh);
 168                 return;
 169         }
 170 repeat:
 171         cli();
 172         if ((major == 3 ||  major == 8 )&& (req = blk_dev[major].current_request)) {
 173                 while (req = req->next) {
 174                         if (req->dev == bh->b_dev &&
 175                             !req->waiting &&
 176                             req->cmd == rw &&
 177                             req->sector + req->nr_sectors == sector &&
 178                             req->nr_sectors < 254) {
 179                                 req->bhtail->b_reqnext = bh;
 180                                 req->bhtail = bh;
 181                                 req->nr_sectors += count;
 182                                 bh->b_dirt = 0;
 183                                 sti();
 184                                 return;
 185                         }
 186                 }
 187         }
 188 /* we don't allow the write-requests to fill up the queue completely:
 189  * we want some room for reads: they take precedence. The last third
 190  * of the requests are only for reads.
 191  */
 192         if (rw == READ)
 193                 req = request+NR_REQUEST;
 194         else
 195                 req = request+(NR_REQUEST/2);
 196 /* find an empty request */
 197         while (--req >= request)
 198                 if (req->dev < 0)
 199                         goto found;
 200 /* if none found, sleep on new requests: check for rw_ahead */
 201         if (rw_ahead) {
 202                 sti();
 203                 unlock_buffer(bh);
 204                 return;
 205         }
 206         sleep_on(&wait_for_request);
 207         sti();
 208         goto repeat;
 209 
 210 found:  sti();
 211 /* fill up the request-info, and add it to the queue */
 212         req->dev = bh->b_dev;
 213         req->cmd = rw;
 214         req->errors = 0;
 215         req->sector = sector;
 216         req->nr_sectors = count;
 217         req->current_nr_sectors = count;
 218         req->buffer = bh->b_data;
 219         req->waiting = NULL;
 220         req->bh = bh;
 221         req->bhtail = bh;
 222         req->next = NULL;
 223         add_request(major+blk_dev,req);
 224 }
 225 
/*
 * Read or write one page (8 sectors = 4kB) of paging data straight
 * into "buffer", bypassing the buffer cache (req->bh == NULL marks it
 * as a paging/swap request, which add_request() queues first).
 * Sleeps in TASK_UNINTERRUPTIBLE until the driver completes the
 * request and wakes req->waiting.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n\r",dev);
                return;
        }
        cli();
repeat:
        /* scan the whole pool for a free slot (dev < 0 means free) */
        req = request+NR_REQUEST;
        while (--req >= request)
                if (req->dev<0)
                        break;
        if (req < request) {
                /* NOTE(review): cli() above is not repeated here, so if
                 * sleep_on() returns with interrupts enabled this
                 * re-scan races request completion - confirm the
                 * sleep_on()/schedule() interrupt-flag semantics */
                sleep_on(&wait_for_request);
                goto repeat;
        }
        sti();
/* fill up the request-info, and add it to the queue */
        req->dev = dev;
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->waiting = &current->wait;
        req->bh = NULL;
        req->next = NULL;
        current->state = TASK_UNINTERRUPTIBLE;
        add_request(major+blk_dev,req);
        schedule();
}
 267 
 268 void ll_rw_block(int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 269 {
 270         unsigned int major;
 271 
 272         if (!bh)
 273                 return;
 274         if (bh->b_size != 1024) {
 275                 printk("ll_rw_block: only 1024-char blocks implemented (%d)\n",bh->b_size);
 276                 bh->b_dirt = bh->b_uptodate = 0;
 277                 return;
 278         }
 279         if ((major=MAJOR(bh->b_dev)) >= NR_BLK_DEV ||
 280         !(blk_dev[major].request_fn)) {
 281                 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh->b_dev,bh->b_blocknr);
 282                 bh->b_dirt = bh->b_uptodate = 0;
 283                 return;
 284         }
 285         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh->b_dev)) {
 286                 printk("Can't write to read-only device 0x%X\n\r",bh->b_dev);
 287                 bh->b_dirt = bh->b_uptodate = 0;
 288                 return;
 289         }
 290         make_request(major,rw,bh);
 291 }
 292 
 293 long blk_dev_init(long mem_start, long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 294 {
 295         int i;
 296 
 297         for (i=0 ; i<NR_REQUEST ; i++) {
 298                 request[i].dev = -1;
 299                 request[i].next = NULL;
 300         }
 301         memset(ro_bits,0,sizeof(ro_bits));
 302 #ifdef RAMDISK
 303         mem_start += rd_init(mem_start, RAMDISK*1024);
 304 #endif
 305         return mem_start;
 306 }
 307 
/*
 * Synchronously read/write "nb" blocks of a swap file.  b[] holds the
 * 1kB block numbers; each is converted to a start sector (<< 1: two
 * 512-byte sectors per block) and queued as its own request, sleeping
 * in TASK_UNINTERRUPTIBLE until the driver completes it before moving
 * on to the next block ("buf" advances by BLOCK_SIZE per iteration).
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        struct request * req;
        unsigned int major = MAJOR(dev);

        if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n\r");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n\r",dev);
                return;
        }
        
        for (i=0; i<nb; i++, buf += BLOCK_SIZE)
        {
repeat:
                /* find a free request slot (dev < 0 means free) */
                req = request+NR_REQUEST;
                while (--req >= request)
                        if (req->dev<0)
                                break;
                if (req < request) {
                        /* NOTE(review): unlike ll_rw_page(), this scan
                         * runs with interrupts enabled - confirm a
                         * completion irq cannot race the slot claim */
                        sleep_on(&wait_for_request);
                        goto repeat;
                }

                req->dev = dev;
                req->cmd = rw;
                req->errors = 0;
                req->sector = b[i] << 1;
                req->nr_sectors = 2;
                req->current_nr_sectors = 2;
                req->buffer = buf;
                req->waiting = &current->wait;
                req->bh = NULL;
                req->next = NULL;
                current->state = TASK_UNINTERRUPTIBLE;
                add_request(major+blk_dev,req);
                schedule();
        }
}

/* [previous][next][first][last][top][bottom][index][help] */