root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. wait_on_buffer
  2. sys_sync
  3. sync_dev
  4. check_disk_change
  5. remove_from_queues
  6. insert_into_queues
  7. find_buffer
  8. get_hash_table
  9. getblk
  10. brelse
  11. bread
  12. breada
  13. buffer_init

/*
 *  linux/fs/buffer.c
 *
 *  (C) 1991  Linus Torvalds
 */

/*
 *  'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it. NOTE! As interrupts
 * can wake up a caller, some cli-sti sequences are needed to check for
 * sleep-on-calls. These should be extremely quick, though (I hope).
 */

/*
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk-caches.
 */

#include <stdarg.h>

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/io.h>

extern int end;
struct buffer_head * start_buffer = (struct buffer_head *) &end;
struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list;
static struct task_struct * buffer_wait = NULL;
int NR_BUFFERS = 0;

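/*
 * Descriptive note: b_lock is cleared (and b_wait woken) from the disk
 * interrupt handler when the I/O on a buffer completes.  The cli/sti pair
 * below therefore closes the window between testing b_lock and going to
 * sleep: without it, a wake-up arriving in that window would be lost and
 * the caller could sleep forever.
 */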
static inline void wait_on_buffer(struct buffer_head * bh)
{
        cli();
        while (bh->b_lock)
                sleep_on(&bh->b_wait);
        sti();
}

int sys_sync(void)
{
        int i;
        struct buffer_head * bh;

        sync_inodes();          /* write out inodes into buffers */
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                wait_on_buffer(bh);
                if (bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        return 0;
}

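/*
 * sync_dev() deliberately makes two passes over the buffers: the first pass
 * flushes blocks that are already dirty, then sync_inodes() copies in-core
 * inodes into buffers (possibly dirtying more of them), and the second pass
 * flushes those as well.
 */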
int sync_dev(int dev)
{
        int i;
        struct buffer_head * bh;

        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if (bh->b_dev != dev)
                        continue;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        sync_inodes();
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if (bh->b_dev != dev)
                        continue;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        return 0;
}

/*
 * This routine checks whether a floppy has been changed, and
 * invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 *
 * NOTE! Although currently this is only for floppies, the idea is
 * that any additional removable block-device will use this routine,
 * and that mount/open needn't know that floppies/whatever are
 * special.
 */
void check_disk_change(int dev)
{
        int i;
        struct buffer_head * bh;

        if (MAJOR(dev) != 2)
                return;
        dev=MINOR(dev) & 0x03;  /* which floppy is it? */
        if (!floppy_change(dev))
                return;
        dev |= 0x200;
        for (i=0 ; i<NR_SUPER ; i++)
                if ((super_block[i].s_dev & 0xff03)==dev)
                        put_super(super_block[i].s_dev);
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if ((bh->b_dev & 0xff03) != dev)
                        continue;
                wait_on_buffer(bh);
                if ((bh->b_dev & 0xff03) == dev)
                        bh->b_uptodate = bh->b_dirt = 0;
        }
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

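/*
 * Illustrative note (the concrete numbers are only an example, assuming
 * NR_HASH is 307 as defined in this kernel's fs.h): a buffer for device
 * 0x301, block 35 is kept on hash chain (0x301 ^ 35) % 307, and
 * find_buffer() below walks just that one chain via the b_next links.
 */
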
static inline void remove_from_queues(struct buffer_head * bh)
{
/* remove from hash-queue */
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
/* remove from free list */
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("Free block list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;
        if (free_list == bh)
                free_list = bh->b_next_free;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
        bh->b_next_free = free_list;
        bh->b_prev_free = free_list->b_prev_free;
        free_list->b_prev_free->b_next_free = bh;
        free_list->b_prev_free = bh;
/* put the buffer in new hash-queue if it has a device */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!bh->b_dev)
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)         /* the hash chain may have been empty */
                bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(int dev, int block)
{
        struct buffer_head * tmp;

        for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
                if (tmp->b_dev==dev && tmp->b_blocknr==block)
                        return tmp;
        return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block)))
                        return NULL;
                bh->b_count++;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block)
                        return bh;
                bh->b_count--;
        }
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used (ie the repeat
 * parts), so it should be much more efficient than it looks.
 *
 * The algorithm is changed: better, and an elusive bug removed.
 *              LBT 11.11.91
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
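/*
 * BADNESS ranks how costly it is to reclaim a buffer from the free list:
 * clean and unlocked = 0, locked only = 1, dirty only = 2, dirty and
 * locked = 3.  getblk() picks the candidate with the lowest value and
 * stops searching as soon as it finds a perfectly clean one.
 */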
struct buffer_head * getblk(int dev,int block)
{
        struct buffer_head * tmp, * bh;

repeat:
        if (bh = get_hash_table(dev,block))
                return bh;
        tmp = free_list;
        do {
                if (tmp->b_count)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))
                                break;
                }
        } while ((tmp = tmp->b_next_free) != free_list);
        if (!bh) {
                sleep_on(&buffer_wait);
                goto repeat;
        }
        wait_on_buffer(bh);
        if (bh->b_count)
                goto repeat;
        while (bh->b_dirt) {
                sync_dev(bh->b_dev);
                wait_on_buffer(bh);
                if (bh->b_count)
                        goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}

void brelse(struct buffer_head * buf)
{
        if (!buf)
                return;
        wait_on_buffer(buf);
        if (!(buf->b_count--))
                panic("Trying to free free buffer");
        wake_up(&buffer_wait);
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(int dev,int block)
{
        struct buffer_head * bh;

        if (!(bh=getblk(dev,block)))
                panic("bread: getblk returned NULL\n");
        if (bh->b_uptodate)
                return bh;
        ll_rw_block(READ,bh);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}

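/*
 * Usage sketch (added for illustration; the device and block numbers are
 * hypothetical): read one block, inspect its data, and drop the reference
 * with brelse() so the buffer can be reused.
 */
static void bread_example(void)
{
        struct buffer_head * bh;

        if (!(bh = bread(0x301, 1)))    /* dev 0x301, block 1: example only */
                return;                 /* block was unreadable */
        /* bh->b_data points at the BLOCK_SIZE bytes just read */
        brelse(bh);
}
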
/*
 * Ok, breada can be used like bread, but it additionally marks other
 * blocks for reading as well. End the argument list with a negative
 * number.
 */
struct buffer_head * breada(int dev,int first, ...)
{
        va_list args;
        struct buffer_head * bh, *tmp;

        va_start(args,first);
        if (!(bh=getblk(dev,first)))
                panic("breada: getblk returned NULL\n");
        if (!bh->b_uptodate)
                ll_rw_block(READ,bh);
        while ((first=va_arg(args,int))>=0) {
                tmp=getblk(dev,first);
                if (tmp) {
                        if (!tmp->b_uptodate)
                                ll_rw_block(READA,tmp); /* read-ahead the extra block, not bh */
                        tmp->b_count--;
                }
        }
        va_end(args);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return (NULL);
}

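/*
 * Usage sketch (added for illustration; device and block numbers are
 * hypothetical): read block 10 now and queue blocks 11 and 12 for
 * read-ahead.  The argument list must end with a negative number.
 */
static void breada_example(void)
{
        struct buffer_head * bh;

        if ((bh = breada(0x301, 10, 11, 12, -1)))
                brelse(bh);
}
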
void buffer_init(long buffer_end)
{
        struct buffer_head * h = start_buffer;
        void * b;
        int i;

        if (buffer_end == 1<<20)
                b = (void *) (640*1024);
        else
                b = (void *) buffer_end;
        while ( (b -= BLOCK_SIZE) >= ((void *) (h+1)) ) {
                h->b_dev = 0;
                h->b_dirt = 0;
                h->b_count = 0;
                h->b_lock = 0;
                h->b_uptodate = 0;
                h->b_wait = NULL;
                h->b_next = NULL;
                h->b_prev = NULL;
                h->b_data = (char *) b;
                h->b_prev_free = h-1;
                h->b_next_free = h+1;
                h++;
                NR_BUFFERS++;
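                /* skip the 640K-1M hole: that range holds video RAM and
                   the BIOS ROM, so it cannot be used for buffer blocks */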
                if (b == (void *) 0x100000)
                        b = (void *) 0xA0000;
        }
        h--;
        free_list = start_buffer;
        free_list->b_prev_free = h;
        h->b_next_free = free_list;
        for (i=0;i<NR_HASH;i++)
                hash_table[i]=NULL;
}
