root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. wait_on_buffer
  2. sys_sync
  3. sync_dev
  4. remove_from_queues
  5. insert_into_queues
  6. find_buffer
  7. get_hash_table
  8. getblk
  9. brelse
  10. bread
  11. buffer_init

   1 /*
   2  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   3  * been avoided by NEVER letting a interrupt change a buffer (except for the
   4  * data, of course), but instead letting the caller do it. NOTE! As interrupts
   5  * can wake up a caller, some cli-sti sequences are needed to check for
   6  * sleep-on-calls. These should be extremely quick, though (I hope).
   7  */
   8 
   9 #include <linux/config.h>
  10 #include <linux/sched.h>
  11 #include <linux/kernel.h>
  12 #include <asm/system.h>
  13 
     /* Sanity-check the configured end of the buffer cache: BUFFER_END
      * must be page-aligned (low 12 bits clear) and must not fall inside
      * the 640K-1M region that buffer_init() skips (presumably the PC
      * video-RAM/BIOS hole -- see the 0xA0000/0x100000 jump there). */
  14 #if (BUFFER_END & 0xfff)
  15 #error "Bad BUFFER_END value"
  16 #endif
  17 
  18 #if (BUFFER_END > 0xA0000 && BUFFER_END <= 0x100000)
  19 #error "Bad BUFFER_END value"
  20 #endif
  21 
     /* 'end' is provided by the linker at the end of the kernel image;
      * the buffer cache lives from there up to BUFFER_END. */
  22 extern int end;
  23 struct buffer_head * start_buffer = (struct buffer_head *) &end;
     /* Hash chains keyed by (dev, blocknr) -- see _hashfn() below. */
  24 struct buffer_head * hash_table[NR_HASH];
     /* Head of the circular, doubly linked free list of all buffers. */
  25 static struct buffer_head * free_list;
     /* Tasks sleeping in getblk() waiting for any buffer to be released. */
  26 static struct task_struct * buffer_wait = NULL;
     /* Number of buffer heads set up by buffer_init(). */
  27 int NR_BUFFERS = 0;
  28 
  29 static inline void wait_on_buffer(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Sleep until 'bh' is no longer locked.  Interrupts are disabled
      * around the b_lock test so that an interrupt cannot unlock the
      * buffer and deliver its wake-up between the check and sleep_on().
      * NOTE(review): relies on sleep_on()/schedule() re-enabling
      * interrupts while the task sleeps -- confirm in kernel/sched.c.
      */
  30 {
  31         cli();
  32         while (bh->b_lock)
  33                 sleep_on(&bh->b_wait);
  34         sti();
  35 }
  36 
  37 int sys_sync(void)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * The sync() system call: flush in-core inodes into buffers, then
      * start a write for every dirty buffer in the cache.  Always
      * returns 0.  NOTE(review): the writes appear to be queued only,
      * not waited on -- confirm ll_rw_block() semantics.
      */
  38 {
  39         int i;
  40         struct buffer_head * bh;
  41 
  42         sync_inodes();          /* write out inodes into buffers */
  43         bh = start_buffer;
  44         for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
     /* if the buffer is locked for I/O, wait before re-queueing it */
  45                 wait_on_buffer(bh);
  46                 if (bh->b_dirt)
  47                         ll_rw_block(WRITE,bh);
  48         }
  49         return 0;
  50 }
  51 
  52 static int sync_dev(int dev)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Start a write for every dirty buffer belonging to device 'dev'
      * (same loop as sys_sync(), filtered by b_dev; used by getblk()
      * before recycling a dirty buffer).  Always returns 0.
      */
  53 {
  54         int i;
  55         struct buffer_head * bh;
  56 
  57         bh = start_buffer;
  58         for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
  59                 if (bh->b_dev != dev)
  60                         continue;
  61                 wait_on_buffer(bh);
  62                 if (bh->b_dirt)
  63                         ll_rw_block(WRITE,bh);
  64         }
  65         return 0;
  66 }
  67 
     /* Map a (device, block-number) pair to a hash bucket: XOR the two
      * values and reduce modulo the table size.  hash() names the bucket
      * itself so it can be used as an lvalue. */
  68 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
  69 #define hash(dev,block) hash_table[_hashfn(dev,block)]
  70 
  71 static inline void remove_from_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Unlink 'bh' from both cache structures: its hash chain (b_next /
      * b_prev) and the circular free list (b_next_free / b_prev_free).
      * The free-list links must always be valid -- a NULL link means the
      * list is corrupt, which is fatal.
      */
  72 {
  73 /* remove from hash-queue */
  74         if (bh->b_next)
  75                 bh->b_next->b_prev = bh->b_prev;
  76         if (bh->b_prev)
  77                 bh->b_prev->b_next = bh->b_next;
     /* if bh headed the chain, the bucket must point at its successor */
  78         if (hash(bh->b_dev,bh->b_blocknr) == bh)
  79                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
  80 /* remove from free list */
  81         if (!(bh->b_prev_free) || !(bh->b_next_free))
  82                 panic("Free block list corrupted");
  83         bh->b_prev_free->b_next_free = bh->b_next_free;
  84         bh->b_next_free->b_prev_free = bh->b_prev_free;
  85         if (free_list == bh)
  86                 free_list = bh->b_next_free;
  87 }
  88 
  89 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Link 'bh' back into the cache: append it at the tail of the
      * circular free list (just before free_list) and, if it is bound
      * to a device, push it onto the front of its hash chain.
      */
  90 {
  91 /* put at end of free list */
  92         bh->b_next_free = free_list;
  93         bh->b_prev_free = free_list->b_prev_free;
  94         free_list->b_prev_free->b_next_free = bh;
  95         free_list->b_prev_free = bh;
  96 /* put the buffer in new hash-queue if it has a device */
  97         bh->b_prev = NULL;
  98         bh->b_next = NULL;
  99         if (!bh->b_dev)
 100                 return;
 101         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 102         hash(bh->b_dev,bh->b_blocknr) = bh;
     /* FIX: the hash chain may have been empty, in which case b_next is
      * NULL and must not be dereferenced (unconditional dereference here
      * scribbled on low memory in the original). */
 103         if (bh->b_next)
                 bh->b_next->b_prev = bh;
 104 }
 105 
 106 static struct buffer_head * find_buffer(int dev, int block)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Walk the hash chain for (dev,block) and return the matching
      * buffer head, or NULL if it is not cached.  Takes no reference
      * and does not lock; callers must re-validate after any sleep
      * (see get_hash_table()).
      */
 107 {               
 108         struct buffer_head * tmp;
 109 
 110         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 111                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 112                         return tmp;
 113         return NULL;
 114 }
 115 
 116 /*
 117  * Why like this, I hear you say... The reason is race-conditions.
 118  * As we don't lock buffers (unless we are reading them, that is),
 119  * something might happen to it while we sleep (ie a read-error
 120  * will force it bad). This shouldn't really happen currently, but
 121  * the code is ready.
 122  */
 123 struct buffer_head * get_hash_table(int dev, int block)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Look up (dev,block) in the hash table and return the buffer with
      * its reference count raised, or NULL if not cached.  b_count is
      * bumped *before* sleeping on the lock so the buffer cannot be
      * recycled underneath us; after waking we re-check that it still
      * holds the same (dev,block) and retry from scratch if not.
      */
 124 {
 125         struct buffer_head * bh;
 126 
 127 repeat:
 128         if (!(bh=find_buffer(dev,block)))
 129                 return NULL;
 130         bh->b_count++;
 131         wait_on_buffer(bh);
     /* the buffer may have been repurposed while we slept */
 132         if (bh->b_dev != dev || bh->b_blocknr != block) {
 133                 brelse(bh);
 134                 goto repeat;
 135         }
 136         return bh;
 137 }
 138 
 139 /*
 140  * Ok, this is getblk, and it isn't very clear, again to hinder
 141  * race-conditions. Most of the code is seldom used, (ie repeating),
 142  * so it should be much more efficient than it looks.
 143  */
 144 struct buffer_head * getblk(int dev,int block)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Return a buffer for (dev,block), either the cached one or a
      * recycled free buffer.  Never returns NULL: if no buffer is free
      * it sleeps on buffer_wait (woken from brelse()) and retries.
      * The returned buffer has b_count raised but b_uptodate may be 0.
      */
 145 {
 146         struct buffer_head * tmp;
 147 
 148 repeat:
 149         if (tmp=get_hash_table(dev,block))
 150                 return tmp;
     /* scan the circular free list for an unreferenced buffer */
 151         tmp = free_list;
 152         do {
 153                 if (!tmp->b_count) {
 154                         wait_on_buffer(tmp);    /* we still have to wait */
 155                         if (!tmp->b_count)      /* on it, it might be dirty */
 156                                 break;
 157                 }
 158                 tmp = tmp->b_next_free;
     /* if the scan wraps all the way around, (tmp=NULL) both records
      * the failure and evaluates to 0, terminating the loop */
 159         } while (tmp != free_list || (tmp=NULL));
 160         /* Kids, don't try THIS at home ^^^^^. Magic */
 161         if (!tmp) {
 162                 printk("Sleeping on free buffer ..");
 163                 sleep_on(&buffer_wait);
 164                 printk("ok\n");
 165                 goto repeat;
 166         }
 167         tmp->b_count++;
 168         remove_from_queues(tmp);
 169 /*
 170  * Now, when we know nobody can get to this node (as it's removed from the
 171  * free list), we write it out. We can sleep here without fear of race-
 172  * conditions.
 173  */
 174         if (tmp->b_dirt)
 175                 sync_dev(tmp->b_dev);
 176 /* update buffer contents */
 177         tmp->b_dev=dev;
 178         tmp->b_blocknr=block;
 179         tmp->b_dirt=0;
 180         tmp->b_uptodate=0;
 181 /* NOTE!! While we possibly slept in sync_dev(), somebody else might have
 182  * added "this" block already, so check for that. Thank God for goto's.
 183  */
 184         if (find_buffer(dev,block)) {
 185                 tmp->b_dev=0;           /* ok, someone else has beaten us */
 186                 tmp->b_blocknr=0;       /* to it - free this block and */
 187                 tmp->b_count=0;         /* try again */
 188                 insert_into_queues(tmp);
 189                 goto repeat;
 190         }
 191 /* and then insert into correct position */
 192         insert_into_queues(tmp);
 193         return tmp;
 194 }
 195 
 196 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Release a buffer obtained from getblk()/bread(): drop one
      * reference and wake anyone sleeping in getblk() waiting for a
      * free buffer.  A NULL argument is silently ignored.
      */
 197 {
 198         if (!buf)
 199                 return;
 200         wait_on_buffer(buf);
     /* post-decrement: releasing an already-free buffer (count 0) leaves
      * it at -1, but the panic halts the system anyway */
 201         if (!(buf->b_count--))
 202                 panic("Trying to free free buffer");
 203         wake_up(&buffer_wait);
 204 }
 205 
 206 /*
 207  * bread() reads a specified block and returns the buffer that contains
 208  * it. It returns NULL if the block was unreadable.
 209  */
 210 struct buffer_head * bread(int dev,int block)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Read block 'block' of device 'dev' into the cache and return the
      * buffer holding it (reference held; release with brelse()), or
      * NULL if the read failed.  getblk() never returns NULL, so that
      * case is a fatal inconsistency.
      */
 211 {
 212         struct buffer_head * bh;
 213 
 214         if (!(bh=getblk(dev,block)))
 215                 panic("bread: getblk returned NULL\n");
 216         if (bh->b_uptodate)
 217                 return bh;
 218         ll_rw_block(READ,bh);
     /* FIX: wait for the read to complete before testing b_uptodate;
      * ll_rw_block() queues the request, so checking immediately races
      * with the disk interrupt (later kernels add this wait here). */
         wait_on_buffer(bh);
 219         if (bh->b_uptodate)
 220                 return bh;
     /* read failed: drop our reference and report the error */
 221         brelse(bh);
 222         return (NULL);
 223 }
 224 
 225 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
     /*
      * Set up the buffer cache at boot: buffer heads grow upward from
      * the end of the kernel image (start_buffer) while their data
      * blocks grow downward from BUFFER_END, skipping the 640K-1M hole;
      * the loop stops when the two regions would meet.  Finally the
      * heads are tied into one circular free list and the hash table is
      * cleared.  (Arithmetic on 'void *' is a GCC extension.)
      */
 226 {
 227         struct buffer_head * h = start_buffer;
 228         void * b = (void *) BUFFER_END;
 229         int i;
 230 
 231         while ( (b -= BLOCK_SIZE) >= ((void *) (h+1)) ) {
 232                 h->b_dev = 0;
 233                 h->b_dirt = 0;
 234                 h->b_count = 0;
 235                 h->b_lock = 0;
 236                 h->b_uptodate = 0;
 237                 h->b_wait = NULL;
 238                 h->b_next = NULL;
 239                 h->b_prev = NULL;
 240                 h->b_data = (char *) b;
     /* provisional neighbour links; the ends are patched up below */
 241                 h->b_prev_free = h-1;
 242                 h->b_next_free = h+1;
 243                 h++;
 244                 NR_BUFFERS++;
     /* jump over the 640K-1M region (video RAM / BIOS ROM area) */
 245                 if (b == (void *) 0x100000)
 246                         b = (void *) 0xA0000;
 247         }
     /* h now points one past the last head; step back and close the ring */
 248         h--;
 249         free_list = start_buffer;
 250         free_list->b_prev_free = h;
 251         h->b_next_free = free_list;
 252         for (i=0;i<NR_HASH;i++)
 253                 hash_table[i]=NULL;
 254 }       

/* [previous][next][first][last][top][bottom][index][help] */