root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. check_aligned
  32. try_to_load_aligned
  33. try_to_share_buffers
  34. bread_page
  35. grow_buffers
  36. try_to_free
  37. maybe_shrink_lav_buffers
  38. shrink_buffers
  39. shrink_specific_buffers
  40. show_buffers
  41. try_to_reassign
  42. reassign_cluster
  43. try_to_generate_cluster
  44. generate_cluster
  45. buffer_init
  46. wakeup_bdflush
  47. sync_old_buffers
  48. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #define NR_SIZES 4
  33 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  35 
  36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
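/*
 * Illustration, not part of the original file: BUFSIZE_INDEX() indexes
 * buffersize_index[] by (size >> 9), and bufferindex_size[] maps the result
 * back to the size, so the two tables are inverses for the four supported
 * block sizes.  A hypothetical round-trip check:
 *
 *	e.g. size == 1024: 1024 >> 9 == 2, buffersize_index[2] == 1,
 *	and bufferindex_size[1] == 1024 again.
 */
static inline int buffersize_index_ok(int size)
{
	return bufferindex_size[BUFSIZE_INDEX(size)] == size;
}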
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
  59 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  60 extern int *blksize_size[];
  61 
  62 /* Here is the parameter block for the bdflush process. */
  63 static void wakeup_bdflush(int);
  64 
  65 #define N_PARAM 9
  66 #define LAV
  67 
  68 static union bdflush_param{
  69         struct {
  70                 int nfract;  /* Percentage of buffer cache dirty to 
  71                                 activate bdflush */
  72                 int ndirty;  /* Maximum number of dirty blocks to write out per
  73                                 wake-cycle */
  74                 int nrefill; /* Number of clean buffers to try and obtain
  75                                 each time we call refill */
  76                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  77                                   when trying to refill buffers. */
  78                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  79                                     search for free clusters */
  80                 int age_buffer;  /* Time for normal buffer to age before 
  81                                     we flush it */
  82                 int age_super;  /* Time for superblock to age before we 
  83                                    flush it */
  84                 int lav_const;  /* Constant used for load average (time
  85                                    constant) */
  86                 int lav_ratio;  /* Used to determine how low a lav for a
  87                                    particular size can go before we start to
  88                                    trim back the buffers */
  89         } b_un;
  90         unsigned int data[N_PARAM];
  91 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
  92 
  93 /* The lav constant is set for 1 minute, as long as the update process runs
  94    every 5 seconds.  If you change the frequency of update, the time
  95    constant will also change. */
  96 
  97 
  98 /* These are the min and max parameter values that we will allow to be assigned */
  99 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
 100 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
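/*
 * Illustration, not part of the original file: because bdf_prm is a union,
 * bdf_prm.data[i] and the named fields in b_un overlay the same words, so a
 * tuning interface only needs an index to range-check a new value against
 * bdflush_min[]/bdflush_max[].  A hypothetical setter:
 */
static inline int example_set_bdflush_param(int i, unsigned int value)
{
	if (i < 0 || i >= N_PARAM)
		return -EINVAL;
	if (value < bdflush_min[i] || value > bdflush_max[i])
		return -EINVAL;
	bdf_prm.data[i] = value;	/* same storage as the b_un.* field */
	return 0;
}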
 101 
 102 /*
 103  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 104  * and getting rid of the cli-sti pairs. The wait-queue routines still
 105  * need cli-sti, but now it's just a couple of 386 instructions or so.
 106  *
 107  * Note that the real wait_on_buffer() is an inline function that checks
 108  * whether the buffer is locked before calling this, so that the queues aren't set
 109  * up unnecessarily.
 110  */
 111 void __wait_on_buffer(struct buffer_head * bh)
 112 {
 113         struct wait_queue wait = { current, NULL };
 114 
 115         bh->b_count++;
 116         add_wait_queue(&bh->b_wait, &wait);
 117 repeat:
 118         current->state = TASK_UNINTERRUPTIBLE;
 119         if (bh->b_lock) {
 120                 schedule();
 121                 goto repeat;
 122         }
 123         remove_wait_queue(&bh->b_wait, &wait);
 124         bh->b_count--;
 125         current->state = TASK_RUNNING;
 126 }
 127 
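/*
 * Illustration, not part of the original file: the inline wrapper mentioned
 * in the comment above lives in a header (<linux/locks.h> in this era) and
 * amounts to the sketch below, so the wait-queue setup in __wait_on_buffer()
 * is only paid for when the buffer really is locked.
 */
static inline void example_wait_on_buffer(struct buffer_head * bh)
{
	if (bh->b_lock)			/* take the slow path only when locked */
		__wait_on_buffer(bh);
}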
 128 /* Call sync_buffers with wait!=0 to ensure that the call does not
 129    return until all buffer writes have completed.  Sync() may return
 130    before the writes have finished; fsync() may not. */
 131 
 132 
 133 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 134    spontaneously dirty themselves without ever brelse being called.
 135    We will ultimately want to put these in a separate list, but for
 136    now we search all of the lists for dirty buffers */
 137 
 138 static int sync_buffers(dev_t dev, int wait)
 139 {
 140         int i, retry, pass = 0, err = 0;
 141         int nlist, ncount;
 142         struct buffer_head * bh, *next;
 143 
 144         /* One pass for no-wait, three for wait:
 145            0) write out all dirty, unlocked buffers;
 146            1) write out all dirty buffers, waiting if locked;
 147            2) wait for completion by waiting for all buffers to unlock. */
 148  repeat:
 149         retry = 0;
 150  repeat2:
 151         ncount = 0;
 152         /* We search all lists as a failsafe mechanism, not because we expect
 153            there to be dirty buffers on any of the other lists. */
 154         for(nlist = 0; nlist < NR_LIST; nlist++)
 155          {
 156          repeat1:
 157                  bh = lru_list[nlist];
 158                  if(!bh) continue;
 159                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 160                          if(bh->b_list != nlist) goto repeat1;
 161                          next = bh->b_next_free;
 162                          if(!lru_list[nlist]) break;
 163                          if (dev && bh->b_dev != dev)
 164                                   continue;
 165                          if (bh->b_lock)
 166                           {
 167                                   /* Buffer is locked; skip it unless wait is
 168                                      requested AND pass > 0. */
 169                                   if (!wait || !pass) {
 170                                           retry = 1;
 171                                           continue;
 172                                   }
 173                                   wait_on_buffer (bh);
 174                                   goto repeat2;
 175                           }
 176                          /* If an unlocked buffer is not uptodate, there has
 177                              been an IO error. Skip it. */
 178                          if (wait && bh->b_req && !bh->b_lock &&
 179                              !bh->b_dirt && !bh->b_uptodate) {
 180                                   err = 1;
 181                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 182                                   continue;
 183                           }
 184                          /* Don't write clean buffers.  Don't write ANY buffers
 185                             on the third pass. */
 186                          if (!bh->b_dirt || pass>=2)
 187                                   continue;
 188                          /* don't bother about locked buffers */
 189                          if (bh->b_lock)
 190                                  continue;
 191                          bh->b_count++;
 192                          bh->b_flushtime = 0;
 193                          ll_rw_block(WRITE, 1, &bh);
 194 
 195                          if(nlist != BUF_DIRTY) { 
 196                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 197                                  ncount++;
 198                          };
 199                          bh->b_count--;
 200                          retry = 1;
 201                  }
 202          }
 203         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 204         
 205         /* If we are waiting for the sync to succeed, and if any dirty
 206            blocks were written, then repeat; on the second pass, only
 207            wait for buffers being written (do not pass to write any
 208            more buffers on the second pass). */
 209         if (wait && retry && ++pass<=2)
 210                  goto repeat;
 211         return err;
 212 }
 213 
 214 void sync_dev(dev_t dev)
 215 {
 216         sync_buffers(dev, 0);
 217         sync_supers(dev);
 218         sync_inodes(dev);
 219         sync_buffers(dev, 0);
 220 }
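/* A note on sync_dev() above: presumably sync_buffers() is called twice
   because sync_supers() and sync_inodes() can themselves dirty buffers;
   the first call starts the bulk of the writes, the second picks up
   whatever the superblock and inode sync dirtied. */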
 221 
 222 int fsync_dev(dev_t dev)
 223 {
 224         sync_buffers(dev, 0);
 225         sync_supers(dev);
 226         sync_inodes(dev);
 227         return sync_buffers(dev, 1);
 228 }
 229 
 230 asmlinkage int sys_sync(void)
 231 {
 232         sync_dev(0);
 233         return 0;
 234 }
 235 
 236 int file_fsync (struct inode *inode, struct file *filp)
 237 {
 238         return fsync_dev(inode->i_dev);
 239 }
 240 
 241 asmlinkage int sys_fsync(unsigned int fd)
 242 {
 243         struct file * file;
 244         struct inode * inode;
 245 
 246         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 247                 return -EBADF;
 248         if (!file->f_op || !file->f_op->fsync)
 249                 return -EINVAL;
 250         if (file->f_op->fsync(inode,file))
 251                 return -EIO;
 252         return 0;
 253 }
 254 
 255 void invalidate_buffers(dev_t dev)
 256 {
 257         int i;
 258         int nlist;
 259         struct buffer_head * bh;
 260 
 261         for(nlist = 0; nlist < NR_LIST; nlist++) {
 262                 bh = lru_list[nlist];
 263                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 264                      bh = bh->b_next_free) {
 265                         if (bh->b_dev != dev)
 266                                  continue;
 267                         wait_on_buffer(bh);
 268                         if (bh->b_dev == dev)
 269                                  bh->b_flushtime = bh->b_uptodate = 
 270                                           bh->b_dirt = bh->b_req = 0;
 271                 }
 272         }
 273 }
 274 
 275 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 276 #define hash(dev,block) hash_table[_hashfn(dev,block)]
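/*
 * Example: the buffer cached for block 2 of device 0x0301 hangs off
 * hash_table[(0x0301 ^ 2) % nr_hash]; find_buffer() below walks that chain
 * through the b_next pointers.  (Device and block number are hypothetical.)
 */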
 277 
 278 static inline void remove_from_hash_queue(struct buffer_head * bh)
 279 {
 280         if (bh->b_next)
 281                 bh->b_next->b_prev = bh->b_prev;
 282         if (bh->b_prev)
 283                 bh->b_prev->b_next = bh->b_next;
 284         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 285                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 286         bh->b_next = bh->b_prev = NULL;
 287 }
 288 
 289 static inline void remove_from_lru_list(struct buffer_head * bh)
 290 {
 291         if (!(bh->b_prev_free) || !(bh->b_next_free))
 292                 panic("VFS: LRU block list corrupted");
 293         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 294         bh->b_prev_free->b_next_free = bh->b_next_free;
 295         bh->b_next_free->b_prev_free = bh->b_prev_free;
 296 
 297         if (lru_list[bh->b_list] == bh)
 298                  lru_list[bh->b_list] = bh->b_next_free;
 299         if(lru_list[bh->b_list] == bh)
 300                  lru_list[bh->b_list] = NULL;
 301         bh->b_next_free = bh->b_prev_free = NULL;
 302 }
 303 
 304 static inline void remove_from_free_list(struct buffer_head * bh)
 305 {
 306         int isize = BUFSIZE_INDEX(bh->b_size);
 307         if (!(bh->b_prev_free) || !(bh->b_next_free))
 308                 panic("VFS: Free block list corrupted");
 309         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 310         if(!free_list[isize])
 311                  panic("Free list empty");
 312         nr_free[isize]--;
 313         if(bh->b_next_free == bh)
 314                  free_list[isize] = NULL;
 315         else {
 316                 bh->b_prev_free->b_next_free = bh->b_next_free;
 317                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 318                 if (free_list[isize] == bh)
 319                          free_list[isize] = bh->b_next_free;
 320         };
 321         bh->b_next_free = bh->b_prev_free = NULL;
 322 }
 323 
 324 static inline void remove_from_queues(struct buffer_head * bh)
 325 {
 326         if(bh->b_dev == 0xffff) {
 327                 remove_from_free_list(bh); /* Free list entries should not be
 328                                               in the hash queue */
 329                 return;
 330         };
 331         nr_buffers_type[bh->b_list]--;
 332         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 333         remove_from_hash_queue(bh);
 334         remove_from_lru_list(bh);
 335 }
 336 
 337 static inline void put_last_lru(struct buffer_head * bh)
 338 {
 339         if (!bh)
 340                 return;
 341         if (bh == lru_list[bh->b_list]) {
 342                 lru_list[bh->b_list] = bh->b_next_free;
 343                 return;
 344         }
 345         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 346         remove_from_lru_list(bh);
 347 /* add to back of the lru list */
 348 
 349         if(!lru_list[bh->b_list]) {
 350                 lru_list[bh->b_list] = bh;
 351                 lru_list[bh->b_list]->b_prev_free = bh;
 352         };
 353 
 354         bh->b_next_free = lru_list[bh->b_list];
 355         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 356         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 357         lru_list[bh->b_list]->b_prev_free = bh;
 358 }
 359 
 360 static inline void put_last_free(struct buffer_head * bh)
 361 {
 362         int isize;
 363         if (!bh)
 364                 return;
 365 
 366         isize = BUFSIZE_INDEX(bh->b_size);      
 367         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 368 /* add to back of free list */
 369 
 370         if(!free_list[isize]) {
 371                 free_list[isize] = bh;
 372                 bh->b_prev_free = bh;
 373         };
 374 
 375         nr_free[isize]++;
 376         bh->b_next_free = free_list[isize];
 377         bh->b_prev_free = free_list[isize]->b_prev_free;
 378         free_list[isize]->b_prev_free->b_next_free = bh;
 379         free_list[isize]->b_prev_free = bh;
 380 }
 381 
 382 static inline void insert_into_queues(struct buffer_head * bh)
 383 {
 384 /* put at end of free list */
 385 
 386         if(bh->b_dev == 0xffff) {
 387                 put_last_free(bh);
 388                 return;
 389         };
 390         if(!lru_list[bh->b_list]) {
 391                 lru_list[bh->b_list] = bh;
 392                 bh->b_prev_free = bh;
 393         };
 394         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 395         bh->b_next_free = lru_list[bh->b_list];
 396         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 397         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 398         lru_list[bh->b_list]->b_prev_free = bh;
 399         nr_buffers_type[bh->b_list]++;
 400         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 401 /* put the buffer in new hash-queue if it has a device */
 402         bh->b_prev = NULL;
 403         bh->b_next = NULL;
 404         if (!bh->b_dev)
 405                 return;
 406         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 407         hash(bh->b_dev,bh->b_blocknr) = bh;
 408         if (bh->b_next)
 409                 bh->b_next->b_prev = bh;
 410 }
 411 
 412 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 413 {               
 414         struct buffer_head * tmp;
 415 
 416         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 417                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 418                         if (tmp->b_size == size)
 419                                 return tmp;
 420                         else {
 421                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 422                                                         MAJOR(dev), MINOR(dev));
 423                                 return NULL;
 424                         }
 425         return NULL;
 426 }
 427 
 428 /*
 429  * Why like this, I hear you say... The reason is race-conditions.
 430  * As we don't lock buffers (unless we are reading them, that is),
 431  * something might happen to it while we sleep (ie a read-error
 432  * will force it bad). This shouldn't really happen currently, but
 433  * the code is ready.
 434  */
 435 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 436 {
 437         struct buffer_head * bh;
 438 
 439         for (;;) {
 440                 if (!(bh=find_buffer(dev,block,size)))
 441                         return NULL;
 442                 bh->b_count++;
 443                 wait_on_buffer(bh);
 444                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 445                         return bh;
 446                 bh->b_count--;
 447         }
 448 }
 449 
 450 void set_blocksize(dev_t dev, int size)
 451 {
 452         int i, nlist;
 453         struct buffer_head * bh, *bhnext;
 454 
 455         if (!blksize_size[MAJOR(dev)])
 456                 return;
 457 
 458         switch(size) {
 459                 default: panic("Invalid blocksize passed to set_blocksize");
 460                 case 512: case 1024: case 2048: case 4096:;
 461         }
 462 
 463         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 464                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 465                 return;
 466         }
 467         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 468                 return;
 469         sync_buffers(dev, 2);
 470         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 471 
 472   /* We need to be quite careful how we do this - we are moving entries
 473      around on the free list, and we can get in a loop if we are not careful.*/
 474 
 475         for(nlist = 0; nlist < NR_LIST; nlist++) {
 476                 bh = lru_list[nlist];
 477                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 478                         if(!bh) break;
 479                         bhnext = bh->b_next_free; 
 480                         if (bh->b_dev != dev)
 481                                  continue;
 482                         if (bh->b_size == size)
 483                                  continue;
 484                         
 485                         wait_on_buffer(bh);
 486                         if (bh->b_dev == dev && bh->b_size != size) {
 487                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 488                                          bh->b_flushtime = 0;
 489                         };
 490                         remove_from_hash_queue(bh);
 491                 }
 492         }
 493 }
 494 
 495 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
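/*
 * BADNESS(bh) is 0 for a clean, unlocked buffer, 1 if it is merely locked,
 * and 2 or 3 if it is dirty; refill_freelist() below only steals buffers
 * whose badness is 0.
 */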
 496 
 497 void refill_freelist(int size)
 498 {
 499         struct buffer_head * bh, * tmp;
 500         struct buffer_head * candidate[NR_LIST];
 501         unsigned int best_time, winner;
 502         int isize = BUFSIZE_INDEX(size);
 503         int buffers[NR_LIST];
 504         int i;
 505         int needed;
 506 
 507         /* First see if we even need this.  Sometimes it is advantageous
 508          to request some blocks in a filesystem that we know we will
 509          be needing ahead of time. */
 510 
 511         if (nr_free[isize] > 100)
 512                 return;
 513 
 514         /* If there are too many dirty buffers, we wake up the update process
 515            now so as to ensure that there are still clean buffers available
 516            for user processes to use (and dirty) */
 517         
 518         /* We are going to try and locate this much memory */
 519         needed =bdf_prm.b_un.nrefill * size;  
 520 
 521         while (nr_free_pages > min_free_pages && needed > 0 &&
 522                grow_buffers(GFP_BUFFER, size)) {
 523                 needed -= PAGE_SIZE;
 524         }
 525 
 526         if(needed <= 0) return;
 527 
 528         /* See if there are too many buffers of a different size.
 529            If so, victimize them */
 530 
 531         while(maybe_shrink_lav_buffers(size))
 532          {
 533                  if(!grow_buffers(GFP_BUFFER, size)) break;
 534                  needed -= PAGE_SIZE;
 535                  if(needed <= 0) return;
 536          };
 537 
 538         /* OK, we cannot grow the buffer cache, now try and get some
 539            from the lru list */
 540 
 541         /* First set the candidate pointers to usable buffers.  This
 542            should be quick nearly all of the time. */
 543 
 544 repeat0:
 545         for(i=0; i<NR_LIST; i++){
 546                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 547                    nr_buffers_type[i] == 0) {
 548                         candidate[i] = NULL;
 549                         buffers[i] = 0;
 550                         continue;
 551                 }
 552                 buffers[i] = nr_buffers_type[i];
 553                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 554                  {
 555                          if(buffers[i] < 0) panic("Here is the problem");
 556                          tmp = bh->b_next_free;
 557                          if (!bh) break;
 558                          
 559                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 560                              bh->b_dirt) {
 561                                  refile_buffer(bh);
 562                                  continue;
 563                          };
 564                          
 565                          if (bh->b_count || bh->b_size != size)
 566                                   continue;
 567                          
 568                          /* Buffers are written in the order they are placed 
 569                             on the locked list. If we encounter a locked
 570                             buffer here, this means that the rest of them
 571                             are also locked */
 572                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 573                                  buffers[i] = 0;
 574                                  break;
 575                          }
 576                          
 577                          if (BADNESS(bh)) continue;
 578                          break;
 579                  };
 580                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 581                 else candidate[i] = bh;
 582                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 583         }
 584         
 585  repeat:
 586         if(needed <= 0) return;
 587         
 588         /* Now see which candidate wins the election */
 589         
 590         winner = best_time = UINT_MAX;  
 591         for(i=0; i<NR_LIST; i++){
 592                 if(!candidate[i]) continue;
 593                 if(candidate[i]->b_lru_time < best_time){
 594                         best_time = candidate[i]->b_lru_time;
 595                         winner = i;
 596                 }
 597         }
 598         
 599         /* If we have a winner, use it, and then get a new candidate from that list */
 600         if(winner != UINT_MAX) {
 601                 i = winner;
 602                 bh = candidate[i];
 603                 candidate[i] = bh->b_next_free;
 604                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 605                 if (bh->b_count || bh->b_size != size)
 606                          panic("Busy buffer in candidate list\n");
 607                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 608                          panic("Shared buffer in candidate list\n");
 609                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 610                 
 611                 if(bh->b_dev == 0xffff) panic("Wrong list");
 612                 remove_from_queues(bh);
 613                 bh->b_dev = 0xffff;
 614                 put_last_free(bh);
 615                 needed -= bh->b_size;
 616                 buffers[i]--;
 617                 if(buffers[i] < 0) panic("Here is the problem");
 618                 
 619                 if(buffers[i] == 0) candidate[i] = NULL;
 620                 
 621                 /* Now all we need to do is advance the candidate pointer
 622                    from the winner list to the next usable buffer */
 623                 if(candidate[i] && buffers[i] > 0){
 624                         if(buffers[i] <= 0) panic("Here is another problem");
 625                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 626                                 if(buffers[i] < 0) panic("Here is the problem");
 627                                 tmp = bh->b_next_free;
 628                                 if (!bh) break;
 629                                 
 630                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 631                                     bh->b_dirt) {
 632                                         refile_buffer(bh);
 633                                         continue;
 634                                 };
 635                                 
 636                                 if (bh->b_count || bh->b_size != size)
 637                                          continue;
 638                                 
 639                                 /* Buffers are written in the order they are
 640                                    placed on the locked list.  If we encounter
 641                                    a locked buffer here, this means that the
 642                                    rest of them are also locked */
 643                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 644                                         buffers[i] = 0;
 645                                         break;
 646                                 }
 647               
 648                                 if (BADNESS(bh)) continue;
 649                                 break;
 650                         };
 651                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 652                         else candidate[i] = bh;
 653                         if(candidate[i] && candidate[i]->b_count) 
 654                                  panic("Here is the problem");
 655                 }
 656                 
 657                 goto repeat;
 658         }
 659         
 660         if(needed <= 0) return;
 661         
 662         /* Too bad, that was not enough. Try a little harder to grow some. */
 663         
 664         if (nr_free_pages > 5) {
 665                 if (grow_buffers(GFP_BUFFER, size)) {
 666                         needed -= PAGE_SIZE;
 667                         goto repeat0;
 668                 };
 669         }
 670         
 671         /* and repeat until we find something good */
 672         if (!grow_buffers(GFP_ATOMIC, size))
 673                 wakeup_bdflush(1);
 674         needed -= PAGE_SIZE;
 675         goto repeat0;
 676 }
 677 
 678 /*
 679  * Ok, this is getblk, and it isn't very clear, again to hinder
 680  * race-conditions. Most of the code is seldom used, (ie repeating),
 681  * so it should be much more efficient than it looks.
 682  *
 683  * The algorithm is changed: hopefully better, and an elusive bug removed.
 684  *
 685  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 686  * when the filesystem starts to get full of dirty blocks (I hope).
 687  */
 688 struct buffer_head * getblk(dev_t dev, int block, int size)
 689 {
 690         struct buffer_head * bh;
 691         int isize = BUFSIZE_INDEX(size);
 692 
 693         /* Update this for the buffer size lav. */
 694         buffer_usage[isize]++;
 695 
 696         /* If there are too many dirty buffers, we wake up the update process
 697            now so as to ensure that there are still clean buffers available
 698            for user processes to use (and dirty) */
 699 repeat:
 700         bh = get_hash_table(dev, block, size);
 701         if (bh) {
 702                 if (bh->b_uptodate && !bh->b_dirt)
 703                          put_last_lru(bh);
 704                 if(!bh->b_dirt) bh->b_flushtime = 0;
 705                 return bh;
 706         }
 707 
 708         while(!free_list[isize]) refill_freelist(size);
 709         
 710         if (find_buffer(dev,block,size))
 711                  goto repeat;
 712 
 713         bh = free_list[isize];
 714         remove_from_free_list(bh);
 715 
 716 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 717 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 718         bh->b_count=1;
 719         bh->b_dirt=0;
 720         bh->b_lock=0;
 721         bh->b_uptodate=0;
 722         bh->b_flushtime = 0;
 723         bh->b_req=0;
 724         bh->b_dev=dev;
 725         bh->b_blocknr=block;
 726         insert_into_queues(bh);
 727         return bh;
 728 }
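/*
 * Illustration, not part of the original file: the usual getblk()/brelse()
 * pattern for a block that will be completely overwritten, so no read is
 * needed first.  The helper below is hypothetical.
 */
static void example_overwrite_block(dev_t dev, int block, int size, char * data)
{
	struct buffer_head * bh = getblk(dev, block, size);

	if (!bh)
		return;
	memcpy(bh->b_data, data, size);
	bh->b_uptodate = 1;		/* contents are now valid */
	bh->b_dirt = 1;			/* and need writing back */
	brelse(bh);			/* bdflush/update will write it out later */
}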
 729 
 730 void set_writetime(struct buffer_head * buf, int flag)
 731 {
 732         int newtime;
 733 
 734         if (buf->b_dirt){
 735                 /* Set the flush time, unless an earlier one is already set */
 736                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 737                                      bdf_prm.b_un.age_buffer);
 738                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 739                          buf->b_flushtime = newtime;
 740         } else {
 741                 buf->b_flushtime = 0;
 742         }
 743 }
 744 
 745 
 746 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 747                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
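/*
 * The index into buffer_disposition[] is a bit mask built by refile_buffer()
 * below: bit 0 = page shared (mem_map count != 1), bit 1 = locked,
 * bit 2 = dirty.  Anything dirty therefore lands on BUF_DIRTY, and a clean
 * shared buffer on BUF_SHARED, whether or not it is locked.
 */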
 748 
 749 void refile_buffer(struct buffer_head * buf){
 750         int i, dispose;
 751         i = 0;
 752         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 753         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 754         if(buf->b_lock) i |= 2;
 755         if(buf->b_dirt) i |= 4;
 756         dispose = buffer_disposition[i];
 757         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 758                  dispose = BUF_UNSHARED;
 759         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 760         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 761         if(dispose != buf->b_list)  {
 762                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 763                          buf->b_lru_time = jiffies;
 764                 if(dispose == BUF_LOCKED && 
 765                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 766                          dispose = BUF_LOCKED1;
 767                 remove_from_queues(buf);
 768                 buf->b_list = dispose;
 769                 insert_into_queues(buf);
 770                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 771                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 772                    bdf_prm.b_un.nfract/100)
 773                          wakeup_bdflush(0);
 774         }
 775 }
 776 
 777 void brelse(struct buffer_head * buf)
 778 {
 779         if (!buf)
 780                 return;
 781         wait_on_buffer(buf);
 782 
 783         /* If dirty, mark the time this buffer should be written back */
 784         set_writetime(buf, 0);
 785         refile_buffer(buf);
 786 
 787         if (buf->b_count) {
 788                 if (--buf->b_count)
 789                         return;
 790                 wake_up(&buffer_wait);
 791                 return;
 792         }
 793         printk("VFS: brelse: Trying to free free buffer\n");
 794 }
 795 
 796 /*
 797  * bread() reads a specified block and returns the buffer that contains
 798  * it. It returns NULL if the block was unreadable.
 799  */
 800 struct buffer_head * bread(dev_t dev, int block, int size)
 801 {
 802         struct buffer_head * bh;
 803 
 804         if (!(bh = getblk(dev, block, size))) {
 805                 printk("VFS: bread: READ error on device %d/%d\n",
 806                                                 MAJOR(dev), MINOR(dev));
 807                 return NULL;
 808         }
 809         if (bh->b_uptodate)
 810                 return bh;
 811         ll_rw_block(READ, 1, &bh);
 812         wait_on_buffer(bh);
 813         if (bh->b_uptodate)
 814                 return bh;
 815         brelse(bh);
 816         return NULL;
 817 }
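/*
 * Illustration, not part of the original file: typical bread() use by a
 * filesystem reading a single metadata block.  The device, block number and
 * size are whatever the caller needs; the helper itself is hypothetical.
 */
static int example_read_block(dev_t dev, int block, int size)
{
	struct buffer_head * bh = bread(dev, block, size);

	if (!bh)
		return -EIO;		/* the block could not be read */
	/* ... examine bh->b_data here ... */
	brelse(bh);			/* drop the reference taken by bread() */
	return 0;
}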
 818 
 819 /*
 820  * Ok, breada can be used as bread, but additionally starts read-ahead
 821  * on the blocks that follow, limited by the device's read_ahead setting,
 822  * the size of the file and the NBUF array below.
 823  */
 824 
 825 #define NBUF 16
 826 
 827 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 828         unsigned int pos, unsigned int filesize)
 829 {
 830         struct buffer_head * bhlist[NBUF];
 831         unsigned int blocks;
 832         struct buffer_head * bh;
 833         int index;
 834         int i, j;
 835 
 836         if (pos >= filesize)
 837                 return NULL;
 838 
 839         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 840                 return NULL;
 841 
 842         index = BUFSIZE_INDEX(bh->b_size);
 843 
 844         if (bh->b_uptodate)
 845                 return bh;
 846 
 847         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 848 
 849         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 850                 blocks = read_ahead[MAJOR(dev)] >> index;
 851         if (blocks > NBUF)
 852                 blocks = NBUF;
 853         
 854         bhlist[0] = bh;
 855         j = 1;
 856         for(i=1; i<blocks; i++) {
 857                 bh = getblk(dev,block+i,bufsize);
 858                 if (bh->b_uptodate) {
 859                         brelse(bh);
 860                         break;
 861                 }
 862                 bhlist[j++] = bh;
 863         }
 864 
 865         /* Request the read for these buffers, and then release them */
 866         ll_rw_block(READ, j, bhlist);
 867 
 868         for(i=1; i<j; i++)
 869                 brelse(bhlist[i]);
 870 
 871         /* Wait for this buffer, and then continue on */
 872         bh = bhlist[0];
 873         wait_on_buffer(bh);
 874         if (bh->b_uptodate)
 875                 return bh;
 876         brelse(bh);
 877         return NULL;
 878 }
 879 
 880 /*
 881  * See fs/inode.c for the weird use of volatile..
 882  */
 883 static void put_unused_buffer_head(struct buffer_head * bh)
 884 {
 885         struct wait_queue * wait;
 886 
 887         wait = ((volatile struct buffer_head *) bh)->b_wait;
 888         memset(bh,0,sizeof(*bh));
 889         ((volatile struct buffer_head *) bh)->b_wait = wait;
 890         bh->b_next_free = unused_list;
 891         unused_list = bh;
 892 }
 893 
 894 static void get_more_buffer_heads(void)
 895 {
 896         int i;
 897         struct buffer_head * bh;
 898 
 899         if (unused_list)
 900                 return;
 901 
 902         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 903                 return;
 904 
 905         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 906                 bh->b_next_free = unused_list;  /* only make link */
 907                 unused_list = bh++;
 908         }
 909 }
 910 
 911 static struct buffer_head * get_unused_buffer_head(void)
 912 {
 913         struct buffer_head * bh;
 914 
 915         get_more_buffer_heads();
 916         if (!unused_list)
 917                 return NULL;
 918         bh = unused_list;
 919         unused_list = bh->b_next_free;
 920         bh->b_next_free = NULL;
 921         bh->b_data = NULL;
 922         bh->b_size = 0;
 923         bh->b_req = 0;
 924         return bh;
 925 }
 926 
 927 /*
 928  * Create the appropriate buffers when given a page for data area and
 929  * the size of each buffer.. Use the bh->b_this_page linked list to
 930  * follow the buffers created.  Return NULL if unable to create more
 931  * buffers.
 932  */
 933 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 934 {
 935         struct buffer_head *bh, *head;
 936         unsigned long offset;
 937 
 938         head = NULL;
 939         offset = PAGE_SIZE;
 940         while ((offset -= size) < PAGE_SIZE) {
 941                 bh = get_unused_buffer_head();
 942                 if (!bh)
 943                         goto no_grow;
 944                 bh->b_this_page = head;
 945                 head = bh;
 946                 bh->b_data = (char *) (page+offset);
 947                 bh->b_size = size;
 948                 bh->b_dev = 0xffff;  /* Flag as unused */
 949         }
 950         return head;
 951 /*
 952  * In case anything failed, we just free everything we got.
 953  */
 954 no_grow:
 955         bh = head;
 956         while (bh) {
 957                 head = bh;
 958                 bh = bh->b_this_page;
 959                 put_unused_buffer_head(head);
 960         }
 961         return NULL;
 962 }
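/*
 * Illustration, not part of the original file: following the b_this_page
 * chain that create_buffers() hands back.  At this point the chain is
 * NULL-terminated; grow_buffers() and try_to_load_aligned() below close it
 * into a ring, which is why routines like try_to_free() can start from any
 * buffer on the page.
 */
static void example_walk_page_buffers(struct buffer_head * head)
{
	struct buffer_head * bh;

	for (bh = head ; bh != NULL ; bh = bh->b_this_page)
		printk("buffer data at %08lx, %lu bytes\n",
		       (unsigned long) bh->b_data, (unsigned long) bh->b_size);
}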
 963 
 964 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 965 {
 966         int i;
 967         int bhnum = 0;
 968         struct buffer_head * bhr[8];
 969 
 970         for (i = 0 ; i < nrbuf ; i++) {
 971                 if (bh[i] && !bh[i]->b_uptodate)
 972                         bhr[bhnum++] = bh[i];
 973         }
 974         if (bhnum)
 975                 ll_rw_block(READ, bhnum, bhr);
 976         for (i = 0 ; i < nrbuf ; i++) {
 977                 if (bh[i]) {
 978                         wait_on_buffer(bh[i]);
 979                 }
 980         }
 981 }
 982 
 983 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 984         dev_t dev, int *b, int size)
 985 {
 986         struct buffer_head * bh[8];
 987         unsigned long page;
 988         unsigned long offset;
 989         int block;
 990         int nrbuf;
 991 
 992         page = (unsigned long) first->b_data;
 993         if (page & ~PAGE_MASK) {
 994                 brelse(first);
 995                 return 0;
 996         }
 997         mem_map[MAP_NR(page)]++;
 998         bh[0] = first;
 999         nrbuf = 1;
1000         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1001                 block = *++b;
1002                 if (!block)
1003                         goto no_go;
1004                 first = get_hash_table(dev, block, size);
1005                 if (!first)
1006                         goto no_go;
1007                 bh[nrbuf++] = first;
1008                 if (page+offset != (unsigned long) first->b_data)
1009                         goto no_go;
1010         }
1011         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1012         while (nrbuf-- > 0)
1013                 brelse(bh[nrbuf]);
1014         free_page(address);
1015         ++current->mm->min_flt;
1016         return page;
1017 no_go:
1018         while (nrbuf-- > 0)
1019                 brelse(bh[nrbuf]);
1020         free_page(page);
1021         return 0;
1022 }
1023 
1024 static unsigned long try_to_load_aligned(unsigned long address,
1025         dev_t dev, int b[], int size)
1026 {
1027         struct buffer_head * bh, * tmp, * arr[8];
1028         unsigned long offset;
1029         int isize = BUFSIZE_INDEX(size);
1030         int * p;
1031         int block;
1032 
1033         bh = create_buffers(address, size);
1034         if (!bh)
1035                 return 0;
1036         /* do any of the buffers already exist? punt if so.. */
1037         p = b;
1038         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1039                 block = *(p++);
1040                 if (!block)
1041                         goto not_aligned;
1042                 if (find_buffer(dev, block, size))
1043                         goto not_aligned;
1044         }
1045         tmp = bh;
1046         p = b;
1047         block = 0;
1048         while (1) {
1049                 arr[block++] = bh;
1050                 bh->b_count = 1;
1051                 bh->b_dirt = 0;
1052                 bh->b_flushtime = 0;
1053                 bh->b_uptodate = 0;
1054                 bh->b_req = 0;
1055                 bh->b_dev = dev;
1056                 bh->b_blocknr = *(p++);
1057                 bh->b_list = BUF_CLEAN;
1058                 nr_buffers++;
1059                 nr_buffers_size[isize]++;
1060                 insert_into_queues(bh);
1061                 if (bh->b_this_page)
1062                         bh = bh->b_this_page;
1063                 else
1064                         break;
1065         }
1066         buffermem += PAGE_SIZE;
1067         bh->b_this_page = tmp;
1068         mem_map[MAP_NR(address)]++;
1069         buffer_pages[MAP_NR(address)] = bh;
1070         read_buffers(arr,block);
1071         while (block-- > 0)
1072                 brelse(arr[block]);
1073         ++current->mm->maj_flt;
1074         return address;
1075 not_aligned:
1076         while ((tmp = bh) != NULL) {
1077                 bh = bh->b_this_page;
1078                 put_unused_buffer_head(tmp);
1079         }
1080         return 0;
1081 }
1082 
1083 /*
1084  * Try-to-share-buffers tries to minimize memory use by trying to keep
1085  * both code pages and the buffer area in the same page. This is done by
1086  * (a) checking if the buffers are already aligned correctly in memory and
1087  * (b) if none of the buffer heads are in memory at all, trying to load
1088  * them into memory the way we want them.
1089  *
1090  * This doesn't guarantee that the memory is shared, but should under most
1091  * circumstances work very well indeed (ie >90% sharing of code pages on
1092  * demand-loadable executables).
1093  */
1094 static inline unsigned long try_to_share_buffers(unsigned long address,
1095         dev_t dev, int *b, int size)
1096 {
1097         struct buffer_head * bh;
1098         int block;
1099 
1100         block = b[0];
1101         if (!block)
1102                 return 0;
1103         bh = get_hash_table(dev, block, size);
1104         if (bh)
1105                 return check_aligned(bh, address, dev, b, size);
1106         return try_to_load_aligned(address, dev, b, size);
1107 }
1108 
1109 #define COPYBLK(size,from,to) \
1110 __asm__ __volatile__("rep ; movsl": \
1111         :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1112         :"cx","di","si")
1113 
1114 /*
1115  * bread_page reads a page worth of buffers into memory at the desired
1116  * address. It's a function of its own, as there is some speed to be got
1117  * by reading them all at the same time, not waiting for one to be read,
1118  * and then another etc. This also allows us to optimize memory usage by
1119  * sharing code pages and filesystem buffers..
1120  */
1121 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1122 {
1123         struct buffer_head * bh[8];
1124         unsigned long where;
1125         int i, j;
1126 
1127         if (!no_share) {
1128                 where = try_to_share_buffers(address, dev, b, size);
1129                 if (where)
1130                         return where;
1131         }
1132         ++current->mm->maj_flt;
1133         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1134                 bh[i] = NULL;
1135                 if (b[i])
1136                         bh[i] = getblk(dev, b[i], size);
1137         }
1138         read_buffers(bh,i);
1139         where = address;
1140         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1141                 if (bh[i]) {
1142                         if (bh[i]->b_uptodate)
1143                                 COPYBLK(size, (unsigned long) bh[i]->b_data, where);
1144                         brelse(bh[i]);
1145                 }
1146         }
1147         return address;
1148 }
1149 
1150 /*
1151  * Try to increase the number of buffers available: the size argument
1152  * is used to determine what kind of buffers we want.
1153  */
1154 static int grow_buffers(int pri, int size)
1155 {
1156         unsigned long page;
1157         struct buffer_head *bh, *tmp;
1158         struct buffer_head * insert_point;
1159         int isize;
1160 
1161         if ((size & 511) || (size > PAGE_SIZE)) {
1162                 printk("VFS: grow_buffers: size = %d\n",size);
1163                 return 0;
1164         }
1165 
1166         isize = BUFSIZE_INDEX(size);
1167 
1168         if (!(page = __get_free_page(pri)))
1169                 return 0;
1170         bh = create_buffers(page, size);
1171         if (!bh) {
1172                 free_page(page);
1173                 return 0;
1174         }
1175 
1176         insert_point = free_list[isize];
1177 
1178         tmp = bh;
1179         while (1) {
1180                 nr_free[isize]++;
1181                 if (insert_point) {
1182                         tmp->b_next_free = insert_point->b_next_free;
1183                         tmp->b_prev_free = insert_point;
1184                         insert_point->b_next_free->b_prev_free = tmp;
1185                         insert_point->b_next_free = tmp;
1186                 } else {
1187                         tmp->b_prev_free = tmp;
1188                         tmp->b_next_free = tmp;
1189                 }
1190                 insert_point = tmp;
1191                 ++nr_buffers;
1192                 if (tmp->b_this_page)
1193                         tmp = tmp->b_this_page;
1194                 else
1195                         break;
1196         }
1197         free_list[isize] = bh;
1198         buffer_pages[MAP_NR(page)] = bh;
1199         tmp->b_this_page = bh;
1200         wake_up(&buffer_wait);
1201         buffermem += PAGE_SIZE;
1202         return 1;
1203 }
1204 
1205 /*
1206  * try_to_free() checks if all the buffers on this particular page
1207  * are unused, and frees the page if so.
1208  */
1209 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1210 {
1211         unsigned long page;
1212         struct buffer_head * tmp, * p;
1213         int isize = BUFSIZE_INDEX(bh->b_size);
1214 
1215         *bhp = bh;
1216         page = (unsigned long) bh->b_data;
1217         page &= PAGE_MASK;
1218         tmp = bh;
1219         do {
1220                 if (!tmp)
1221                         return 0;
1222                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1223                         return 0;
1224                 tmp = tmp->b_this_page;
1225         } while (tmp != bh);
1226         tmp = bh;
1227         do {
1228                 p = tmp;
1229                 tmp = tmp->b_this_page;
1230                 nr_buffers--;
1231                 nr_buffers_size[isize]--;
1232                 if (p == *bhp)
1233                   {
1234                     *bhp = p->b_prev_free;
1235                     if (p == *bhp) /* Was this the last in the list? */
1236                       *bhp = NULL;
1237                   }
1238                 remove_from_queues(p);
1239                 put_unused_buffer_head(p);
1240         } while (tmp != bh);
1241         buffermem -= PAGE_SIZE;
1242         buffer_pages[MAP_NR(page)] = NULL;
1243         free_page(page);
1244         return !mem_map[MAP_NR(page)];
1245 }
1246 
1247 
1248 /*
1249  * Consult the load average for buffers and decide whether or not
1250  * we should shrink the buffers of one size or not.  If we decide yes,
1251  * do it and return 1.  Else return 0.  Do not attempt to shrink buffers
1252  * of the size that is specified.
1253  *
1254  * I would prefer not to use a load average, but the way things are now it
1255  * seems unavoidable.  The way to get rid of it would be to force clustering
1256  * universally, so that when we reclaim buffers we always reclaim an entire
1257  * page.  Doing this would mean that we all need to move towards QMAGIC.
1258  */
1259 
1260 static int maybe_shrink_lav_buffers(int size)
1261 {          
1262         int nlist;
1263         int isize;
1264         int total_lav, total_n_buffers, n_sizes;
1265         
1266         /* Do not consider the shared buffers since they would not tend
1267            to have getblk called very often, and this would throw off
1268            the lav.  They are not easily reclaimable anyway (let the swapper
1269            make the first move). */
1270   
1271         total_lav = total_n_buffers = n_sizes = 0;
1272         for(nlist = 0; nlist < NR_SIZES; nlist++)
1273          {
1274                  total_lav += buffers_lav[nlist];
1275                  if(nr_buffers_size[nlist]) n_sizes++;
1276                  total_n_buffers += nr_buffers_size[nlist];
1277                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1278          }
1279         
1280         /* See if we have an excessive number of buffers of a particular
1281            size - if so, victimize that bunch. */
1282   
1283         isize = (size ? BUFSIZE_INDEX(size) : -1);
1284         
1285         if (n_sizes > 1)
1286                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1287                   {
1288                           if(nlist == isize) continue;
1289                           if(nr_buffers_size[nlist] &&
1290                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1291                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1292                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1293                                             return 1;
1294                   }
1295         return 0;
1296 }
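/*
 * Illustration of the test above: rearranged, size n is victimized when
 *
 *	buffers_lav[n]          nr_buffers_size[n] - shared buffers of size n
 *	--------------   <   -----------------------------------------------
 *	   total_lav                  lav_const * total_n_buffers
 *
 * i.e. when that size's share of the recent getblk() traffic is small
 * compared with its (scaled) share of the non-shared buffer pool.
 */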
1297 /*
1298  * Try to free up some pages by shrinking the buffer-cache
1299  *
1300  * Priority tells the routine how hard to try to shrink the
1301  * buffers: 3 means "don't bother too much", while a value
1302  * of 0 means "we'd better get some free pages now".
1303  */
1304 int shrink_buffers(unsigned int priority)
1305 {
1306         if (priority < 2) {
1307                 sync_buffers(0,0);
1308         }
1309 
1310         if(priority == 2) wakeup_bdflush(1);
1311 
1312         if(maybe_shrink_lav_buffers(0)) return 1;
1313 
1314         /* No good candidate size - take any size we can find */
1315         return shrink_specific_buffers(priority, 0);
1316 }
1317 
1318 static int shrink_specific_buffers(unsigned int priority, int size)
1319 {
1320         struct buffer_head *bh;
1321         int nlist;
1322         int i, isize, isize1;
1323 
1324 #ifdef DEBUG
1325         if(size) printk("Shrinking buffers of size %d\n", size);
1326 #endif
1327         /* First try the free lists, and see if we can get a complete page
1328            from here */
1329         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1330 
1331         for(isize = 0; isize<NR_SIZES; isize++){
1332                 if(isize1 != -1 && isize1 != isize) continue;
1333                 bh = free_list[isize];
1334                 if(!bh) continue;
1335                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1336                         if (bh->b_count || !bh->b_this_page)
1337                                  continue;
1338                         if (try_to_free(bh, &bh))
1339                                  return 1;
1340                         if(!bh) break; /* Some interrupt must have used it after we
1341                                           freed the page.  No big deal - keep looking */
1342                 }
1343         }
1344         
1345         /* Not enough in the free lists, now try the lru list */
1346         
1347         for(nlist = 0; nlist < NR_LIST; nlist++) {
1348         repeat1:
1349                 if(priority > 3 && nlist == BUF_SHARED) continue;
1350                 bh = lru_list[nlist];
1351                 if(!bh) continue;
1352                 i = nr_buffers_type[nlist] >> priority;
1353                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1354                         /* We may have stalled while waiting for I/O to complete. */
1355                         if(bh->b_list != nlist) goto repeat1;
1356                         if (bh->b_count || !bh->b_this_page)
1357                                  continue;
1358                         if(size && bh->b_size != size) continue;
1359                         if (bh->b_lock)
1360                                  if (priority)
1361                                           continue;
1362                                  else
1363                                           wait_on_buffer(bh);
1364                         if (bh->b_dirt) {
1365                                 bh->b_count++;
1366                                 bh->b_flushtime = 0;
1367                                 ll_rw_block(WRITEA, 1, &bh);
1368                                 bh->b_count--;
1369                                 continue;
1370                         }
1371                         if (try_to_free(bh, &bh))
1372                                  return 1;
1373                         if(!bh) break;
1374                 }
1375         }
1376         return 0;
1377 }
1378 
1379 
1380 void show_buffers(void)
1381 {
1382         struct buffer_head * bh;
1383         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1384         int shared;
1385         int nlist, isize;
1386 
1387         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1388         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1389         printk("Buffer blocks:   %6d\n",nr_buffers);
1390 
1391         for(nlist = 0; nlist < NR_LIST; nlist++) {
1392           shared = found = locked = dirty = used = lastused = 0;
1393           bh = lru_list[nlist];
1394           if(!bh) continue;
1395           do {
1396                 found++;
1397                 if (bh->b_lock)
1398                         locked++;
1399                 if (bh->b_dirt)
1400                         dirty++;
1401                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1402                 if (bh->b_count)
1403                         used++, lastused = found;
1404                 bh = bh->b_next_free;
1405               } while (bh != lru_list[nlist]);
1406         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1407                 nlist, found, used, lastused, locked, dirty, shared);
1408         };
1409         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1410         for(isize = 0; isize<NR_SIZES; isize++){
1411                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1412                        buffers_lav[isize], nr_free[isize]);
1413                 for(nlist = 0; nlist < NR_LIST; nlist++)
1414                          printk("%7d ", nr_buffers_st[isize][nlist]);
1415                 printk("\n");
1416         }
1417 }
1418 
1419 /*
1420  * try_to_reassign() checks if all the buffers on this particular page
1421  * are unused, and reassigns them to a new cluster if so.
1422  */
1423 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1424                            dev_t dev, unsigned int starting_block)
1425 {
1426         unsigned long page;
1427         struct buffer_head * tmp, * p;
1428 
1429         *bhp = bh;
1430         page = (unsigned long) bh->b_data;
1431         page &= PAGE_MASK;
1432         if(mem_map[MAP_NR(page)] != 1) return 0;
1433         tmp = bh;
1434         do {
1435                 if (!tmp)
1436                          return 0;
1437                 
1438                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1439                          return 0;
1440                 tmp = tmp->b_this_page;
1441         } while (tmp != bh);
1442         tmp = bh;
1443         
1444         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1445                  tmp = tmp->b_this_page;
1446         
1447         /* This is the buffer at the head of the page */
1448         bh = tmp;
1449         do {
1450                 p = tmp;
1451                 tmp = tmp->b_this_page;
1452                 remove_from_queues(p);
1453                 p->b_dev=dev;
1454                 p->b_uptodate = 0;
1455                 p->b_req = 0;
1456                 p->b_blocknr=starting_block++;
1457                 insert_into_queues(p);
1458         } while (tmp != bh);
1459         return 1;
1460 }
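
try_to_reassign() relies on the buffers carved out of one page being linked into a circular list through b_this_page, with the buffer whose b_data is page aligned acting as the head. A minimal sketch of walking that ring; the helper is illustrative and not part of this file.

static int count_buffers_on_page(struct buffer_head * bh)
{
        struct buffer_head * tmp = bh;
        int n = 0;

        if (!bh)
                return 0;
        do {
                n++;                    /* e.g. 4 for 1024-byte buffers on a
                                           4096-byte page */
                tmp = tmp->b_this_page;
        } while (tmp && tmp != bh);
        return n;
}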
1461 
1462 /*
1463  * Try to find a free cluster by locating a page where
1464  * all of the buffers are unused.  We would like this function
1465  * to be atomic, so we do not call anything that might cause
1466  * the process to sleep.  The priority is somewhat similar to
1467  * the priority used in shrink_buffers.
1468  * 
1469  * My thinking is that the kernel should end up using whole
1470  * pages for the buffer cache as much of the time as possible.
1471  * This way the other buffers on a particular page are likely
1472  * to be very near each other on the free list, and we will not
1473  * be expiring data prematurely.  For now we only cannibalize buffers
1474  * of the same size to keep the code simpler.
1475  */
1476 static int reassign_cluster(dev_t dev, 
1477                      unsigned int starting_block, int size)
1478 {
1479         struct buffer_head *bh;
1480         int isize = BUFSIZE_INDEX(size);
1481         int i;
1482 
1483         /* We want to give ourselves a really good shot at generating
1484            a cluster, and since we only take buffers from the free
1485            list, we "overfill" it a little. */
1486 
1487         while(nr_free[isize] < 32) refill_freelist(size);
1488 
1489         bh = free_list[isize];
1490         if(bh)
1491                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1492                          if (!bh->b_this_page)  continue;
1493                          if (try_to_reassign(bh, &bh, dev, starting_block))
1494                                  return 4;
1495                  }
1496         return 0;
1497 }
1498 
1499 /* This function tries to generate a new cluster of buffers
1500  * from a new page in memory.  We should only do this if we have
1501  * not expanded the buffer cache to the maximum size that we allow.
1502  */
1503 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1504 {
1505         struct buffer_head * bh, * tmp, * arr[8];
1506         int isize = BUFSIZE_INDEX(size);
1507         unsigned long offset;
1508         unsigned long page;
1509         int nblock;
1510 
1511         page = get_free_page(GFP_NOBUFFER);
1512         if(!page) return 0;
1513 
1514         bh = create_buffers(page, size);
1515         if (!bh) {
1516                 free_page(page);
1517                 return 0;
1518         };
1519         nblock = block;
1520         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1521                 if (find_buffer(dev, nblock++, size))
1522                          goto not_aligned;
1523         }
1524         tmp = bh;
1525         nblock = 0;
1526         while (1) {
1527                 arr[nblock++] = bh;
1528                 bh->b_count = 1;
1529                 bh->b_dirt = 0;
1530                 bh->b_flushtime = 0;
1531                 bh->b_lock = 0;
1532                 bh->b_uptodate = 0;
1533                 bh->b_req = 0;
1534                 bh->b_dev = dev;
1535                 bh->b_list = BUF_CLEAN;
1536                 bh->b_blocknr = block++;
1537                 nr_buffers++;
1538                 nr_buffers_size[isize]++;
1539                 insert_into_queues(bh);
1540                 if (bh->b_this_page)
1541                         bh = bh->b_this_page;
1542                 else
1543                         break;
1544         }
1545         buffermem += PAGE_SIZE;
1546         buffer_pages[MAP_NR(page)] = bh;
1547         bh->b_this_page = tmp;
1548         while (nblock-- > 0)
1549                 brelse(arr[nblock]);
1550         return 4;
1551 not_aligned:
1552         while ((tmp = bh) != NULL) {
1553                 bh = bh->b_this_page;
1554                 put_unused_buffer_head(tmp);
1555         }
1556         free_page(page);
1557         return 0;
1558 }
1559 
1560 unsigned long generate_cluster(dev_t dev, int b[], int size)
1561 {
1562         int i, offset;
1563         
1564         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1565                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1566                 if(find_buffer(dev, b[i], size)) return 0;
1567         };
1568 
1569         /* OK, we have a candidate for a new cluster */
1570         
1571         /* See if one size of buffer is over-represented in the buffer cache,
1572            if so, reduce the number of buffers of that size */
1573         if(maybe_shrink_lav_buffers(size))
1574          {
1575                  int retval;
1576                  retval = try_to_generate_cluster(dev, b[0], size);
1577                  if(retval) return retval;
1578          };
1579         
1580         if (nr_free_pages > min_free_pages) 
1581                  return try_to_generate_cluster(dev, b[0], size);
1582         else
1583                  return reassign_cluster(dev, b[0], size);
1584 }
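
The loop at the top of generate_cluster() gives up unless the caller's block numbers are strictly consecutive and none of them is already in the cache. A small self-contained illustration of the contiguity test, with hypothetical block numbers in the comment:

static int blocks_are_contiguous(int b[], int nblocks)
{
        int i;

        /* {100, 101, 102, 103} passes; {100, 101, 103, 104} fails at
           i == 2 because b[2] - 1 != b[1]. */
        for (i = 1; i < nblocks; i++)
                if (b[i] - 1 != b[i - 1])
                        return 0;
        return 1;
}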
1585 
1586 /*
1587  * This initializes the buffer free list.  nr_buffers_type is set
1588  * to one less than the actual number of buffers, as a sop to backwards
1589  * compatibility --- the old code did this (I think unintentionally,
1590  * but I'm not sure), and programs in the ps package expect it.
1591  *                                      - TYT 8/30/92
1592  */
1593 void buffer_init(void)
1594 {
1595         int i;
1596         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1597 
1598         if (high_memory >= 4*1024*1024) {
1599                 min_free_pages = 200;
1600                 if(high_memory >= 16*1024*1024)
1601                          nr_hash = 16381;
1602                 else
1603                          nr_hash = 4093;
1604         } else {
1605                 min_free_pages = 20;
1606                 nr_hash = 997;
1607         };
1608         
1609         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1610                                                      sizeof(struct buffer_head *));
1611 
1612 
1613         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1614                                                      sizeof(struct buffer_head *));
1615         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1616                 buffer_pages[i] = NULL;
1617 
1618         for (i = 0 ; i < nr_hash ; i++)
1619                 hash_table[i] = NULL;
1620         lru_list[BUF_CLEAN] = 0;
1621         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1622         if (!free_list[isize])
1623                 panic("VFS: Unable to initialize buffer free list!");
1624         return;
1625 }
1626 
1627 /* This is a simple kernel daemon whose job is to provide a dynamic
1628  * response to dirty buffers.  Once this process is activated, we write back
1629  * a limited number of buffers to the disks and then go back to sleep again.
1630  * In effect this is a process which never leaves kernel mode, and does not have
1631  * any user memory associated with it except for the stack.  There is also
1632  * a kernel stack page, which obviously must be separate from the user stack.
1633  */
1634 struct wait_queue * bdflush_wait = NULL;
1635 struct wait_queue * bdflush_done = NULL;
1636 
1637 static int bdflush_running = 0;
1638 
1639 static void wakeup_bdflush(int wait)
1640 {
1641         if(!bdflush_running){
1642                 printk("Warning - bdflush not running\n");
1643                 sync_buffers(0,0);
1644                 return;
1645         };
1646         wake_up(&bdflush_wait);
1647         if(wait) sleep_on(&bdflush_done);
1648 }
1649 
1650 
1651 
1652 /* 
1653  * Here we attempt to write back old buffers.  We also flush inodes
1654  * and superblocks, since this function is essentially "update", and
1655  * otherwise there would be no way of ensuring that these quantities ever
1656  * get written back.  Ideally, we would have a timestamp on the inodes
1657  * and superblocks so that we could write back only the old ones.
1658  */
1659 
1660 asmlinkage int sync_old_buffers(void)
1661 {
1662         int i, isize;
1663         int ndirty, nwritten;
1664         int nlist;
1665         int ncount;
1666         struct buffer_head * bh, *next;
1667 
1668         sync_supers(0);
1669         sync_inodes(0);
1670 
1671         ncount = 0;
1672 #ifdef DEBUG
1673         for(nlist = 0; nlist < NR_LIST; nlist++)
1674 #else
1675         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1676 #endif
1677         {
1678                 ndirty = 0;
1679                 nwritten = 0;
1680         repeat:
1681                 bh = lru_list[nlist];
1682                 if(bh) 
1683                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1684                                  /* We may have stalled while waiting for I/O to complete. */
1685                                  if(bh->b_list != nlist) goto repeat;
1686                                  next = bh->b_next_free;
1687                                  if(!lru_list[nlist]) {
1688                                          printk("Dirty list empty %d\n", i);
1689                                          break;
1690                                  }
1691                                  
1692                                  /* Clean buffer on dirty list?  Refile it */
1693                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1694                                   {
1695                                           refile_buffer(bh);
1696                                           continue;
1697                                   }
1698                                  
1699                                  if (bh->b_lock || !bh->b_dirt)
1700                                           continue;
1701                                  ndirty++;
1702                                  if(bh->b_flushtime > jiffies) continue;
1703                                  nwritten++;
1704                                  bh->b_count++;
1705                                  bh->b_flushtime = 0;
1706 #ifdef DEBUG
1707                                  if(nlist != BUF_DIRTY) ncount++;
1708 #endif
1709                                  ll_rw_block(WRITE, 1, &bh);
1710                                  bh->b_count--;
1711                          }
1712         }
1713 #ifdef DEBUG
1714         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1715         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1716 #endif
1717         
1718         /* We assume that we only come through here on a regular
1719            schedule, like every 5 seconds.  Now update load averages.  
1720            Reset the usage counts afterwards so they cannot overflow. */
1721         for(isize = 0; isize<NR_SIZES; isize++){
1722                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1723                 buffer_usage[isize] = 0;
1724         };
1725         return 0;
1726 }
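
The final loop assumes this function runs on a regular schedule (about every five seconds) and folds each size's usage count into a decaying load average before zeroing the count. A minimal sketch of the underlying idea, assuming CALC_LOAD behaves like the classic fixed-point exponential average; the helper and its scaling constants are illustrative, not the kernel macro.

#define SKETCH_FSHIFT  11
#define SKETCH_FIXED_1 (1 << SKETCH_FSHIFT)

static unsigned long decay_load(unsigned long load, unsigned long decay,
                                unsigned long sample)
{
        /* new_load = old_load*d + sample*(1-d), with the decay factor d
           scaled by SKETCH_FIXED_1 so everything stays in integer
           arithmetic. */
        load *= decay;
        load += sample * (SKETCH_FIXED_1 - decay);
        return load >> SKETCH_FSHIFT;
}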
1727 
1728 
1729 /* This is the interface to bdflush.  As we get more sophisticated, we can
1730  * pass tuning parameters to this "process", to adjust how it behaves.  If the
1731  * daemon is already running, invoking this again simply modifies
1732  * the tuning parameters.  Each parameter is verified, however,
1733  * to make sure that it is reasonable. */
1734 
1735 asmlinkage int sys_bdflush(int func, int data)
1736 {
1737         int i, error;
1738         int ndirty;
1739         int nlist;
1740         int ncount;
1741         struct buffer_head * bh, *next;
1742 
1743         if (!suser())
1744                 return -EPERM;
1745 
1746         if (func == 1)
1747                  return sync_old_buffers();
1748 
1749         /* Basically func 0 starts the daemon, func 1 flushes old buffers, func 2 reads param 1, func 3 writes param 1, etc */
1750         if (func >= 2) {
1751                 i = (func-2) >> 1;
1752                 if (i < 0 || i >= N_PARAM)
1753                         return -EINVAL;
1754                 if((func & 1) == 0) {
1755                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1756                         if (error)
1757                                 return error;
1758                         put_fs_long(bdf_prm.data[i], data);
1759                         return 0;
1760                 };
1761                 if (data < bdflush_min[i] || data > bdflush_max[i])
1762                         return -EINVAL;
1763                 bdf_prm.data[i] = data;
1764                 return 0;
1765         };
1766         
1767         if (bdflush_running)
1768                 return -EBUSY; /* Only one copy of this running at one time */
1769         bdflush_running++;
1770         
1771         /* OK, from here on is the daemon */
1772         
1773         for (;;) {
1774 #ifdef DEBUG
1775                 printk("bdflush() activated...");
1776 #endif
1777                 
1778                 ncount = 0;
1779 #ifdef DEBUG
1780                 for(nlist = 0; nlist < NR_LIST; nlist++)
1781 #else
1782                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1783 #endif
1784                  {
1785                          ndirty = 0;
1786                  repeat:
1787                          bh = lru_list[nlist];
1788                          if(bh) 
1789                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1790                                        bh = next) {
1791                                           /* We may have stalled while waiting for I/O to complete. */
1792                                           if(bh->b_list != nlist) goto repeat;
1793                                           next = bh->b_next_free;
1794                                           if(!lru_list[nlist]) {
1795                                                   printk("Dirty list empty %d\n", i);
1796                                                   break;
1797                                           }
1798                                           
1799                                           /* Clean buffer on dirty list?  Refile it */
1800                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1801                                            {
1802                                                    refile_buffer(bh);
1803                                                    continue;
1804                                            }
1805                                           
1806                                           if (bh->b_lock || !bh->b_dirt)
1807                                                    continue;
1808                                           /* Should we write back buffers that are shared or not?
1809                                              Currently dirty buffers are not shared, so it does not matter. */
1810                                           bh->b_count++;
1811                                           ndirty++;
1812                                           bh->b_flushtime = 0;
1813                                           ll_rw_block(WRITE, 1, &bh);
1814 #ifdef DEBUG
1815                                           if(nlist != BUF_DIRTY) ncount++;
1816 #endif
1817                                           bh->b_count--;
1818                                   }
1819                  }
1820 #ifdef DEBUG
1821                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1822                 printk("sleeping again.\n");
1823 #endif
1824                 wake_up(&bdflush_done);
1825                 
1826                 /* If there are still a lot of dirty buffers around, skip the sleep
1827                    and flush some more */
1828                 
1829                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1830                    bdf_prm.b_un.nfract/100) {
1831                         if (current->signal & (1 << (SIGKILL-1))) {
1832                                 bdflush_running--;
1833                                 return 0;
1834                         }
1835                         current->signal = 0;
1836                         interruptible_sleep_on(&bdflush_wait);
1837                 }
1838         }
1839 }
1840 
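Given the encoding above, a user-space program can read or set a tuning parameter without ever becoming the daemon. A minimal, hypothetical sketch, assuming the call is reachable as syscall(SYS_bdflush, func, data) and that parameter 0 exists; neither the syscall number nor a libc wrapper appears in this file.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long value = 0;

        /* func 2*i+2 reads parameter i into the address passed as data;
           func 2*i+3 sets parameter i to the value of data. */
        if (syscall(SYS_bdflush, 2, &value) == 0)
                printf("bdflush parameter 0 = %ld\n", value);

        /* Setting a parameter (left commented out; values are range-checked
           against bdflush_min[] and bdflush_max[] in sys_bdflush()):
           syscall(SYS_bdflush, 3, 500);  */
        return 0;
}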
1841 
1842 /*
1843  * Overrides for Emacs so that we follow Linus's tabbing style.
1844  * Emacs will notice this stuff at the end of the file and automatically
1845  * adjust the settings for this buffer only.  This must remain at the end
1846  * of the file.
1847  * ---------------------------------------------------------------------------
1848  * Local variables:
1849  * c-indent-level: 8
1850  * c-brace-imaginary-offset: 0
1851  * c-brace-offset: -8
1852  * c-argdecl-indent: 8
1853  * c-label-offset: -8
1854  * c-continued-statement-offset: 8
1855  * c-continued-brace-offset: 0
1856  * End:
1857  */
