root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. check_aligned
  32. try_to_load_aligned
  33. try_to_share_buffers
  34. bread_page
  35. grow_buffers
  36. try_to_free
  37. maybe_shrink_lav_buffers
  38. shrink_buffers
  39. shrink_specific_buffers
  40. show_buffers
  41. try_to_reassign
  42. reassign_cluster
  43. try_to_generate_cluster
  44. generate_cluster
  45. buffer_init
  46. wakeup_bdflush
  47. sync_old_buffers
  48. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #define NR_SIZES 4
  33 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  35 
  36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
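
/*
 * Editorial note (not part of the original source): BUFSIZE_INDEX maps a
 * block size in bytes to an index into the per-size arrays below, via the
 * buffersize_index[] table indexed by (size >> 9).  For the supported sizes:
 *
 *      BUFSIZE_INDEX(512)  == 0
 *      BUFSIZE_INDEX(1024) == 1
 *      BUFSIZE_INDEX(2048) == 2
 *      BUFSIZE_INDEX(4096) == 3
 *
 * and bufferindex_size[BUFSIZE_INDEX(n)] == n for each of them; any other
 * size hits a -1 entry and would yield an invalid index.
 */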
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
  59 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  60 extern int *blksize_size[];
  61 
  62 /* Here is the parameter block for the bdflush process. */
  63 static void wakeup_bdflush(int);
  64 
  65 #define N_PARAM 9
  66 #define LAV
  67 
  68 static union bdflush_param{
  69         struct {
  70                 int nfract;  /* Percentage of buffer cache dirty to 
  71                                 activate bdflush */
  72                 int ndirty;  /* Maximum number of dirty blocks to write out per
  73                                 wake-cycle */
  74                 int nrefill; /* Number of clean buffers to try and obtain
  75                                 each time we call refill */
  76                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  77                                   when trying to refill buffers. */
  78                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  79                                     search for free clusters */
  80                 int age_buffer;  /* Time for normal buffer to age before 
  81                                     we flush it */
  82                 int age_super;  /* Time for superblock to age before we 
  83                                    flush it */
  84                 int lav_const;  /* Constant used for load average (time
  85                                    constant) */
  86                 int lav_ratio;  /* Used to determine how low a lav for a
  87                                    particular size can go before we start to
  88                                    trim back the buffers */
  89         } b_un;
  90         unsigned int data[N_PARAM];
  91 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
  92 
  93 /* The lav constant is set for 1 minute, as long as the update process runs
  94    every 5 seconds.  If you change the frequency of update, the time
  95    constant will also change. */
  96 
  97 
  98 /* These are the min and max parameter values that we will allow to be assigned */
  99 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
 100 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
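
/*
 * Editorial sketch (not part of the original source): because bdf_prm is a
 * union, the named b_un fields and the data[] array alias the same storage,
 * so a parameter can be read or set through either view, e.g.
 *
 *      bdf_prm.b_un.ndirty = 300;      (by field name)
 *      bdf_prm.data[1]     = 300;      (equivalent: index 1 is ndirty)
 *
 * The data[] view is what makes a table-driven interface possible, with each
 * assignment range-checked against bdflush_min[] and bdflush_max[] above.
 */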
 101 
 102 /*
 103  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 104  * and got rid of the cli-sti pairs. The wait-queue routines still
 105  * need cli-sti, but now it's just a couple of 386 instructions or so.
 106  *
 107  * Note that the real wait_on_buffer() is an inline function that checks
 108  * if 'b_wait' is set before calling this, so that the queues aren't set
 109  * up unnecessarily.
 110  */
 111 void __wait_on_buffer(struct buffer_head * bh)
 112 {
 113         struct wait_queue wait = { current, NULL };
 114 
 115         bh->b_count++;
 116         add_wait_queue(&bh->b_wait, &wait);
 117 repeat:
 118         current->state = TASK_UNINTERRUPTIBLE;
 119         if (bh->b_lock) {
 120                 schedule();
 121                 goto repeat;
 122         }
 123         remove_wait_queue(&bh->b_wait, &wait);
 124         bh->b_count--;
 125         current->state = TASK_RUNNING;
 126 }
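
/*
 * Editorial sketch (not part of the original source): the inline
 * wait_on_buffer() wrapper mentioned above is defined outside this file
 * (via one of the headers included at the top); assuming it simply skips
 * the call when there is nothing to wait for, it amounts to roughly
 *
 *      extern inline void wait_on_buffer(struct buffer_head * bh)
 *      {
 *              if (bh->b_lock)
 *                      __wait_on_buffer(bh);
 *      }
 *
 * so the wait queue is only set up for buffers that are actually locked.
 */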
 127 
 128 /* Call sync_buffers with wait!=0 to ensure that the call does not
 129    return until all buffer writes have completed.  Sync() may return
 130    before the writes have finished; fsync() may not. */
 131 
 132 
 133 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
  134    spontaneously dirty themselves without brelse ever being called.
 135    We will ultimately want to put these in a separate list, but for
 136    now we search all of the lists for dirty buffers */
 137 
 138 static int sync_buffers(dev_t dev, int wait)
 139 {
 140         int i, retry, pass = 0, err = 0;
 141         int nlist, ncount;
 142         struct buffer_head * bh, *next;
 143 
 144         /* One pass for no-wait, three for wait:
 145            0) write out all dirty, unlocked buffers;
 146            1) write out all dirty buffers, waiting if locked;
 147            2) wait for completion by waiting for all buffers to unlock. */
 148  repeat:
 149         retry = 0;
 150         ncount = 0;
 151         /* We search all lists as a failsafe mechanism, not because we expect
 152            there to be dirty buffers on any of the other lists. */
 153         for(nlist = 0; nlist < NR_LIST; nlist++)
 154          {
 155          repeat1:
 156                  bh = lru_list[nlist];
 157                  if(!bh) continue;
 158                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 159                          if(bh->b_list != nlist) goto repeat1;
 160                          next = bh->b_next_free;
 161                          if(!lru_list[nlist]) break;
 162                          if (dev && bh->b_dev != dev)
 163                                   continue;
 164                          if (bh->b_lock)
 165                           {
 166                                   /* Buffer is locked; skip it unless wait is
 167                                      requested AND pass > 0. */
 168                                   if (!wait || !pass) {
 169                                           retry = 1;
 170                                           continue;
 171                                   }
 172                                   wait_on_buffer (bh);
 173                           }
 174                          /* If an unlocked buffer is not uptodate, there has
 175                              been an IO error. Skip it. */
 176                          if (wait && bh->b_req && !bh->b_lock &&
 177                              !bh->b_dirt && !bh->b_uptodate) {
 178                                   err = 1;
 179                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 180                                   continue;
 181                           }
 182                          /* Don't write clean buffers.  Don't write ANY buffers
 183                             on the third pass. */
 184                          if (!bh->b_dirt || pass>=2)
 185                                   continue;
 186                          bh->b_count++;
 187                          bh->b_flushtime = 0;
 188                          ll_rw_block(WRITE, 1, &bh);
 189 
 190                          if(nlist != BUF_DIRTY) { 
 191                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 192                                  ncount++;
 193                          };
 194                          bh->b_count--;
 195                          retry = 1;
 196                  }
 197          }
 198         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 199         
 200         /* If we are waiting for the sync to succeed, and if any dirty
 201            blocks were written, then repeat; on the second pass, only
 202            wait for buffers being written (do not pass to write any
 203            more buffers on the second pass). */
 204         if (wait && retry && ++pass<=2)
 205                  goto repeat;
 206         return err;
 207 }
 208 
 209 void sync_dev(dev_t dev)
 210 {
 211         sync_buffers(dev, 0);
 212         sync_supers(dev);
 213         sync_inodes(dev);
 214         sync_buffers(dev, 0);
 215 }
 216 
 217 int fsync_dev(dev_t dev)
 218 {
 219         sync_buffers(dev, 0);
 220         sync_supers(dev);
 221         sync_inodes(dev);
 222         return sync_buffers(dev, 1);
 223 }
 224 
 225 asmlinkage int sys_sync(void)
 226 {
 227         sync_dev(0);
 228         return 0;
 229 }
 230 
 231 int file_fsync (struct inode *inode, struct file *filp)
 232 {
 233         return fsync_dev(inode->i_dev);
 234 }
 235 
 236 asmlinkage int sys_fsync(unsigned int fd)
 237 {
 238         struct file * file;
 239         struct inode * inode;
 240 
 241         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 242                 return -EBADF;
 243         if (!file->f_op || !file->f_op->fsync)
 244                 return -EINVAL;
 245         if (file->f_op->fsync(inode,file))
 246                 return -EIO;
 247         return 0;
 248 }
 249 
 250 void invalidate_buffers(dev_t dev)
 251 {
 252         int i;
 253         int nlist;
 254         struct buffer_head * bh;
 255 
 256         for(nlist = 0; nlist < NR_LIST; nlist++) {
 257                 bh = lru_list[nlist];
 258                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 259                      bh = bh->b_next_free) {
 260                         if (bh->b_dev != dev)
 261                                  continue;
 262                         wait_on_buffer(bh);
 263                         if (bh->b_dev == dev)
 264                                  bh->b_flushtime = bh->b_uptodate = 
 265                                           bh->b_dirt = bh->b_req = 0;
 266                 }
 267         }
 268 }
 269 
 270 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 271 #define hash(dev,block) hash_table[_hashfn(dev,block)]
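
/*
 * Editorial example (not part of the original source): a cached block is
 * located by XOR-ing the device number with the block number and reducing
 * modulo the hash table size; e.g. for a hypothetical dev 0x0301, block 42:
 *
 *      index = ((unsigned)(0x0301 ^ 42)) % nr_hash;
 *      bh    = hash_table[index];       (head of the chain, walked via b_next)
 */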
 272 
 273 static inline void remove_from_hash_queue(struct buffer_head * bh)
 274 {
 275         if (bh->b_next)
 276                 bh->b_next->b_prev = bh->b_prev;
 277         if (bh->b_prev)
 278                 bh->b_prev->b_next = bh->b_next;
 279         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 280                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 281         bh->b_next = bh->b_prev = NULL;
 282 }
 283 
 284 static inline void remove_from_lru_list(struct buffer_head * bh)
 285 {
 286         if (!(bh->b_prev_free) || !(bh->b_next_free))
 287                 panic("VFS: LRU block list corrupted");
 288         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 289         bh->b_prev_free->b_next_free = bh->b_next_free;
 290         bh->b_next_free->b_prev_free = bh->b_prev_free;
 291 
 292         if (lru_list[bh->b_list] == bh)
 293                  lru_list[bh->b_list] = bh->b_next_free;
 294         if(lru_list[bh->b_list] == bh)
 295                  lru_list[bh->b_list] = NULL;
 296         bh->b_next_free = bh->b_prev_free = NULL;
 297 }
 298 
 299 static inline void remove_from_free_list(struct buffer_head * bh)
 300 {
 301         int isize = BUFSIZE_INDEX(bh->b_size);
 302         if (!(bh->b_prev_free) || !(bh->b_next_free))
 303                 panic("VFS: Free block list corrupted");
 304         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 305         if(!free_list[isize])
 306                  panic("Free list empty");
 307         nr_free[isize]--;
 308         if(bh->b_next_free == bh)
 309                  free_list[isize] = NULL;
 310         else {
 311                 bh->b_prev_free->b_next_free = bh->b_next_free;
 312                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 313                 if (free_list[isize] == bh)
 314                          free_list[isize] = bh->b_next_free;
 315         };
 316         bh->b_next_free = bh->b_prev_free = NULL;
 317 }
 318 
 319 static inline void remove_from_queues(struct buffer_head * bh)
 320 {
 321         if(bh->b_dev == 0xffff) {
 322                 remove_from_free_list(bh); /* Free list entries should not be
 323                                               in the hash queue */
 324                 return;
 325         };
 326         nr_buffers_type[bh->b_list]--;
 327         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 328         remove_from_hash_queue(bh);
 329         remove_from_lru_list(bh);
 330 }
 331 
 332 static inline void put_last_lru(struct buffer_head * bh)
 333 {
 334         if (!bh)
 335                 return;
 336         if (bh == lru_list[bh->b_list]) {
 337                 lru_list[bh->b_list] = bh->b_next_free;
 338                 return;
 339         }
 340         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 341         remove_from_lru_list(bh);
  342 /* add to back of the LRU list */
 343 
 344         if(!lru_list[bh->b_list]) {
 345                 lru_list[bh->b_list] = bh;
 346                 lru_list[bh->b_list]->b_prev_free = bh;
 347         };
 348 
 349         bh->b_next_free = lru_list[bh->b_list];
 350         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 351         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 352         lru_list[bh->b_list]->b_prev_free = bh;
 353 }
 354 
 355 static inline void put_last_free(struct buffer_head * bh)
 356 {
 357         int isize;
 358         if (!bh)
 359                 return;
 360 
 361         isize = BUFSIZE_INDEX(bh->b_size);      
 362         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 363 /* add to back of free list */
 364 
 365         if(!free_list[isize]) {
 366                 free_list[isize] = bh;
 367                 bh->b_prev_free = bh;
 368         };
 369 
 370         nr_free[isize]++;
 371         bh->b_next_free = free_list[isize];
 372         bh->b_prev_free = free_list[isize]->b_prev_free;
 373         free_list[isize]->b_prev_free->b_next_free = bh;
 374         free_list[isize]->b_prev_free = bh;
 375 }
 376 
 377 static inline void insert_into_queues(struct buffer_head * bh)
 378 {
  379 /* put unused buffers at the end of the free list, others at the end of the LRU list */
 380 
 381         if(bh->b_dev == 0xffff) {
 382                 put_last_free(bh);
 383                 return;
 384         };
 385         if(!lru_list[bh->b_list]) {
 386                 lru_list[bh->b_list] = bh;
 387                 bh->b_prev_free = bh;
 388         };
 389         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 390         bh->b_next_free = lru_list[bh->b_list];
 391         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 392         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 393         lru_list[bh->b_list]->b_prev_free = bh;
 394         nr_buffers_type[bh->b_list]++;
 395         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 396 /* put the buffer in new hash-queue if it has a device */
 397         bh->b_prev = NULL;
 398         bh->b_next = NULL;
 399         if (!bh->b_dev)
 400                 return;
 401         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 402         hash(bh->b_dev,bh->b_blocknr) = bh;
 403         if (bh->b_next)
 404                 bh->b_next->b_prev = bh;
 405 }
 406 
 407 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 408 {               
 409         struct buffer_head * tmp;
 410 
 411         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 412                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 413                         if (tmp->b_size == size)
 414                                 return tmp;
 415                         else {
 416                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 417                                                         MAJOR(dev), MINOR(dev));
 418                                 return NULL;
 419                         }
 420         return NULL;
 421 }
 422 
 423 /*
 424  * Why like this, I hear you say... The reason is race-conditions.
  425  * As we don't lock buffers (unless we are reading them, that is),
 426  * something might happen to it while we sleep (ie a read-error
 427  * will force it bad). This shouldn't really happen currently, but
 428  * the code is ready.
 429  */
 430 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 431 {
 432         struct buffer_head * bh;
 433 
 434         for (;;) {
 435                 if (!(bh=find_buffer(dev,block,size)))
 436                         return NULL;
 437                 bh->b_count++;
 438                 wait_on_buffer(bh);
 439                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 440                         return bh;
 441                 bh->b_count--;
 442         }
 443 }
 444 
 445 void set_blocksize(dev_t dev, int size)
 446 {
 447         int i, nlist;
 448         struct buffer_head * bh, *bhnext;
 449 
 450         if (!blksize_size[MAJOR(dev)])
 451                 return;
 452 
 453         switch(size) {
 454                 default: panic("Invalid blocksize passed to set_blocksize");
 455                 case 512: case 1024: case 2048: case 4096:;
 456         }
 457 
 458         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 459                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 460                 return;
 461         }
 462         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 463                 return;
 464         sync_buffers(dev, 2);
 465         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 466 
 467   /* We need to be quite careful how we do this - we are moving entries
 468      around on the free list, and we can get in a loop if we are not careful.*/
 469 
 470         for(nlist = 0; nlist < NR_LIST; nlist++) {
 471                 bh = lru_list[nlist];
 472                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 473                         if(!bh) break;
 474                         bhnext = bh->b_next_free; 
 475                         if (bh->b_dev != dev)
 476                                  continue;
 477                         if (bh->b_size == size)
 478                                  continue;
 479                         
 480                         wait_on_buffer(bh);
 481                         if (bh->b_dev == dev && bh->b_size != size) {
 482                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 483                                          bh->b_flushtime = 0;
 484                         };
 485                         remove_from_hash_queue(bh);
 486                 }
 487         }
 488 }
 489 
 490 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
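
/*
 * Editorial note (not part of the original source): BADNESS() ranks how
 * undesirable a buffer is as a reclaim victim:
 *
 *      clean and unlocked   ->  0      (ideal candidate)
 *      locked only          ->  1
 *      dirty only           ->  2
 *      dirty and locked     ->  3
 *
 * refill_freelist() below only takes buffers with BADNESS() == 0.
 */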
 491 
 492 void refill_freelist(int size)
 493 {
 494         struct buffer_head * bh, * tmp;
 495         struct buffer_head * candidate[NR_LIST];
 496         unsigned int best_time, winner;
 497         int isize = BUFSIZE_INDEX(size);
 498         int buffers[NR_LIST];
 499         int i;
 500         int needed;
 501 
 502         /* First see if we even need this.  Sometimes it is advantageous
  503          to request some blocks in a filesystem that we know we will
 504          be needing ahead of time. */
 505 
 506         if (nr_free[isize] > 100)
 507                 return;
 508 
 509         /* If there are too many dirty buffers, we wake up the update process
 510            now so as to ensure that there are still clean buffers available
 511            for user processes to use (and dirty) */
 512         
 513         /* We are going to try and locate this much memory */
  514         needed = bdf_prm.b_un.nrefill * size;
 515 
 516         while (nr_free_pages > min_free_pages && needed > 0 &&
 517                grow_buffers(GFP_BUFFER, size)) {
 518                 needed -= PAGE_SIZE;
 519         }
 520 
 521         if(needed <= 0) return;
 522 
 523         /* See if there are too many buffers of a different size.
 524            If so, victimize them */
 525 
 526         while(maybe_shrink_lav_buffers(size))
 527          {
 528                  if(!grow_buffers(GFP_BUFFER, size)) break;
 529                  needed -= PAGE_SIZE;
 530                  if(needed <= 0) return;
 531          };
 532 
 533         /* OK, we cannot grow the buffer cache, now try and get some
 534            from the lru list */
 535 
 536         /* First set the candidate pointers to usable buffers.  This
 537            should be quick nearly all of the time. */
 538 
 539 repeat0:
 540         for(i=0; i<NR_LIST; i++){
 541                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 542                    nr_buffers_type[i] == 0) {
 543                         candidate[i] = NULL;
 544                         buffers[i] = 0;
 545                         continue;
 546                 }
 547                 buffers[i] = nr_buffers_type[i];
 548                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 549                  {
 550                          if(buffers[i] < 0) panic("Here is the problem");
 551                          tmp = bh->b_next_free;
 552                          if (!bh) break;
 553                          
 554                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 555                              bh->b_dirt) {
 556                                  refile_buffer(bh);
 557                                  continue;
 558                          };
 559                          
 560                          if (bh->b_count || bh->b_size != size)
 561                                   continue;
 562                          
 563                          /* Buffers are written in the order they are placed 
 564                             on the locked list. If we encounter a locked
 565                             buffer here, this means that the rest of them
 566                             are also locked */
 567                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 568                                  buffers[i] = 0;
 569                                  break;
 570                          }
 571                          
 572                          if (BADNESS(bh)) continue;
 573                          break;
 574                  };
 575                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 576                 else candidate[i] = bh;
 577                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 578         }
 579         
 580  repeat:
 581         if(needed <= 0) return;
 582         
 583         /* Now see which candidate wins the election */
 584         
 585         winner = best_time = UINT_MAX;  
 586         for(i=0; i<NR_LIST; i++){
 587                 if(!candidate[i]) continue;
 588                 if(candidate[i]->b_lru_time < best_time){
 589                         best_time = candidate[i]->b_lru_time;
 590                         winner = i;
 591                 }
 592         }
 593         
 594         /* If we have a winner, use it, and then get a new candidate from that list */
 595         if(winner != UINT_MAX) {
 596                 i = winner;
 597                 bh = candidate[i];
 598                 candidate[i] = bh->b_next_free;
 599                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 600                 if (bh->b_count || bh->b_size != size)
 601                          panic("Busy buffer in candidate list\n");
 602                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 603                          panic("Shared buffer in candidate list\n");
 604                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 605                 
 606                 if(bh->b_dev == 0xffff) panic("Wrong list");
 607                 remove_from_queues(bh);
 608                 bh->b_dev = 0xffff;
 609                 put_last_free(bh);
 610                 needed -= bh->b_size;
 611                 buffers[i]--;
 612                 if(buffers[i] < 0) panic("Here is the problem");
 613                 
 614                 if(buffers[i] == 0) candidate[i] = NULL;
 615                 
 616                 /* Now all we need to do is advance the candidate pointer
 617                    from the winner list to the next usable buffer */
 618                 if(candidate[i] && buffers[i] > 0){
 619                         if(buffers[i] <= 0) panic("Here is another problem");
 620                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 621                                 if(buffers[i] < 0) panic("Here is the problem");
 622                                 tmp = bh->b_next_free;
 623                                 if (!bh) break;
 624                                 
 625                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 626                                     bh->b_dirt) {
 627                                         refile_buffer(bh);
 628                                         continue;
 629                                 };
 630                                 
 631                                 if (bh->b_count || bh->b_size != size)
 632                                          continue;
 633                                 
 634                                 /* Buffers are written in the order they are
 635                                    placed on the locked list.  If we encounter
 636                                    a locked buffer here, this means that the
 637                                    rest of them are also locked */
 638                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 639                                         buffers[i] = 0;
 640                                         break;
 641                                 }
 642               
 643                                 if (BADNESS(bh)) continue;
 644                                 break;
 645                         };
 646                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 647                         else candidate[i] = bh;
 648                         if(candidate[i] && candidate[i]->b_count) 
 649                                  panic("Here is the problem");
 650                 }
 651                 
 652                 goto repeat;
 653         }
 654         
 655         if(needed <= 0) return;
 656         
 657         /* Too bad, that was not enough. Try a little harder to grow some. */
 658         
 659         if (nr_free_pages > 5) {
 660                 if (grow_buffers(GFP_BUFFER, size)) {
 661                         needed -= PAGE_SIZE;
 662                         goto repeat0;
 663                 };
 664         }
 665         
 666         /* and repeat until we find something good */
 667         if (!grow_buffers(GFP_ATOMIC, size))
 668                 wakeup_bdflush(1);
 669         needed -= PAGE_SIZE;
 670         goto repeat0;
 671 }
 672 
 673 /*
 674  * Ok, this is getblk, and it isn't very clear, again to hinder
 675  * race-conditions. Most of the code is seldom used, (ie repeating),
 676  * so it should be much more efficient than it looks.
 677  *
  678  * The algorithm is changed: hopefully better, and an elusive bug removed.
 679  *
 680  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 681  * when the filesystem starts to get full of dirty blocks (I hope).
 682  */
 683 struct buffer_head * getblk(dev_t dev, int block, int size)
 684 {
 685         struct buffer_head * bh;
 686         int isize = BUFSIZE_INDEX(size);
 687 
 688         /* Update this for the buffer size lav. */
 689         buffer_usage[isize]++;
 690 
 691         /* If there are too many dirty buffers, we wake up the update process
 692            now so as to ensure that there are still clean buffers available
 693            for user processes to use (and dirty) */
 694 repeat:
 695         bh = get_hash_table(dev, block, size);
 696         if (bh) {
 697                 if (bh->b_uptodate && !bh->b_dirt)
 698                          put_last_lru(bh);
 699                 if(!bh->b_dirt) bh->b_flushtime = 0;
 700                 return bh;
 701         }
 702 
 703         while(!free_list[isize]) refill_freelist(size);
 704         
 705         if (find_buffer(dev,block,size))
 706                  goto repeat;
 707 
 708         bh = free_list[isize];
 709         remove_from_free_list(bh);
 710 
  711 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 712 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 713         bh->b_count=1;
 714         bh->b_dirt=0;
 715         bh->b_lock=0;
 716         bh->b_uptodate=0;
 717         bh->b_flushtime = 0;
 718         bh->b_req=0;
 719         bh->b_dev=dev;
 720         bh->b_blocknr=block;
 721         insert_into_queues(bh);
 722         return bh;
 723 }
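
/*
 * Editorial note (not part of the original source): unlike bread() below,
 * getblk() never starts I/O.  For a block that was not already cached it
 * returns a buffer with b_uptodate == 0; the caller either fills the whole
 * block itself or reads it with ll_rw_block() and waits, which is exactly
 * what bread() does.
 */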
 724 
 725 void set_writetime(struct buffer_head * buf, int flag)
 726 {
 727         int newtime;
 728 
 729         if (buf->b_dirt){
  730                 /* Record when this buffer should be flushed, keeping any earlier deadline */
 731                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 732                                      bdf_prm.b_un.age_buffer);
 733                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 734                          buf->b_flushtime = newtime;
 735         } else {
 736                 buf->b_flushtime = 0;
 737         }
 738 }
 739 
 740 
 741 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 742                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
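
/*
 * Editorial note (not part of the original source): refile_buffer() below
 * builds the index into this table from three bits - shared (1), locked (2)
 * and dirty (4).  For example, a dirty, locked, unshared buffer gives
 * i = 2 + 4 = 6 and buffer_disposition[6] == BUF_DIRTY, while a clean,
 * unlocked, shared buffer gives i = 1 and lands on BUF_SHARED.
 */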
 743 
 744 void refile_buffer(struct buffer_head * buf){
 745         int i, dispose;
 746         i = 0;
 747         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 748         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 749         if(buf->b_lock) i |= 2;
 750         if(buf->b_dirt) i |= 4;
 751         dispose = buffer_disposition[i];
 752         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 753                  dispose = BUF_UNSHARED;
 754         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 755         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 756         if(dispose != buf->b_list)  {
 757                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 758                          buf->b_lru_time = jiffies;
 759                 if(dispose == BUF_LOCKED && 
 760                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 761                          dispose = BUF_LOCKED1;
 762                 remove_from_queues(buf);
 763                 buf->b_list = dispose;
 764                 insert_into_queues(buf);
 765                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 766                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 767                    bdf_prm.b_un.nfract/100)
 768                          wakeup_bdflush(0);
 769         }
 770 }
 771 
 772 void brelse(struct buffer_head * buf)
 773 {
 774         if (!buf)
 775                 return;
 776         wait_on_buffer(buf);
 777 
 778         /* If dirty, mark the time this buffer should be written back */
 779         set_writetime(buf, 0);
 780         refile_buffer(buf);
 781 
 782         if (buf->b_count) {
 783                 if (--buf->b_count)
 784                         return;
 785                 wake_up(&buffer_wait);
 786                 return;
 787         }
 788         printk("VFS: brelse: Trying to free free buffer\n");
 789 }
 790 
 791 /*
 792  * bread() reads a specified block and returns the buffer that contains
 793  * it. It returns NULL if the block was unreadable.
 794  */
 795 struct buffer_head * bread(dev_t dev, int block, int size)
 796 {
 797         struct buffer_head * bh;
 798 
 799         if (!(bh = getblk(dev, block, size))) {
 800                 printk("VFS: bread: READ error on device %d/%d\n",
 801                                                 MAJOR(dev), MINOR(dev));
 802                 return NULL;
 803         }
 804         if (bh->b_uptodate)
 805                 return bh;
 806         ll_rw_block(READ, 1, &bh);
 807         wait_on_buffer(bh);
 808         if (bh->b_uptodate)
 809                 return bh;
 810         brelse(bh);
 811         return NULL;
 812 }
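
/*
 * Editorial usage sketch (not part of the original source): a filesystem
 * typically wraps a metadata read in a bread()/brelse() pair; the block
 * number and the my_use_block() helper below are purely hypothetical:
 *
 *      struct buffer_head * bh;
 *
 *      bh = bread(dev, 1, 1024);       (read block 1, 1kB block size)
 *      if (bh) {
 *              my_use_block(bh->b_data);
 *              brelse(bh);             (drop the reference when done)
 *      }
 */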
 813 
 814 /*
  815  * Ok, breada can be used like bread, but it additionally starts
  816  * read-ahead on the blocks that follow, bounded by the device's
  817  * read-ahead setting and by the end of the file (pos/filesize).
 818  */
 819 
 820 #define NBUF 16
 821 
 822 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 823         unsigned int pos, unsigned int filesize)
 824 {
 825         struct buffer_head * bhlist[NBUF];
 826         unsigned int blocks;
 827         struct buffer_head * bh;
 828         int index;
 829         int i, j;
 830 
 831         if (pos >= filesize)
 832                 return NULL;
 833 
 834         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 835                 return NULL;
 836 
 837         index = BUFSIZE_INDEX(bh->b_size);
 838 
 839         if (bh->b_uptodate)
 840                 return bh;
 841 
 842         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 843 
 844         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 845                 blocks = read_ahead[MAJOR(dev)] >> index;
 846         if (blocks > NBUF)
 847                 blocks = NBUF;
 848         
 849         bhlist[0] = bh;
 850         j = 1;
 851         for(i=1; i<blocks; i++) {
 852                 bh = getblk(dev,block+i,bufsize);
 853                 if (bh->b_uptodate) {
 854                         brelse(bh);
 855                         break;
 856                 }
 857                 bhlist[j++] = bh;
 858         }
 859 
 860         /* Request the read for these buffers, and then release them */
 861         ll_rw_block(READ, j, bhlist);
 862 
 863         for(i=1; i<j; i++)
 864                 brelse(bhlist[i]);
 865 
 866         /* Wait for this buffer, and then continue on */
 867         bh = bhlist[0];
 868         wait_on_buffer(bh);
 869         if (bh->b_uptodate)
 870                 return bh;
 871         brelse(bh);
 872         return NULL;
 873 }
 874 
 875 /*
 876  * See fs/inode.c for the weird use of volatile..
 877  */
 878 static void put_unused_buffer_head(struct buffer_head * bh)
 879 {
 880         struct wait_queue * wait;
 881 
 882         wait = ((volatile struct buffer_head *) bh)->b_wait;
 883         memset((void *) bh,0,sizeof(*bh));
 884         ((volatile struct buffer_head *) bh)->b_wait = wait;
 885         bh->b_next_free = unused_list;
 886         unused_list = bh;
 887 }
 888 
 889 static void get_more_buffer_heads(void)
 890 {
 891         int i;
 892         struct buffer_head * bh;
 893 
 894         if (unused_list)
 895                 return;
 896 
 897         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 898                 return;
 899 
 900         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 901                 bh->b_next_free = unused_list;  /* only make link */
 902                 unused_list = bh++;
 903         }
 904 }
 905 
 906 static struct buffer_head * get_unused_buffer_head(void)
 907 {
 908         struct buffer_head * bh;
 909 
 910         get_more_buffer_heads();
 911         if (!unused_list)
 912                 return NULL;
 913         bh = unused_list;
 914         unused_list = bh->b_next_free;
 915         bh->b_next_free = NULL;
 916         bh->b_data = NULL;
 917         bh->b_size = 0;
 918         bh->b_req = 0;
 919         return bh;
 920 }
 921 
 922 /*
 923  * Create the appropriate buffers when given a page for data area and
 924  * the size of each buffer.. Use the bh->b_this_page linked list to
 925  * follow the buffers created.  Return NULL if unable to create more
 926  * buffers.
 927  */
 928 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 929 {
 930         struct buffer_head *bh, *head;
 931         unsigned long offset;
 932 
 933         head = NULL;
 934         offset = PAGE_SIZE;
 935         while ((offset -= size) < PAGE_SIZE) {
 936                 bh = get_unused_buffer_head();
 937                 if (!bh)
 938                         goto no_grow;
 939                 bh->b_this_page = head;
 940                 head = bh;
 941                 bh->b_data = (char *) (page+offset);
 942                 bh->b_size = size;
 943                 bh->b_dev = 0xffff;  /* Flag as unused */
 944         }
 945         return head;
 946 /*
 947  * In case anything failed, we just free everything we got.
 948  */
 949 no_grow:
 950         bh = head;
 951         while (bh) {
 952                 head = bh;
 953                 bh = bh->b_this_page;
 954                 put_unused_buffer_head(head);
 955         }
 956         return NULL;
 957 }
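
/*
 * Editorial note (not part of the original source): the b_this_page chain
 * built above is NULL-terminated (the first head created ends the list).
 * Callers such as grow_buffers() and try_to_load_aligned() walk the chain
 * and then close it into a ring by linking the last head back to the first,
 * which is the form try_to_free() later relies on when it cycles with
 * "do { ... } while (tmp != bh)".
 */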
 958 
 959 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 960 {
 961         int i;
 962         int bhnum = 0;
 963         struct buffer_head * bhr[8];
 964 
 965         for (i = 0 ; i < nrbuf ; i++) {
 966                 if (bh[i] && !bh[i]->b_uptodate)
 967                         bhr[bhnum++] = bh[i];
 968         }
 969         if (bhnum)
 970                 ll_rw_block(READ, bhnum, bhr);
 971         for (i = 0 ; i < nrbuf ; i++) {
 972                 if (bh[i]) {
 973                         wait_on_buffer(bh[i]);
 974                 }
 975         }
 976 }
 977 
 978 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 979         dev_t dev, int *b, int size)
 980 {
 981         struct buffer_head * bh[8];
 982         unsigned long page;
 983         unsigned long offset;
 984         int block;
 985         int nrbuf;
 986 
 987         page = (unsigned long) first->b_data;
 988         if (page & ~PAGE_MASK) {
 989                 brelse(first);
 990                 return 0;
 991         }
 992         mem_map[MAP_NR(page)]++;
 993         bh[0] = first;
 994         nrbuf = 1;
 995         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
 996                 block = *++b;
 997                 if (!block)
 998                         goto no_go;
 999                 first = get_hash_table(dev, block, size);
1000                 if (!first)
1001                         goto no_go;
1002                 bh[nrbuf++] = first;
1003                 if (page+offset != (unsigned long) first->b_data)
1004                         goto no_go;
1005         }
1006         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1007         while (nrbuf-- > 0)
1008                 brelse(bh[nrbuf]);
1009         free_page(address);
1010         ++current->mm->min_flt;
1011         return page;
1012 no_go:
1013         while (nrbuf-- > 0)
1014                 brelse(bh[nrbuf]);
1015         free_page(page);
1016         return 0;
1017 }
1018 
1019 static unsigned long try_to_load_aligned(unsigned long address,
1020         dev_t dev, int b[], int size)
1021 {
1022         struct buffer_head * bh, * tmp, * arr[8];
1023         unsigned long offset;
1024         int isize = BUFSIZE_INDEX(size);
1025         int * p;
1026         int block;
1027 
1028         bh = create_buffers(address, size);
1029         if (!bh)
1030                 return 0;
1031         /* do any of the buffers already exist? punt if so.. */
1032         p = b;
1033         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1034                 block = *(p++);
1035                 if (!block)
1036                         goto not_aligned;
1037                 if (find_buffer(dev, block, size))
1038                         goto not_aligned;
1039         }
1040         tmp = bh;
1041         p = b;
1042         block = 0;
1043         while (1) {
1044                 arr[block++] = bh;
1045                 bh->b_count = 1;
1046                 bh->b_dirt = 0;
1047                 bh->b_flushtime = 0;
1048                 bh->b_uptodate = 0;
1049                 bh->b_req = 0;
1050                 bh->b_dev = dev;
1051                 bh->b_blocknr = *(p++);
1052                 bh->b_list = BUF_CLEAN;
1053                 nr_buffers++;
1054                 nr_buffers_size[isize]++;
1055                 insert_into_queues(bh);
1056                 if (bh->b_this_page)
1057                         bh = bh->b_this_page;
1058                 else
1059                         break;
1060         }
1061         buffermem += PAGE_SIZE;
1062         bh->b_this_page = tmp;
1063         mem_map[MAP_NR(address)]++;
1064         buffer_pages[address >> PAGE_SHIFT] = bh;
1065         read_buffers(arr,block);
1066         while (block-- > 0)
1067                 brelse(arr[block]);
1068         ++current->mm->maj_flt;
1069         return address;
1070 not_aligned:
1071         while ((tmp = bh) != NULL) {
1072                 bh = bh->b_this_page;
1073                 put_unused_buffer_head(tmp);
1074         }
1075         return 0;
1076 }
1077 
1078 /*
1079  * Try-to-share-buffers tries to minimize memory use by trying to keep
1080  * both code pages and the buffer area in the same page. This is done by
1081  * (a) checking if the buffers are already aligned correctly in memory and
1082  * (b) if none of the buffer heads are in memory at all, trying to load
1083  * them into memory the way we want them.
1084  *
1085  * This doesn't guarantee that the memory is shared, but should under most
1086  * circumstances work very well indeed (ie >90% sharing of code pages on
1087  * demand-loadable executables).
1088  */
1089 static inline unsigned long try_to_share_buffers(unsigned long address,
1090         dev_t dev, int *b, int size)
1091 {
1092         struct buffer_head * bh;
1093         int block;
1094 
1095         block = b[0];
1096         if (!block)
1097                 return 0;
1098         bh = get_hash_table(dev, block, size);
1099         if (bh)
1100                 return check_aligned(bh, address, dev, b, size);
1101         return try_to_load_aligned(address, dev, b, size);
1102 }
1103 
1104 #define COPYBLK(size,from,to) \
1105 __asm__ __volatile__("rep ; movsl": \
1106         :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1107         :"cx","di","si")
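
/*
 * Editorial note (not part of the original source): COPYBLK moves "size"
 * bytes (always a multiple of four here, since it is a block size) as
 * size/4 long words, functionally the same as
 *
 *      memcpy((void *) to, (void *) from, size);
 *
 * but open-coded as a single "rep ; movsl" on the 386.
 */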
1108 
1109 /*
 1110  * bread_page reads a page's worth of buffers into memory at the desired address. It's
1111  * a function of its own, as there is some speed to be got by reading them
1112  * all at the same time, not waiting for one to be read, and then another
1113  * etc. This also allows us to optimize memory usage by sharing code pages
1114  * and filesystem buffers..
1115  */
1116 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
1117 {
1118         struct buffer_head * bh[8];
1119         unsigned long where;
1120         int i, j;
1121 
1122         if (!(prot & PAGE_RW)) {
1123                 where = try_to_share_buffers(address,dev,b,size);
1124                 if (where)
1125                         return where;
1126         }
1127         ++current->mm->maj_flt;
1128         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1129                 bh[i] = NULL;
1130                 if (b[i])
1131                         bh[i] = getblk(dev, b[i], size);
1132         }
1133         read_buffers(bh,i);
1134         where = address;
1135         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
1136                 if (bh[i]) {
1137                         if (bh[i]->b_uptodate)
1138                                 COPYBLK(size, (unsigned long) bh[i]->b_data,address);
1139                         brelse(bh[i]);
1140                 }
1141         }
1142         return where;
1143 }
1144 
1145 /*
1146  * Try to increase the number of buffers available: the size argument
1147  * is used to determine what kind of buffers we want.
1148  */
1149 static int grow_buffers(int pri, int size)
1150 {
1151         unsigned long page;
1152         struct buffer_head *bh, *tmp;
1153         struct buffer_head * insert_point;
1154         int isize;
1155 
1156         if ((size & 511) || (size > PAGE_SIZE)) {
1157                 printk("VFS: grow_buffers: size = %d\n",size);
1158                 return 0;
1159         }
1160 
1161         isize = BUFSIZE_INDEX(size);
1162 
1163         if (!(page = __get_free_page(pri)))
1164                 return 0;
1165         bh = create_buffers(page, size);
1166         if (!bh) {
1167                 free_page(page);
1168                 return 0;
1169         }
1170 
1171         insert_point = free_list[isize];
1172 
1173         tmp = bh;
1174         while (1) {
1175                 nr_free[isize]++;
1176                 if (insert_point) {
1177                         tmp->b_next_free = insert_point->b_next_free;
1178                         tmp->b_prev_free = insert_point;
1179                         insert_point->b_next_free->b_prev_free = tmp;
1180                         insert_point->b_next_free = tmp;
1181                 } else {
1182                         tmp->b_prev_free = tmp;
1183                         tmp->b_next_free = tmp;
1184                 }
1185                 insert_point = tmp;
1186                 ++nr_buffers;
1187                 if (tmp->b_this_page)
1188                         tmp = tmp->b_this_page;
1189                 else
1190                         break;
1191         }
1192         free_list[isize] = bh;
1193         buffer_pages[page >> PAGE_SHIFT] = bh;
1194         tmp->b_this_page = bh;
1195         wake_up(&buffer_wait);
1196         buffermem += PAGE_SIZE;
1197         return 1;
1198 }
1199 
1200 /*
1201  * try_to_free() checks if all the buffers on this particular page
1202  * are unused, and free's the page if so.
1203  */
1204 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1205 {
1206         unsigned long page;
1207         struct buffer_head * tmp, * p;
1208         int isize = BUFSIZE_INDEX(bh->b_size);
1209 
1210         *bhp = bh;
1211         page = (unsigned long) bh->b_data;
1212         page &= PAGE_MASK;
1213         tmp = bh;
1214         do {
1215                 if (!tmp)
1216                         return 0;
1217                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1218                         return 0;
1219                 tmp = tmp->b_this_page;
1220         } while (tmp != bh);
1221         tmp = bh;
1222         do {
1223                 p = tmp;
1224                 tmp = tmp->b_this_page;
1225                 nr_buffers--;
1226                 nr_buffers_size[isize]--;
1227                 if (p == *bhp)
1228                   {
1229                     *bhp = p->b_prev_free;
1230                     if (p == *bhp) /* Was this the last in the list? */
1231                       *bhp = NULL;
1232                   }
1233                 remove_from_queues(p);
1234                 put_unused_buffer_head(p);
1235         } while (tmp != bh);
1236         buffermem -= PAGE_SIZE;
1237         buffer_pages[page >> PAGE_SHIFT] = NULL;
1238         free_page(page);
1239         return !mem_map[MAP_NR(page)];
1240 }
1241 
1242 
1243 /*
1244  * Consult the load average for buffers and decide whether or not
 1245  * we should shrink the buffers of one size.  If we decide yes,
 1246  * do it and return 1.  Else return 0.  Buffers of the size that is
 1247  * specified are never the ones shrunk.
1248  *
1249  * I would prefer not to use a load average, but the way things are now it
1250  * seems unavoidable.  The way to get rid of it would be to force clustering
1251  * universally, so that when we reclaim buffers we always reclaim an entire
1252  * page.  Doing this would mean that we all need to move towards QMAGIC.
1253  */
1254 
1255 static int maybe_shrink_lav_buffers(int size)
1256 {          
1257         int nlist;
1258         int isize;
1259         int total_lav, total_n_buffers, n_sizes;
1260         
1261         /* Do not consider the shared buffers since they would not tend
1262            to have getblk called very often, and this would throw off
1263            the lav.  They are not easily reclaimable anyway (let the swapper
1264            make the first move). */
1265   
1266         total_lav = total_n_buffers = n_sizes = 0;
1267         for(nlist = 0; nlist < NR_SIZES; nlist++)
1268          {
1269                  total_lav += buffers_lav[nlist];
1270                  if(nr_buffers_size[nlist]) n_sizes++;
1271                  total_n_buffers += nr_buffers_size[nlist];
1272                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1273          }
1274         
1275         /* See if we have an excessive number of buffers of a particular
1276            size - if so, victimize that bunch. */
1277   
1278         isize = (size ? BUFSIZE_INDEX(size) : -1);
1279         
1280         if (n_sizes > 1)
1281                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1282                   {
1283                           if(nlist == isize) continue;
1284                           if(nr_buffers_size[nlist] &&
1285                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1286                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1287                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1288                                             return 1;
1289                   }
1290         return 0;
1291 }
1292 /*
1293  * Try to free up some pages by shrinking the buffer-cache
1294  *
1295  * Priority tells the routine how hard to try to shrink the
1296  * buffers: 3 means "don't bother too much", while a value
1297  * of 0 means "we'd better get some free pages now".
1298  */
1299 int shrink_buffers(unsigned int priority)
1300 {
1301         if (priority < 2) {
1302                 sync_buffers(0,0);
1303         }
1304 
1305         if(priority == 2) wakeup_bdflush(1);
1306 
1307         if(maybe_shrink_lav_buffers(0)) return 1;
1308 
1309         /* No good candidate size - take any size we can find */
1310         return shrink_specific_buffers(priority, 0);
1311 }
1312 
1313 static int shrink_specific_buffers(unsigned int priority, int size)
1314 {
1315         struct buffer_head *bh;
1316         int nlist;
1317         int i, isize, isize1;
1318 
1319 #ifdef DEBUG
1320         if(size) printk("Shrinking buffers of size %d\n", size);
1321 #endif
1322         /* First try the free lists, and see if we can get a complete page
1323            from here */
1324         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1325 
1326         for(isize = 0; isize<NR_SIZES; isize++){
1327                 if(isize1 != -1 && isize1 != isize) continue;
1328                 bh = free_list[isize];
1329                 if(!bh) continue;
1330                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1331                         if (bh->b_count || !bh->b_this_page)
1332                                  continue;
1333                         if (try_to_free(bh, &bh))
1334                                  return 1;
1335                         if(!bh) break; /* Some interrupt must have used it after we
1336                                           freed the page.  No big deal - keep looking */
1337                 }
1338         }
1339         
1340         /* Not enough in the free lists, now try the lru list */
1341         
1342         for(nlist = 0; nlist < NR_LIST; nlist++) {
1343         repeat1:
1344                 if(priority > 3 && nlist == BUF_SHARED) continue;
1345                 bh = lru_list[nlist];
1346                 if(!bh) continue;
1347                 i = nr_buffers_type[nlist] >> priority;
1348                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1349                         /* We may have stalled while waiting for I/O to complete. */
1350                         if(bh->b_list != nlist) goto repeat1;
1351                         if (bh->b_count || !bh->b_this_page)
1352                                  continue;
1353                         if(size && bh->b_size != size) continue;
1354                         if (bh->b_lock)
1355                                  if (priority)
1356                                           continue;
1357                                  else
1358                                           wait_on_buffer(bh);
1359                         if (bh->b_dirt) {
1360                                 bh->b_count++;
1361                                 bh->b_flushtime = 0;
1362                                 ll_rw_block(WRITEA, 1, &bh);
1363                                 bh->b_count--;
1364                                 continue;
1365                         }
1366                         if (try_to_free(bh, &bh))
1367                                  return 1;
1368                         if(!bh) break;
1369                 }
1370         }
1371         return 0;
1372 }
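/*
 * A small worked note on the scan depth above, assuming a hypothetical LRU
 * list of 1000 buffers: "nr_buffers_type[nlist] >> priority" scans all 1000
 * buffers at priority 0, 250 at priority 2 and only 125 at priority 3, so the
 * more urgent the call, the more of the list is examined.
 */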
1373 
1374 
1375 void show_buffers(void)
1376 {
1377         struct buffer_head * bh;
1378         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1379         int shared;
1380         int nlist, isize;
1381 
1382         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1383         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1384         printk("Buffer blocks:   %6d\n",nr_buffers);
1385 
1386         for(nlist = 0; nlist < NR_LIST; nlist++) {
1387           shared = found = locked = dirty = used = lastused = 0;
1388           bh = lru_list[nlist];
1389           if(!bh) continue;
1390           do {
1391                 found++;
1392                 if (bh->b_lock)
1393                         locked++;
1394                 if (bh->b_dirt)
1395                         dirty++;
1396                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1397                 if (bh->b_count)
1398                         used++, lastused = found;
1399                 bh = bh->b_next_free;
1400               } while (bh != lru_list[nlist]);
1401         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1402                 nlist, found, used, lastused, locked, dirty, shared);
1403         };
1404         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1405         for(isize = 0; isize<NR_SIZES; isize++){
1406                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1407                        buffers_lav[isize], nr_free[isize]);
1408                 for(nlist = 0; nlist < NR_LIST; nlist++)
1409                          printk("%7d ", nr_buffers_st[isize][nlist]);
1410                 printk("\n");
1411         }
1412 }
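/*
 * A hypothetical fragment of the output produced above; the numbers are
 * invented purely to show the layout of the printk lines:
 *
 *   Buffer memory:      412kB
 *   Buffer heads:       420
 *   Buffer blocks:      412
 *   Buffer[0] mem: 380 buffers, 12 used (last=371), 2 locked, 0 dirty 3 shrd
 *   Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared
 *    1024 [   37]:      10     380       0       2       0      19       3
 */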
1413 
1414 /*
1415  * try_to_reassign() checks whether all the buffers on this particular page
1416  * are unused, and reassigns them to a new cluster if they are.
1417  */
1418 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1419                            dev_t dev, unsigned int starting_block)
1420 {
1421         unsigned long page;
1422         struct buffer_head * tmp, * p;
1423 
1424         *bhp = bh;
1425         page = (unsigned long) bh->b_data;
1426         page &= PAGE_MASK;
1427         if(mem_map[MAP_NR(page)] != 1) return 0;
1428         tmp = bh;
1429         do {
1430                 if (!tmp)
1431                          return 0;
1432                 
1433                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1434                          return 0;
1435                 tmp = tmp->b_this_page;
1436         } while (tmp != bh);
1437         tmp = bh;
1438         
1439         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1440                  tmp = tmp->b_this_page;
1441         
1442         /* This is the buffer at the head of the page */
1443         bh = tmp;
1444         do {
1445                 p = tmp;
1446                 tmp = tmp->b_this_page;
1447                 remove_from_queues(p);
1448                 p->b_dev=dev;
1449                 p->b_uptodate = 0;
1450                 p->b_req = 0;
1451                 p->b_blocknr=starting_block++;
1452                 insert_into_queues(p);
1453         } while (tmp != bh);
1454         return 1;
1455 }
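/*
 * A minimal sketch of the ring walked above, assuming four 1024-byte buffers
 * sharing one 4096-byte page (the offsets are illustrative; the link order is
 * whatever create_buffers() set up):
 *
 *   b_data page offsets on the b_this_page ring: 0x000, 0x400, 0x800, 0xc00
 *
 * The "& (PAGE_SIZE - 1)" test stops on the buffer whose b_data is page
 * aligned (offset 0x000); the rekeying loop then walks the whole ring once,
 * giving the four buffers blocks starting_block .. starting_block + 3 on the
 * new device.
 */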
1456 
1457 /*
1458  * Try to find a free cluster by locating a page where
1459  * all of the buffers are unused.  We would like this function
1460  * to be atomic, so we do not call anything that might cause
1461  * the process to sleep.  The priority is somewhat similar to
1462  * the priority used in shrink_buffers.
1463  * 
1464  * My thinking is that the kernel should end up using whole
1465  * pages for the buffer cache as much of the time as possible.
1466  * This way the other buffers on a particular page are likely
1467  * to be very near each other on the free list, and we will not
1468  * be expiring data prematurely.  For now we only cannibalize buffers
1469  * of the same size to keep the code simpler.
1470  */
1471 static int reassign_cluster(dev_t dev, 
1472                      unsigned int starting_block, int size)
1473 {
1474         struct buffer_head *bh;
1475         int isize = BUFSIZE_INDEX(size);
1476         int i;
1477 
1478         /* We want to give ourselves a really good shot at generating
1479            a cluster, and since we only take buffers from the free
1480            list, we "overfill" it a little. */
1481 
1482         while(nr_free[isize] < 32) refill_freelist(size);
1483 
1484         bh = free_list[isize];
1485         if(bh)
1486                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1487                          if (!bh->b_this_page)  continue;
1488                          if (try_to_reassign(bh, &bh, dev, starting_block))
1489                                  return 4;
1490                  }
1491         return 0;
1492 }
1493 
1494 /* This function tries to generate a new cluster of buffers
1495  * from a new page in memory.  We should only do this if we have
1496  * not expanded the buffer cache to the maximum size that we allow.
1497  */
1498 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1499 {
1500         struct buffer_head * bh, * tmp, * arr[8];
1501         int isize = BUFSIZE_INDEX(size);
1502         unsigned long offset;
1503         unsigned long page;
1504         int nblock;
1505 
1506         page = get_free_page(GFP_NOBUFFER);
1507         if(!page) return 0;
1508 
1509         bh = create_buffers(page, size);
1510         if (!bh) {
1511                 free_page(page);
1512                 return 0;
1513         };
1514         nblock = block;
1515         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1516                 if (find_buffer(dev, nblock++, size))
1517                          goto not_aligned;
1518         }
1519         tmp = bh;
1520         nblock = 0;
1521         while (1) {
1522                 arr[nblock++] = bh;
1523                 bh->b_count = 1;
1524                 bh->b_dirt = 0;
1525                 bh->b_flushtime = 0;
1526                 bh->b_lock = 0;
1527                 bh->b_uptodate = 0;
1528                 bh->b_req = 0;
1529                 bh->b_dev = dev;
1530                 bh->b_list = BUF_CLEAN;
1531                 bh->b_blocknr = block++;
1532                 nr_buffers++;
1533                 nr_buffers_size[isize]++;
1534                 insert_into_queues(bh);
1535                 if (bh->b_this_page)
1536                         bh = bh->b_this_page;
1537                 else
1538                         break;
1539         }
1540         buffermem += PAGE_SIZE;
1541         buffer_pages[page >> PAGE_SHIFT] = bh;
1542         bh->b_this_page = tmp;
1543         while (nblock-- > 0)
1544                 brelse(arr[nblock]);
1545         return 4;
1546 not_aligned:
1547         while ((tmp = bh) != NULL) {
1548                 bh = bh->b_this_page;
1549                 put_unused_buffer_head(tmp);
1550         }
1551         free_page(page);
1552         return 0;
1553 }
1554 
1555 unsigned long generate_cluster(dev_t dev, int b[], int size)
1556 {
1557         int i, offset;
1558         
1559         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1560                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1561                 if(find_buffer(dev, b[i], size)) return 0;
1562         };
1563 
1564         /* OK, we have a candidate for a new cluster */
1565         
1566         /* See if one size of buffer is over-represented in the buffer cache,
1567            if so reduce the numbers of buffers */
1568         if(maybe_shrink_lav_buffers(size))
1569          {
1570                  int retval;
1571                  retval = try_to_generate_cluster(dev, b[0], size);
1572                  if(retval) return retval;
1573          };
1574         
1575         if (nr_free_pages > min_free_pages) 
1576                  return try_to_generate_cluster(dev, b[0], size);
1577         else
1578                  return reassign_cluster(dev, b[0], size);
1579 }
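/*
 * A minimal sketch of a caller, assuming 1024-byte blocks on a 4096-byte
 * page.  The block numbers handed in must already be consecutive and not yet
 * present in the cache, or the routine above returns 0 at once.  The device
 * and block values below are purely illustrative.
 */
#if 0   /* illustration only */
static void example_setup_cluster(dev_t dev)
{
        int b[4] = { 100, 101, 102, 103 };      /* four consecutive 1K blocks */

        if (generate_cluster(dev, b, 1024))
                printk("cluster set up for blocks 100-103\n");
}
#endif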
1580 
1581 /*
1582  * This initializes the initial buffer free list.  nr_buffers_type is set
1583  * to one less than the actual number of buffers, as a sop to backwards
1584  * compatibility --- the old code did this (I think unintentionally,
1585  * but I'm not sure), and programs in the ps package expect it.
1586  *                                      - TYT 8/30/92
1587  */
1588 void buffer_init(void)
1589 {
1590         int i;
1591         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1592 
1593         if (high_memory >= 4*1024*1024) {
1594                 min_free_pages = 200;
1595                 if(high_memory >= 16*1024*1024)
1596                          nr_hash = 16381;
1597                 else
1598                          nr_hash = 4093;
1599         } else {
1600                 min_free_pages = 20;
1601                 nr_hash = 997;
1602         };
1603         
1604         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1605                                                      sizeof(struct buffer_head *));
1606 
1607 
1608         buffer_pages = (struct buffer_head **) vmalloc((high_memory >>PAGE_SHIFT) * 
1609                                                      sizeof(struct buffer_head *));
1610         for (i = 0 ; i < high_memory >> PAGE_SHIFT ; i++)
1611                 buffer_pages[i] = NULL;
1612 
1613         for (i = 0 ; i < nr_hash ; i++)
1614                 hash_table[i] = NULL;
1615         lru_list[BUF_CLEAN] = 0;
1616         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1617         if (!free_list[isize])
1618                 panic("VFS: Unable to initialize buffer free list!");
1619         return;
1620 }
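/*
 * Sizing summary for the routine above (the thresholds are read straight
 * from the code):
 *
 *   memory <  4MB          min_free_pages =  20   nr_hash =   997
 *   4MB <= memory < 16MB   min_free_pages = 200   nr_hash =  4093
 *   memory >= 16MB         min_free_pages = 200   nr_hash = 16381
 *
 * 997, 4093 and 16381 are all prime, presumably so that a simple modulo hash
 * spreads the (dev,block) keys evenly across the table.
 */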
1621 
1622 /* This is a simple kernel daemon, whose job is to provide a dynamic
1623  * response to dirty buffers.  Once this process is activated, we write back
1624  * a limited number of buffers to the disks and then go back to sleep again.
1625  * In effect this is a process which never leaves kernel mode, and does not have
1626  * any user memory associated with it except for the stack.  There is also
1627  * a kernel stack page, which obviously must be separate from the user stack.
1628  */
1629 struct wait_queue * bdflush_wait = NULL;
1630 struct wait_queue * bdflush_done = NULL;
1631 
1632 static int bdflush_running = 0;
1633 
1634 static void wakeup_bdflush(int wait)
1635 {
1636         if(!bdflush_running){
1637                 printk("Warning - bdflush not running\n");
1638                 sync_buffers(0,0);
1639                 return;
1640         };
1641         wake_up(&bdflush_wait);
1642         if(wait) sleep_on(&bdflush_done);
1643 }
1644 
1645 
1646 
1647 /* 
1648  * Here we attempt to write back old buffers.  We also try to flush inodes
1649  * and superblocks, since this function is essentially "update", and
1650  * otherwise there would be no way of ensuring that these quantities ever
1651  * get written back.  Ideally, we would have a timestamp on the inodes
1652  * and superblocks so that we could write back only the old ones as well.
1653  */
1654 
1655 asmlinkage int sync_old_buffers(void)
1656 {
1657         int i, isize;
1658         int ndirty, nwritten;
1659         int nlist;
1660         int ncount;
1661         struct buffer_head * bh, *next;
1662 
1663         sync_supers(0);
1664         sync_inodes(0);
1665 
1666         ncount = 0;
1667 #ifdef DEBUG
1668         for(nlist = 0; nlist < NR_LIST; nlist++)
1669 #else
1670         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1671 #endif
1672         {
1673                 ndirty = 0;
1674                 nwritten = 0;
1675         repeat:
1676                 bh = lru_list[nlist];
1677                 if(bh) 
1678                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1679                                  /* We may have stalled while waiting for I/O to complete. */
1680                                  if(bh->b_list != nlist) goto repeat;
1681                                  next = bh->b_next_free;
1682                                  if(!lru_list[nlist]) {
1683                                          printk("Dirty list empty %d\n", i);
1684                                          break;
1685                                  }
1686                                  
1687                                  /* Clean buffer on dirty list?  Refile it */
1688                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1689                                   {
1690                                           refile_buffer(bh);
1691                                           continue;
1692                                   }
1693                                  
1694                                  if (bh->b_lock || !bh->b_dirt)
1695                                           continue;
1696                                  ndirty++;
1697                                  if(bh->b_flushtime > jiffies) continue;
1698                                  nwritten++;
1699                                  bh->b_count++;
1700                                  bh->b_flushtime = 0;
1701 #ifdef DEBUG
1702                                  if(nlist != BUF_DIRTY) ncount++;
1703 #endif
1704                                  ll_rw_block(WRITE, 1, &bh);
1705                                  bh->b_count--;
1706                          }
1707         }
1708 #ifdef DEBUG
1709         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1710         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1711 #endif
1712         
1713         /* We assume that we only come through here on a regular
1714            schedule, like every 5 seconds.  Now fold the usage counts
1715            into the load averages and reset them so they cannot overflow. */
1716         for(isize = 0; isize<NR_SIZES; isize++){
1717                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1718                 buffer_usage[isize] = 0;
1719         };
1720         return 0;
1721 }
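/*
 * A rough worked sketch of the decay above.  CALC_LOAD (from sched.h) keeps
 * an exponentially-weighted moving average in fixed point, approximately
 *
 *   lav = (lav * lav_const + usage * (FIXED_1 - lav_const)) >> FSHIFT
 *
 * Assuming the usual FIXED_1 of 2048 and, purely as an example, a lav_const
 * of 1884: a size whose usage drops to zero loses about 8% of its load
 * average on every pass (1884/2048 ~= 0.92), while a steady usage pulls the
 * average towards that usage.  buffer_usage is then cleared so the next
 * interval starts counting from zero.
 */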
1722 
1723 
1724 /* This is the interface to bdflush.  As we get more sophisticated, we can
1725  * pass tuning parameters to this "process" to adjust how it behaves.
1726  * Invoking it again once the daemon is running simply modifies the tuning
1727  * parameters.  Each parameter is verified, however, to make sure that it
1728  * is reasonable. */
1729 
1730 asmlinkage int sys_bdflush(int func, int data)
1731 {
1732         int i, error;
1733         int ndirty;
1734         int nlist;
1735         int ncount;
1736         struct buffer_head * bh, *next;
1737 
1738         if(!suser()) return -EPERM;
1739 
1740         if(func == 1)
1741                  return sync_old_buffers();
1742 
1743         /* Basically func 0 starts the daemon; func 2 reads param 0, 3 writes param 0, 4 reads param 1, etc (func 1 was handled above) */
1744         if(func >= 2){
1745                 i = (func-2) >> 1;
1746                 if (i < 0 || i >= N_PARAM) return -EINVAL;
1747                 if((func & 1) == 0) {
1748                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1749                         if(error) return error;
1750                         put_fs_long(bdf_prm.data[i], data);
1751                         return 0;
1752                 };
1753                 if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
1754                 bdf_prm.data[i] = data;
1755                 return 0;
1756         };
1757         
1758         if(bdflush_running++) return -EBUSY; /* Only one copy of this running at one time */
1759         
1760         /* OK, from here on is the daemon */
1761         
1762         while (1) {
1763 #ifdef DEBUG
1764                 printk("bdflush() activated...");
1765 #endif
1766                 
1767                 ncount = 0;
1768 #ifdef DEBUG
1769                 for(nlist = 0; nlist < NR_LIST; nlist++)
1770 #else
1771                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1772 #endif
1773                  {
1774                          ndirty = 0;
1775                  repeat:
1776                          bh = lru_list[nlist];
1777                          if(bh) 
1778                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1779                                        bh = next) {
1780                                           /* We may have stalled while waiting for I/O to complete. */
1781                                           if(bh->b_list != nlist) goto repeat;
1782                                           next = bh->b_next_free;
1783                                           if(!lru_list[nlist]) {
1784                                                   printk("Dirty list empty %d\n", i);
1785                                                   break;
1786                                           }
1787                                           
1788                                           /* Clean buffer on dirty list?  Refile it */
1789                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1790                                            {
1791                                                    refile_buffer(bh);
1792                                                    continue;
1793                                            }
1794                                           
1795                                           if (bh->b_lock || !bh->b_dirt)
1796                                                    continue;
1797                                           /* Should we write back buffers that are shared or not??
1798                                              currently dirty buffers are not shared, so it does not matter */
1799                                           bh->b_count++;
1800                                           ndirty++;
1801                                           bh->b_flushtime = 0;
1802                                           ll_rw_block(WRITE, 1, &bh);
1803 #ifdef DEBUG
1804                                           if(nlist != BUF_DIRTY) ncount++;
1805 #endif
1806                                           bh->b_count--;
1807                                   }
1808                  }
1809 #ifdef DEBUG
1810                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1811                 printk("sleeping again.\n");
1812 #endif
1813                 wake_up(&bdflush_done);
1814                 
1815                 /* If there are still a lot of dirty buffers around, skip the sleep
1816                    and flush some more */
1817                 
1818                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1819                    bdf_prm.b_un.nfract/100) {
1820                         if (current->signal & (1 << (SIGKILL-1))) {
1821                                 bdflush_running--;
1822                                 return 0;
1823                         }
1824                         current->signal = 0;
1825                         interruptible_sleep_on(&bdflush_wait);
1826                 }
1827         }
1828 }
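/*
 * A minimal user-space sketch of the interface above, assuming the
 * architecture's <asm/unistd.h> defines __NR_bdflush and that syscall() is
 * available; the parameter index and values are illustrative only (the real
 * layout and limits are given by bdf_prm, bdflush_min and bdflush_max
 * earlier in this file).  This is an illustration, not a tested utility.
 */
#if 0   /* illustration only */
#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

int main(void)
{
        long value = 0;

        /* func 2 reads parameter 0 into the address passed as "data". */
        if (syscall(__NR_bdflush, 2, (long) &value) == 0)
                printf("param 0 = %ld\n", value);

        /* func 3 writes parameter 0; the kernel range-checks it. */
        syscall(__NR_bdflush, 3, value);

        /* func 0 turns this process into the flushing daemon and normally
           never returns; func 1 just flushes old buffers once. */
        return syscall(__NR_bdflush, 0, 0L);
}
#endif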
1829 
1830 
1831 /*
1832  * Overrides for Emacs so that we follow Linus's tabbing style.
1833  * Emacs will notice this stuff at the end of the file and automatically
1834  * adjust the settings for this buffer only.  This must remain at the end
1835  * of the file.
1836  * ---------------------------------------------------------------------------
1837  * Local variables:
1838  * c-indent-level: 8
1839  * c-brace-imaginary-offset: 0
1840  * c-brace-offset: -8
1841  * c-argdecl-indent: 8
1842  * c-label-offset: -8
1843  * c-continued-statement-offset: 8
1844  * c-continued-brace-offset: 0
1845  * End:
1846  */
