root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions:
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. check_aligned
  32. try_to_load_aligned
  33. try_to_share_buffers
  34. bread_page
  35. grow_buffers
  36. try_to_free
  37. maybe_shrink_lav_buffers
  38. shrink_buffers
  39. shrink_specific_buffers
  40. show_buffers
  41. try_to_reassign
  42. reassign_cluster
  43. try_to_generate_cluster
  44. generate_cluster
  45. buffer_init
  46. wakeup_bdflush
  47. sync_old_buffers
  48. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #define NR_SIZES 4
  33 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  35 
  36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
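/*
 * Worked example (editor's note, not in the original source): the two
 * tables above are inverses of each other.  BUFSIZE_INDEX() shifts the
 * buffer size right by 9 bits and looks the result up in
 * buffersize_index, so
 *
 *	BUFSIZE_INDEX(512)  == buffersize_index[1] == 0
 *	BUFSIZE_INDEX(1024) == buffersize_index[2] == 1
 *	BUFSIZE_INDEX(2048) == buffersize_index[4] == 2
 *	BUFSIZE_INDEX(4096) == buffersize_index[8] == 3
 *
 * while bufferindex_size[] maps an index back to the size in bytes.
 * Any other size would hit a -1 entry and is not supported.
 */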
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
  59 extern int *blksize_size[];
  60 
  61 /* Here is the parameter block for the bdflush process. */
  62 static void wakeup_bdflush(int);
  63 
  64 #define N_PARAM 9
  65 #define LAV
  66 
  67 static union bdflush_param{
  68         struct {
  69                 int nfract;  /* Percentage of buffer cache dirty to 
  70                                 activate bdflush */
  71                 int ndirty;  /* Maximum number of dirty blocks to write out per
  72                                 wake-cycle */
  73                 int nrefill; /* Number of clean buffers to try and obtain
  74                                 each time we call refill */
  75                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  76                                   when trying to refill buffers. */
  77                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  78                                     search for free clusters */
  79                 int age_buffer;  /* Time for normal buffer to age before 
  80                                     we flush it */
  81                 int age_super;  /* Time for superblock to age before we 
  82                                    flush it */
  83                 int lav_const;  /* Constant used for load average (time
  84                                    constant) */
  85                 int lav_ratio;  /* Used to determine how low a lav for a
  86                                    particular size can go before we start to
  87                                    trim back the buffers */
  88         } b_un;
  89         unsigned int data[N_PARAM];
  90 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
  91 
  92 /* The lav constant is set for 1 minute, as long as the update process runs
  93    every 5 seconds.  If you change the frequency of update, the time
  94    constant will also change. */
  95 
  96 
  97 /* These are the min and max parameter values that we will allow to be assigned */
  98 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
  99 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
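/*
 * Editor's sketch (not part of the original file): because bdf_prm is a
 * union, the named fields alias the data[] array -- bdf_prm.data[0] is
 * b_un.nfract, bdf_prm.data[1] is b_un.ndirty, and so on.  A tuning path
 * such as sys_bdflush() can therefore presumably validate and update a
 * parameter generically against bdflush_min[]/bdflush_max[]; the helper
 * below is hypothetical and only illustrates that pattern.
 */
#if 0
static int set_bdflush_param(unsigned int n, unsigned int value)
{
	if (n >= N_PARAM)
		return -EINVAL;
	if (value < bdflush_min[n] || value > bdflush_max[n])
		return -EINVAL;		/* outside the allowed range */
	bdf_prm.data[n] = value;
	return 0;
}
#endif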
 100 
 101 /*
 102  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 103  * and getting rid of the cli-sti pairs. The wait-queue routines still
 104  * need cli-sti, but now it's just a couple of 386 instructions or so.
 105  *
 106  * Note that the real wait_on_buffer() is an inline function that checks
 107  * if 'b_wait' is set before calling this, so that the queues aren't set
 108  * up unnecessarily.
 109  */
 110 void __wait_on_buffer(struct buffer_head * bh)
 111 {
 112         struct wait_queue wait = { current, NULL };
 113 
 114         bh->b_count++;
 115         add_wait_queue(&bh->b_wait, &wait);
 116 repeat:
 117         current->state = TASK_UNINTERRUPTIBLE;
 118         if (bh->b_lock) {
 119                 schedule();
 120                 goto repeat;
 121         }
 122         remove_wait_queue(&bh->b_wait, &wait);
 123         bh->b_count--;
 124         current->state = TASK_RUNNING;
 125 }
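/*
 * Editor's sketch (not part of this file): the inline wait_on_buffer()
 * wrapper mentioned above lives in <linux/locks.h> and, in kernels of
 * this vintage, presumably amounts to no more than
 *
 *	extern inline void wait_on_buffer(struct buffer_head * bh)
 *	{
 *		if (bh->b_lock)
 *			__wait_on_buffer(bh);
 *	}
 *
 * so the wait queue is only set up when the buffer really is locked.
 */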
 126 
 127 /* Call sync_buffers with wait!=0 to ensure that the call does not
 128    return until all buffer writes have completed.  Sync() may return
 129    before the writes have finished; fsync() may not. */
 130 
 131 
 132 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 133    spontaneously dirty themselves without brelse ever being called.
 134    We will ultimately want to put these in a separate list, but for
 135    now we search all of the lists for dirty buffers */
 136 
 137 static int sync_buffers(dev_t dev, int wait)
 138 {
 139         int i, retry, pass = 0, err = 0;
 140         int nlist, ncount;
 141         struct buffer_head * bh, *next;
 142 
 143         /* One pass for no-wait, three for wait:
 144            0) write out all dirty, unlocked buffers;
 145            1) write out all dirty buffers, waiting if locked;
 146            2) wait for completion by waiting for all buffers to unlock. */
 147  repeat:
 148         retry = 0;
 149  repeat2:
 150         ncount = 0;
 151         /* We search all lists as a failsafe mechanism, not because we expect
 152            there to be dirty buffers on any of the other lists. */
 153         for(nlist = 0; nlist < NR_LIST; nlist++)
 154          {
 155          repeat1:
 156                  bh = lru_list[nlist];
 157                  if(!bh) continue;
 158                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 159                          if(bh->b_list != nlist) goto repeat1;
 160                          next = bh->b_next_free;
 161                          if(!lru_list[nlist]) break;
 162                          if (dev && bh->b_dev != dev)
 163                                   continue;
 164                          if (bh->b_lock)
 165                           {
 166                                   /* Buffer is locked; skip it unless wait is
 167                                      requested AND pass > 0. */
 168                                   if (!wait || !pass) {
 169                                           retry = 1;
 170                                           continue;
 171                                   }
 172                                   wait_on_buffer (bh);
 173                                   goto repeat2;
 174                           }
 175                          /* If an unlocked buffer is not uptodate, there has
 176                              been an IO error. Skip it. */
 177                          if (wait && bh->b_req && !bh->b_lock &&
 178                              !bh->b_dirt && !bh->b_uptodate) {
 179                                   err = 1;
 180                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 181                                   continue;
 182                           }
 183                          /* Don't write clean buffers.  Don't write ANY buffers
 184                             on the third pass. */
 185                          if (!bh->b_dirt || pass>=2)
 186                                   continue;
 187                          /* don't bother about locked buffers */
 188                          if (bh->b_lock)
 189                                  continue;
 190                          bh->b_count++;
 191                          bh->b_flushtime = 0;
 192                          ll_rw_block(WRITE, 1, &bh);
 193 
 194                          if(nlist != BUF_DIRTY) { 
 195                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 196                                  ncount++;
 197                          };
 198                          bh->b_count--;
 199                          retry = 1;
 200                  }
 201          }
 202         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 203         
 204         /* If we are waiting for the sync to succeed, and if any dirty
 205            blocks were written, then repeat; on the second pass, only
 206            wait for buffers being written (do not pass to write any
 207            more buffers on the second pass). */
 208         if (wait && retry && ++pass<=2)
 209                  goto repeat;
 210         return err;
 211 }
 212 
 213 void sync_dev(dev_t dev)
 214 {
 215         sync_buffers(dev, 0);
 216         sync_supers(dev);
 217         sync_inodes(dev);
 218         sync_buffers(dev, 0);
 219 }
 220 
 221 int fsync_dev(dev_t dev)
 222 {
 223         sync_buffers(dev, 0);
 224         sync_supers(dev);
 225         sync_inodes(dev);
 226         return sync_buffers(dev, 1);
 227 }
 228 
 229 asmlinkage int sys_sync(void)
 230 {
 231         sync_dev(0);
 232         return 0;
 233 }
 234 
 235 int file_fsync (struct inode *inode, struct file *filp)
 236 {
 237         return fsync_dev(inode->i_dev);
 238 }
 239 
 240 asmlinkage int sys_fsync(unsigned int fd)
 241 {
 242         struct file * file;
 243         struct inode * inode;
 244 
 245         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 246                 return -EBADF;
 247         if (!file->f_op || !file->f_op->fsync)
 248                 return -EINVAL;
 249         if (file->f_op->fsync(inode,file))
 250                 return -EIO;
 251         return 0;
 252 }
 253 
 254 void invalidate_buffers(dev_t dev)
 255 {
 256         int i;
 257         int nlist;
 258         struct buffer_head * bh;
 259 
 260         for(nlist = 0; nlist < NR_LIST; nlist++) {
 261                 bh = lru_list[nlist];
 262                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 263                      bh = bh->b_next_free) {
 264                         if (bh->b_dev != dev)
 265                                  continue;
 266                         wait_on_buffer(bh);
 267                         if (bh->b_dev == dev)
 268                                  bh->b_flushtime = bh->b_uptodate = 
 269                                           bh->b_dirt = bh->b_req = 0;
 270                 }
 271         }
 272 }
 273 
 274 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 275 #define hash(dev,block) hash_table[_hashfn(dev,block)]
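/*
 * Worked example (editor's note): for a hypothetical device number 0x0301
 * and block 42, the bucket is hash_table[(0x0301 ^ 42) % nr_hash];
 * hash(0x0301, 42) names the head of that bucket, a chain linked through
 * b_next/b_prev which find_buffer() below walks.
 */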
 276 
 277 static inline void remove_from_hash_queue(struct buffer_head * bh)
 278 {
 279         if (bh->b_next)
 280                 bh->b_next->b_prev = bh->b_prev;
 281         if (bh->b_prev)
 282                 bh->b_prev->b_next = bh->b_next;
 283         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 284                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 285         bh->b_next = bh->b_prev = NULL;
 286 }
 287 
 288 static inline void remove_from_lru_list(struct buffer_head * bh)
 289 {
 290         if (!(bh->b_prev_free) || !(bh->b_next_free))
 291                 panic("VFS: LRU block list corrupted");
 292         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 293         bh->b_prev_free->b_next_free = bh->b_next_free;
 294         bh->b_next_free->b_prev_free = bh->b_prev_free;
 295 
 296         if (lru_list[bh->b_list] == bh)
 297                  lru_list[bh->b_list] = bh->b_next_free;
 298         if(lru_list[bh->b_list] == bh)
 299                  lru_list[bh->b_list] = NULL;
 300         bh->b_next_free = bh->b_prev_free = NULL;
 301 }
 302 
 303 static inline void remove_from_free_list(struct buffer_head * bh)
 304 {
 305         int isize = BUFSIZE_INDEX(bh->b_size);
 306         if (!(bh->b_prev_free) || !(bh->b_next_free))
 307                 panic("VFS: Free block list corrupted");
 308         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 309         if(!free_list[isize])
 310                  panic("Free list empty");
 311         nr_free[isize]--;
 312         if(bh->b_next_free == bh)
 313                  free_list[isize] = NULL;
 314         else {
 315                 bh->b_prev_free->b_next_free = bh->b_next_free;
 316                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 317                 if (free_list[isize] == bh)
 318                          free_list[isize] = bh->b_next_free;
 319         };
 320         bh->b_next_free = bh->b_prev_free = NULL;
 321 }
 322 
 323 static inline void remove_from_queues(struct buffer_head * bh)
 324 {
 325         if(bh->b_dev == 0xffff) {
 326                 remove_from_free_list(bh); /* Free list entries should not be
 327                                               in the hash queue */
 328                 return;
 329         };
 330         nr_buffers_type[bh->b_list]--;
 331         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 332         remove_from_hash_queue(bh);
 333         remove_from_lru_list(bh);
 334 }
 335 
 336 static inline void put_last_lru(struct buffer_head * bh)
 337 {
 338         if (!bh)
 339                 return;
 340         if (bh == lru_list[bh->b_list]) {
 341                 lru_list[bh->b_list] = bh->b_next_free;
 342                 return;
 343         }
 344         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 345         remove_from_lru_list(bh);
 346 /* add to back of free list */
 347 
 348         if(!lru_list[bh->b_list]) {
 349                 lru_list[bh->b_list] = bh;
 350                 lru_list[bh->b_list]->b_prev_free = bh;
 351         };
 352 
 353         bh->b_next_free = lru_list[bh->b_list];
 354         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 355         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 356         lru_list[bh->b_list]->b_prev_free = bh;
 357 }
 358 
 359 static inline void put_last_free(struct buffer_head * bh)
 360 {
 361         int isize;
 362         if (!bh)
 363                 return;
 364 
 365         isize = BUFSIZE_INDEX(bh->b_size);      
 366         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 367 /* add to back of free list */
 368 
 369         if(!free_list[isize]) {
 370                 free_list[isize] = bh;
 371                 bh->b_prev_free = bh;
 372         };
 373 
 374         nr_free[isize]++;
 375         bh->b_next_free = free_list[isize];
 376         bh->b_prev_free = free_list[isize]->b_prev_free;
 377         free_list[isize]->b_prev_free->b_next_free = bh;
 378         free_list[isize]->b_prev_free = bh;
 379 }
 380 
 381 static inline void insert_into_queues(struct buffer_head * bh)
 382 {
 383 /* put at end of free list */
 384 
 385         if(bh->b_dev == 0xffff) {
 386                 put_last_free(bh);
 387                 return;
 388         };
 389         if(!lru_list[bh->b_list]) {
 390                 lru_list[bh->b_list] = bh;
 391                 bh->b_prev_free = bh;
 392         };
 393         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 394         bh->b_next_free = lru_list[bh->b_list];
 395         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 396         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 397         lru_list[bh->b_list]->b_prev_free = bh;
 398         nr_buffers_type[bh->b_list]++;
 399         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 400 /* put the buffer in new hash-queue if it has a device */
 401         bh->b_prev = NULL;
 402         bh->b_next = NULL;
 403         if (!bh->b_dev)
 404                 return;
 405         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 406         hash(bh->b_dev,bh->b_blocknr) = bh;
 407         if (bh->b_next)
 408                 bh->b_next->b_prev = bh;
 409 }
 410 
 411 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 412 {               
 413         struct buffer_head * tmp;
 414 
 415         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 416                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 417                         if (tmp->b_size == size)
 418                                 return tmp;
 419                         else {
 420                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 421                                                         MAJOR(dev), MINOR(dev));
 422                                 return NULL;
 423                         }
 424         return NULL;
 425 }
 426 
 427 /*
 428  * Why like this, I hear you say... The reason is race-conditions.
 429  * As we don't lock buffers (unless we are reading them, that is),
 430  * something might happen to it while we sleep (ie a read-error
 431  * will force it bad). This shouldn't really happen currently, but
 432  * the code is ready.
 433  */
 434 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 435 {
 436         struct buffer_head * bh;
 437 
 438         for (;;) {
 439                 if (!(bh=find_buffer(dev,block,size)))
 440                         return NULL;
 441                 bh->b_count++;
 442                 wait_on_buffer(bh);
 443                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 444                         return bh;
 445                 bh->b_count--;
 446         }
 447 }
 448 
 449 void set_blocksize(dev_t dev, int size)
 450 {
 451         int i, nlist;
 452         struct buffer_head * bh, *bhnext;
 453 
 454         if (!blksize_size[MAJOR(dev)])
 455                 return;
 456 
 457         switch(size) {
 458                 default: panic("Invalid blocksize passed to set_blocksize");
 459                 case 512: case 1024: case 2048: case 4096:;
 460         }
 461 
 462         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 463                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 464                 return;
 465         }
 466         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 467                 return;
 468         sync_buffers(dev, 2);
 469         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 470 
 471   /* We need to be quite careful how we do this - we are moving entries
 472      around on the free list, and we can get in a loop if we are not careful.*/
 473 
 474         for(nlist = 0; nlist < NR_LIST; nlist++) {
 475                 bh = lru_list[nlist];
 476                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 477                         if(!bh) break;
 478                         bhnext = bh->b_next_free; 
 479                         if (bh->b_dev != dev)
 480                                  continue;
 481                         if (bh->b_size == size)
 482                                  continue;
 483                         
 484                         wait_on_buffer(bh);
 485                         if (bh->b_dev == dev && bh->b_size != size) {
 486                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 487                                          bh->b_flushtime = 0;
 488                         };
 489                         remove_from_hash_queue(bh);
 490                 }
 491         }
 492 }
 493 
 494 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
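/*
 * Worked example (editor's note): BADNESS() folds the dirty and lock bits
 * into a small cost for reclaiming a buffer:
 *
 *	clean, unlocked  -> 0
 *	clean, locked    -> 1
 *	dirty, unlocked  -> 2
 *	dirty, locked    -> 3
 *
 * refill_freelist() below only takes candidates with BADNESS() == 0.
 */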
 495 
 496 void refill_freelist(int size)
 497 {
 498         struct buffer_head * bh, * tmp;
 499         struct buffer_head * candidate[NR_LIST];
 500         unsigned int best_time, winner;
 501         int isize = BUFSIZE_INDEX(size);
 502         int buffers[NR_LIST];
 503         int i;
 504         int needed;
 505 
 506         /* First see if we even need this.  Sometimes it is advantageous
 507          to request some blocks in a filesystem that we know that we will
 508          be needing ahead of time. */
 509 
 510         if (nr_free[isize] > 100)
 511                 return;
 512 
 513         /* If there are too many dirty buffers, we wake up the update process
 514            now so as to ensure that there are still clean buffers available
 515            for user processes to use (and dirty) */
 516         
 517         /* We are going to try and locate this much memory */
 518         needed =bdf_prm.b_un.nrefill * size;  
 519 
 520         while (nr_free_pages > min_free_pages*2 && needed > 0 &&
 521                grow_buffers(GFP_BUFFER, size)) {
 522                 needed -= PAGE_SIZE;
 523         }
 524 
 525         if(needed <= 0) return;
 526 
 527         /* See if there are too many buffers of a different size.
 528            If so, victimize them */
 529 
 530         while(maybe_shrink_lav_buffers(size))
 531          {
 532                  if(!grow_buffers(GFP_BUFFER, size)) break;
 533                  needed -= PAGE_SIZE;
 534                  if(needed <= 0) return;
 535          };
 536 
 537         /* OK, we cannot grow the buffer cache, now try and get some
 538            from the lru list */
 539 
 540         /* First set the candidate pointers to usable buffers.  This
 541            should be quick nearly all of the time. */
 542 
 543 repeat0:
 544         for(i=0; i<NR_LIST; i++){
 545                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 546                    nr_buffers_type[i] == 0) {
 547                         candidate[i] = NULL;
 548                         buffers[i] = 0;
 549                         continue;
 550                 }
 551                 buffers[i] = nr_buffers_type[i];
 552                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 553                  {
 554                          if(buffers[i] < 0) panic("Here is the problem");
 555                          tmp = bh->b_next_free;
 556                          if (!bh) break;
 557                          
 558                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 559                              bh->b_dirt) {
 560                                  refile_buffer(bh);
 561                                  continue;
 562                          };
 563                          
 564                          if (bh->b_count || bh->b_size != size)
 565                                   continue;
 566                          
 567                          /* Buffers are written in the order they are placed 
 568                             on the locked list. If we encounter a locked
 569                             buffer here, this means that the rest of them
 570                             are also locked */
 571                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 572                                  buffers[i] = 0;
 573                                  break;
 574                          }
 575                          
 576                          if (BADNESS(bh)) continue;
 577                          break;
 578                  };
 579                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 580                 else candidate[i] = bh;
 581                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 582         }
 583         
 584  repeat:
 585         if(needed <= 0) return;
 586         
 587         /* Now see which candidate wins the election */
 588         
 589         winner = best_time = UINT_MAX;  
 590         for(i=0; i<NR_LIST; i++){
 591                 if(!candidate[i]) continue;
 592                 if(candidate[i]->b_lru_time < best_time){
 593                         best_time = candidate[i]->b_lru_time;
 594                         winner = i;
 595                 }
 596         }
 597         
 598         /* If we have a winner, use it, and then get a new candidate from that list */
 599         if(winner != UINT_MAX) {
 600                 i = winner;
 601                 bh = candidate[i];
 602                 candidate[i] = bh->b_next_free;
 603                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 604                 if (bh->b_count || bh->b_size != size)
 605                          panic("Busy buffer in candidate list\n");
 606                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 607                          panic("Shared buffer in candidate list\n");
 608                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 609                 
 610                 if(bh->b_dev == 0xffff) panic("Wrong list");
 611                 remove_from_queues(bh);
 612                 bh->b_dev = 0xffff;
 613                 put_last_free(bh);
 614                 needed -= bh->b_size;
 615                 buffers[i]--;
 616                 if(buffers[i] < 0) panic("Here is the problem");
 617                 
 618                 if(buffers[i] == 0) candidate[i] = NULL;
 619                 
 620                 /* Now all we need to do is advance the candidate pointer
 621                    from the winner list to the next usable buffer */
 622                 if(candidate[i] && buffers[i] > 0){
 623                         if(buffers[i] <= 0) panic("Here is another problem");
 624                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 625                                 if(buffers[i] < 0) panic("Here is the problem");
 626                                 tmp = bh->b_next_free;
 627                                 if (!bh) break;
 628                                 
 629                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 630                                     bh->b_dirt) {
 631                                         refile_buffer(bh);
 632                                         continue;
 633                                 };
 634                                 
 635                                 if (bh->b_count || bh->b_size != size)
 636                                          continue;
 637                                 
 638                                 /* Buffers are written in the order they are
 639                                    placed on the locked list.  If we encounter
 640                                    a locked buffer here, this means that the
 641                                    rest of them are also locked */
 642                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 643                                         buffers[i] = 0;
 644                                         break;
 645                                 }
 646               
 647                                 if (BADNESS(bh)) continue;
 648                                 break;
 649                         };
 650                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 651                         else candidate[i] = bh;
 652                         if(candidate[i] && candidate[i]->b_count) 
 653                                  panic("Here is the problem");
 654                 }
 655                 
 656                 goto repeat;
 657         }
 658         
 659         if(needed <= 0) return;
 660         
 661         /* Too bad, that was not enough. Try a little harder to grow some. */
 662         
 663         if (nr_free_pages > 5) {
 664                 if (grow_buffers(GFP_BUFFER, size)) {
 665                         needed -= PAGE_SIZE;
 666                         goto repeat0;
 667                 };
 668         }
 669         
 670         /* and repeat until we find something good */
 671         if (!grow_buffers(GFP_ATOMIC, size))
 672                 wakeup_bdflush(1);
 673         needed -= PAGE_SIZE;
 674         goto repeat0;
 675 }
 676 
 677 /*
 678  * Ok, this is getblk, and it isn't very clear, again to hinder
 679  * race-conditions. Most of the code is seldom used, (ie repeating),
 680  * so it should be much more efficient than it looks.
 681  *
 682  * The algorithm is changed: hopefully better, and an elusive bug removed.
 683  *
 684  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 685  * when the filesystem starts to get full of dirty blocks (I hope).
 686  */
 687 struct buffer_head * getblk(dev_t dev, int block, int size)
 688 {
 689         struct buffer_head * bh;
 690         int isize = BUFSIZE_INDEX(size);
 691 
 692         /* Update this for the buffer size lav. */
 693         buffer_usage[isize]++;
 694 
 695         /* If there are too many dirty buffers, we wake up the update process
 696            now so as to ensure that there are still clean buffers available
 697            for user processes to use (and dirty) */
 698 repeat:
 699         bh = get_hash_table(dev, block, size);
 700         if (bh) {
 701                 if (bh->b_uptodate && !bh->b_dirt)
 702                          put_last_lru(bh);
 703                 if(!bh->b_dirt) bh->b_flushtime = 0;
 704                 return bh;
 705         }
 706 
 707         while(!free_list[isize]) refill_freelist(size);
 708         
 709         if (find_buffer(dev,block,size))
 710                  goto repeat;
 711 
 712         bh = free_list[isize];
 713         remove_from_free_list(bh);
 714 
 715 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 716 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 717         bh->b_count=1;
 718         bh->b_dirt=0;
 719         bh->b_lock=0;
 720         bh->b_uptodate=0;
 721         bh->b_flushtime = 0;
 722         bh->b_req=0;
 723         bh->b_dev=dev;
 724         bh->b_blocknr=block;
 725         insert_into_queues(bh);
 726         return bh;
 727 }
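/*
 * Usage sketch (editor's addition; the caller below is hypothetical): a
 * block writer typically obtains the buffer with getblk(), fills b_data,
 * marks it uptodate and dirty, and lets brelse() (via set_writetime() and
 * refile_buffer() below) schedule the delayed write-back.
 */
#if 0
static void example_write_block(dev_t dev, int block, const char * src, int size)
{
	struct buffer_head * bh = getblk(dev, block, size);

	if (!bh)
		return;			/* no buffer available */
	memcpy(bh->b_data, src, size);	/* fill the buffer */
	bh->b_uptodate = 1;
	bh->b_dirt = 1;			/* bdflush/sync will write it out later */
	brelse(bh);			/* drop b_count, set the flush time */
}
#endif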
 728 
 729 void set_writetime(struct buffer_head * buf, int flag)
 730 {
 731         int newtime;
 732 
 733         if (buf->b_dirt){
 734                 /* Move buffer to dirty list if jiffies is clear */
 735                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 736                                      bdf_prm.b_un.age_buffer);
 737                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 738                          buf->b_flushtime = newtime;
 739         } else {
 740                 buf->b_flushtime = 0;
 741         }
 742 }
 743 
 744 
 745 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 746                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
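/*
 * Worked example (editor's note): refile_buffer() below builds an index
 * into this table from three bits -- bit 0: the page is shared (mem_map
 * count != 1), bit 1: b_lock, bit 2: b_dirt.  A dirty, locked, unshared
 * buffer has i == 6 and is filed on BUF_DIRTY; a shared, clean, unlocked
 * one has i == 1 and is filed on BUF_SHARED.
 */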
 747 
 748 void refile_buffer(struct buffer_head * buf){
 749         int i, dispose;
 750         i = 0;
 751         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 752         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 753         if(buf->b_lock) i |= 2;
 754         if(buf->b_dirt) i |= 4;
 755         dispose = buffer_disposition[i];
 756         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 757                  dispose = BUF_UNSHARED;
 758         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 759         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 760         if(dispose != buf->b_list)  {
 761                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 762                          buf->b_lru_time = jiffies;
 763                 if(dispose == BUF_LOCKED && 
 764                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 765                          dispose = BUF_LOCKED1;
 766                 remove_from_queues(buf);
 767                 buf->b_list = dispose;
 768                 insert_into_queues(buf);
 769                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 770                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 771                    bdf_prm.b_un.nfract/100)
 772                          wakeup_bdflush(0);
 773         }
 774 }
 775 
 776 void brelse(struct buffer_head * buf)
 777 {
 778         if (!buf)
 779                 return;
 780         wait_on_buffer(buf);
 781 
 782         /* If dirty, mark the time this buffer should be written back */
 783         set_writetime(buf, 0);
 784         refile_buffer(buf);
 785 
 786         if (buf->b_count) {
 787                 if (--buf->b_count)
 788                         return;
 789                 wake_up(&buffer_wait);
 790                 return;
 791         }
 792         printk("VFS: brelse: Trying to free free buffer\n");
 793 }
 794 
 795 /*
 796  * bread() reads a specified block and returns the buffer that contains
 797  * it. It returns NULL if the block was unreadable.
 798  */
 799 struct buffer_head * bread(dev_t dev, int block, int size)
 800 {
 801         struct buffer_head * bh;
 802 
 803         if (!(bh = getblk(dev, block, size))) {
 804                 printk("VFS: bread: READ error on device %d/%d\n",
 805                                                 MAJOR(dev), MINOR(dev));
 806                 return NULL;
 807         }
 808         if (bh->b_uptodate)
 809                 return bh;
 810         ll_rw_block(READ, 1, &bh);
 811         wait_on_buffer(bh);
 812         if (bh->b_uptodate)
 813                 return bh;
 814         brelse(bh);
 815         return NULL;
 816 }
 817 
 818 /*
 819  * Ok, breada can be used as bread, but additionally requests
 820  * read-ahead on the blocks that follow, bounded by the device's
 821  * read_ahead[] setting and by the end of the file.
 822  */
 823 
 824 #define NBUF 16
 825 
 826 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 827         unsigned int pos, unsigned int filesize)
 828 {
 829         struct buffer_head * bhlist[NBUF];
 830         unsigned int blocks;
 831         struct buffer_head * bh;
 832         int index;
 833         int i, j;
 834 
 835         if (pos >= filesize)
 836                 return NULL;
 837 
 838         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 839                 return NULL;
 840 
 841         index = BUFSIZE_INDEX(bh->b_size);
 842 
 843         if (bh->b_uptodate)
 844                 return bh;
 845 
 846         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 847 
 848         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 849                 blocks = read_ahead[MAJOR(dev)] >> index;
 850         if (blocks > NBUF)
 851                 blocks = NBUF;
 852         
 853         bhlist[0] = bh;
 854         j = 1;
 855         for(i=1; i<blocks; i++) {
 856                 bh = getblk(dev,block+i,bufsize);
 857                 if (bh->b_uptodate) {
 858                         brelse(bh);
 859                         break;
 860                 }
 861                 bhlist[j++] = bh;
 862         }
 863 
 864         /* Request the read for these buffers, and then release them */
 865         ll_rw_block(READ, j, bhlist);
 866 
 867         for(i=1; i<j; i++)
 868                 brelse(bhlist[i]);
 869 
 870         /* Wait for this buffer, and then continue on */
 871         bh = bhlist[0];
 872         wait_on_buffer(bh);
 873         if (bh->b_uptodate)
 874                 return bh;
 875         brelse(bh);
 876         return NULL;
 877 }
 878 
 879 /*
 880  * See fs/inode.c for the weird use of volatile..
 881  */
 882 static void put_unused_buffer_head(struct buffer_head * bh)
 883 {
 884         struct wait_queue * wait;
 885 
 886         wait = ((volatile struct buffer_head *) bh)->b_wait;
 887         memset(bh,0,sizeof(*bh));
 888         ((volatile struct buffer_head *) bh)->b_wait = wait;
 889         bh->b_next_free = unused_list;
 890         unused_list = bh;
 891 }
 892 
 893 static void get_more_buffer_heads(void)
 894 {
 895         int i;
 896         struct buffer_head * bh;
 897 
 898         if (unused_list)
 899                 return;
 900 
 901         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 902                 return;
 903 
 904         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 905                 bh->b_next_free = unused_list;  /* only make link */
 906                 unused_list = bh++;
 907         }
 908 }
 909 
 910 static struct buffer_head * get_unused_buffer_head(void)
 911 {
 912         struct buffer_head * bh;
 913 
 914         get_more_buffer_heads();
 915         if (!unused_list)
 916                 return NULL;
 917         bh = unused_list;
 918         unused_list = bh->b_next_free;
 919         bh->b_next_free = NULL;
 920         bh->b_data = NULL;
 921         bh->b_size = 0;
 922         bh->b_req = 0;
 923         return bh;
 924 }
 925 
 926 /*
 927  * Create the appropriate buffers when given a page for data area and
 928  * the size of each buffer.. Use the bh->b_this_page linked list to
 929  * follow the buffers created.  Return NULL if unable to create more
 930  * buffers.
 931  */
 932 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 933 {
 934         struct buffer_head *bh, *head;
 935         unsigned long offset;
 936 
 937         head = NULL;
 938         offset = PAGE_SIZE;
 939         while ((offset -= size) < PAGE_SIZE) {
 940                 bh = get_unused_buffer_head();
 941                 if (!bh)
 942                         goto no_grow;
 943                 bh->b_this_page = head;
 944                 head = bh;
 945                 bh->b_data = (char *) (page+offset);
 946                 bh->b_size = size;
 947                 bh->b_dev = 0xffff;  /* Flag as unused */
 948         }
 949         return head;
 950 /*
 951  * In case anything failed, we just free everything we got.
 952  */
 953 no_grow:
 954         bh = head;
 955         while (bh) {
 956                 head = bh;
 957                 bh = bh->b_this_page;
 958                 put_unused_buffer_head(head);
 959         }
 960         return NULL;
 961 }
 962 
 963 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 964 {
 965         int i;
 966         int bhnum = 0;
 967         struct buffer_head * bhr[8];
 968 
 969         for (i = 0 ; i < nrbuf ; i++) {
 970                 if (bh[i] && !bh[i]->b_uptodate)
 971                         bhr[bhnum++] = bh[i];
 972         }
 973         if (bhnum)
 974                 ll_rw_block(READ, bhnum, bhr);
 975         for (i = 0 ; i < nrbuf ; i++) {
 976                 if (bh[i]) {
 977                         wait_on_buffer(bh[i]);
 978                 }
 979         }
 980 }
 981 
 982 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 983         dev_t dev, int *b, int size)
 984 {
 985         struct buffer_head * bh[8];
 986         unsigned long page;
 987         unsigned long offset;
 988         int block;
 989         int nrbuf;
 990 
 991         page = (unsigned long) first->b_data;
 992         if (page & ~PAGE_MASK) {
 993                 brelse(first);
 994                 return 0;
 995         }
 996         mem_map[MAP_NR(page)]++;
 997         bh[0] = first;
 998         nrbuf = 1;
 999         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1000                 block = *++b;
1001                 if (!block)
1002                         goto no_go;
1003                 first = get_hash_table(dev, block, size);
1004                 if (!first)
1005                         goto no_go;
1006                 bh[nrbuf++] = first;
1007                 if (page+offset != (unsigned long) first->b_data)
1008                         goto no_go;
1009         }
1010         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1011         while (nrbuf-- > 0)
1012                 brelse(bh[nrbuf]);
1013         free_page(address);
1014         ++current->mm->min_flt;
1015         return page;
1016 no_go:
1017         while (nrbuf-- > 0)
1018                 brelse(bh[nrbuf]);
1019         free_page(page);
1020         return 0;
1021 }
1022 
1023 static unsigned long try_to_load_aligned(unsigned long address,
1024         dev_t dev, int b[], int size)
1025 {
1026         struct buffer_head * bh, * tmp, * arr[8];
1027         unsigned long offset;
1028         int isize = BUFSIZE_INDEX(size);
1029         int * p;
1030         int block;
1031 
1032         bh = create_buffers(address, size);
1033         if (!bh)
1034                 return 0;
1035         /* do any of the buffers already exist? punt if so.. */
1036         p = b;
1037         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1038                 block = *(p++);
1039                 if (!block)
1040                         goto not_aligned;
1041                 if (find_buffer(dev, block, size))
1042                         goto not_aligned;
1043         }
1044         tmp = bh;
1045         p = b;
1046         block = 0;
1047         while (1) {
1048                 arr[block++] = bh;
1049                 bh->b_count = 1;
1050                 bh->b_dirt = 0;
1051                 bh->b_flushtime = 0;
1052                 bh->b_uptodate = 0;
1053                 bh->b_req = 0;
1054                 bh->b_dev = dev;
1055                 bh->b_blocknr = *(p++);
1056                 bh->b_list = BUF_CLEAN;
1057                 nr_buffers++;
1058                 nr_buffers_size[isize]++;
1059                 insert_into_queues(bh);
1060                 if (bh->b_this_page)
1061                         bh = bh->b_this_page;
1062                 else
1063                         break;
1064         }
1065         buffermem += PAGE_SIZE;
1066         bh->b_this_page = tmp;
1067         mem_map[MAP_NR(address)]++;
1068         buffer_pages[MAP_NR(address)] = bh;
1069         read_buffers(arr,block);
1070         while (block-- > 0)
1071                 brelse(arr[block]);
1072         ++current->mm->maj_flt;
1073         return address;
1074 not_aligned:
1075         while ((tmp = bh) != NULL) {
1076                 bh = bh->b_this_page;
1077                 put_unused_buffer_head(tmp);
1078         }
1079         return 0;
1080 }
1081 
1082 /*
1083  * Try-to-share-buffers tries to minimize memory use by trying to keep
1084  * both code pages and the buffer area in the same page. This is done by
1085  * (a) checking if the buffers are already aligned correctly in memory and
1086  * (b) if none of the buffer heads are in memory at all, trying to load
1087  * them into memory the way we want them.
1088  *
1089  * This doesn't guarantee that the memory is shared, but should under most
1090  * circumstances work very well indeed (ie >90% sharing of code pages on
1091  * demand-loadable executables).
1092  */
1093 static inline unsigned long try_to_share_buffers(unsigned long address,
1094         dev_t dev, int *b, int size)
1095 {
1096         struct buffer_head * bh;
1097         int block;
1098 
1099         block = b[0];
1100         if (!block)
1101                 return 0;
1102         bh = get_hash_table(dev, block, size);
1103         if (bh)
1104                 return check_aligned(bh, address, dev, b, size);
1105         return try_to_load_aligned(address, dev, b, size);
1106 }
1107 
1108 /*
1109  * bread_page reads four buffers into memory at the desired address. It's
1110  * a function of its own, as there is some speed to be got by reading them
1111  * all at the same time, not waiting for one to be read, and then another
1112  * etc. This also allows us to optimize memory usage by sharing code pages
1113  * and filesystem buffers..
1114  */
1115 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1116 {
1117         struct buffer_head * bh[8];
1118         unsigned long where;
1119         int i, j;
1120 
1121         if (!no_share) {
1122                 where = try_to_share_buffers(address, dev, b, size);
1123                 if (where)
1124                         return where;
1125         }
1126         ++current->mm->maj_flt;
1127         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1128                 bh[i] = NULL;
1129                 if (b[i])
1130                         bh[i] = getblk(dev, b[i], size);
1131         }
1132         read_buffers(bh,i);
1133         where = address;
1134         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1135                 if (bh[i]) {
1136                         if (bh[i]->b_uptodate)
1137                                 memcpy((void *) where, bh[i]->b_data, size);
1138                         brelse(bh[i]);
1139                 }
1140         }
1141         return address;
1142 }
1143 
1144 /*
1145  * Try to increase the number of buffers available: the size argument
1146  * is used to determine what kind of buffers we want.
1147  */
1148 static int grow_buffers(int pri, int size)
1149 {
1150         unsigned long page;
1151         struct buffer_head *bh, *tmp;
1152         struct buffer_head * insert_point;
1153         int isize;
1154 
1155         if ((size & 511) || (size > PAGE_SIZE)) {
1156                 printk("VFS: grow_buffers: size = %d\n",size);
1157                 return 0;
1158         }
1159 
1160         isize = BUFSIZE_INDEX(size);
1161 
1162         if (!(page = __get_free_page(pri)))
1163                 return 0;
1164         bh = create_buffers(page, size);
1165         if (!bh) {
1166                 free_page(page);
1167                 return 0;
1168         }
1169 
1170         insert_point = free_list[isize];
1171 
1172         tmp = bh;
1173         while (1) {
1174                 nr_free[isize]++;
1175                 if (insert_point) {
1176                         tmp->b_next_free = insert_point->b_next_free;
1177                         tmp->b_prev_free = insert_point;
1178                         insert_point->b_next_free->b_prev_free = tmp;
1179                         insert_point->b_next_free = tmp;
1180                 } else {
1181                         tmp->b_prev_free = tmp;
1182                         tmp->b_next_free = tmp;
1183                 }
1184                 insert_point = tmp;
1185                 ++nr_buffers;
1186                 if (tmp->b_this_page)
1187                         tmp = tmp->b_this_page;
1188                 else
1189                         break;
1190         }
1191         free_list[isize] = bh;
1192         buffer_pages[MAP_NR(page)] = bh;
1193         tmp->b_this_page = bh;
1194         wake_up(&buffer_wait);
1195         buffermem += PAGE_SIZE;
1196         return 1;
1197 }
1198 
1199 /*
1200  * try_to_free() checks if all the buffers on this particular page
 1201  * are unused, and frees the page if so.
1202  */
1203 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1204 {
1205         unsigned long page;
1206         struct buffer_head * tmp, * p;
1207         int isize = BUFSIZE_INDEX(bh->b_size);
1208 
1209         *bhp = bh;
1210         page = (unsigned long) bh->b_data;
1211         page &= PAGE_MASK;
1212         tmp = bh;
1213         do {
1214                 if (!tmp)
1215                         return 0;
1216                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1217                         return 0;
1218                 tmp = tmp->b_this_page;
1219         } while (tmp != bh);
1220         tmp = bh;
1221         do {
1222                 p = tmp;
1223                 tmp = tmp->b_this_page;
1224                 nr_buffers--;
1225                 nr_buffers_size[isize]--;
1226                 if (p == *bhp)
1227                   {
1228                     *bhp = p->b_prev_free;
1229                     if (p == *bhp) /* Was this the last in the list? */
1230                       *bhp = NULL;
1231                   }
1232                 remove_from_queues(p);
1233                 put_unused_buffer_head(p);
1234         } while (tmp != bh);
1235         buffermem -= PAGE_SIZE;
1236         buffer_pages[MAP_NR(page)] = NULL;
1237         free_page(page);
1238         return !mem_map[MAP_NR(page)];
1239 }
1240 
1241 
1242 /*
1243  * Consult the load average for buffers and decide whether or not
1244  * we should shrink the buffers of one size or not.  If we decide yes,
 1245  * do it and return 1.  Else return 0.  Do not attempt to shrink the
 1246  * size that is specified.
1247  *
1248  * I would prefer not to use a load average, but the way things are now it
1249  * seems unavoidable.  The way to get rid of it would be to force clustering
1250  * universally, so that when we reclaim buffers we always reclaim an entire
1251  * page.  Doing this would mean that we all need to move towards QMAGIC.
1252  */
1253 
1254 static int maybe_shrink_lav_buffers(int size)
1255 {          
1256         int nlist;
1257         int isize;
1258         int total_lav, total_n_buffers, n_sizes;
1259         
1260         /* Do not consider the shared buffers since they would not tend
1261            to have getblk called very often, and this would throw off
1262            the lav.  They are not easily reclaimable anyway (let the swapper
1263            make the first move). */
1264   
1265         total_lav = total_n_buffers = n_sizes = 0;
1266         for(nlist = 0; nlist < NR_SIZES; nlist++)
1267          {
1268                  total_lav += buffers_lav[nlist];
1269                  if(nr_buffers_size[nlist]) n_sizes++;
1270                  total_n_buffers += nr_buffers_size[nlist];
1271                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1272          }
1273         
1274         /* See if we have an excessive number of buffers of a particular
1275            size - if so, victimize that bunch. */
1276   
1277         isize = (size ? BUFSIZE_INDEX(size) : -1);
1278         
1279         if (n_sizes > 1)
1280                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1281                   {
1282                           if(nlist == isize) continue;
1283                           if(nr_buffers_size[nlist] &&
1284                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1285                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1286                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1287                                             return 1;
1288                   }
1289         return 0;
1290 }
1291 /*
1292  * Try to free up some pages by shrinking the buffer-cache
1293  *
1294  * Priority tells the routine how hard to try to shrink the
1295  * buffers: 3 means "don't bother too much", while a value
1296  * of 0 means "we'd better get some free pages now".
1297  */
1298 int shrink_buffers(unsigned int priority)
1299 {
1300         if (priority < 2) {
1301                 sync_buffers(0,0);
1302         }
1303 
1304         if(priority == 2) wakeup_bdflush(1);
1305 
1306         if(maybe_shrink_lav_buffers(0)) return 1;
1307 
1308         /* No good candidate size - take any size we can find */
1309         return shrink_specific_buffers(priority, 0);
1310 }
1311 
1312 static int shrink_specific_buffers(unsigned int priority, int size)
1313 {
1314         struct buffer_head *bh;
1315         int nlist;
1316         int i, isize, isize1;
1317 
1318 #ifdef DEBUG
1319         if(size) printk("Shrinking buffers of size %d\n", size);
1320 #endif
1321         /* First try the free lists, and see if we can get a complete page
1322            from here */
1323         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1324 
1325         for(isize = 0; isize<NR_SIZES; isize++){
1326                 if(isize1 != -1 && isize1 != isize) continue;
1327                 bh = free_list[isize];
1328                 if(!bh) continue;
1329                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1330                         if (bh->b_count || !bh->b_this_page)
1331                                  continue;
1332                         if (try_to_free(bh, &bh))
1333                                  return 1;
1334                         if(!bh) break; /* Some interrupt must have used it after we
1335                                           freed the page.  No big deal - keep looking */
1336                 }
1337         }
1338         
1339         /* Not enough in the free lists, now try the lru list */
1340         
1341         for(nlist = 0; nlist < NR_LIST; nlist++) {
1342         repeat1:
1343                 if(priority > 3 && nlist == BUF_SHARED) continue;
1344                 bh = lru_list[nlist];
1345                 if(!bh) continue;
1346                 i = nr_buffers_type[nlist] >> priority;
1347                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1348                         /* We may have stalled while waiting for I/O to complete. */
1349                         if(bh->b_list != nlist) goto repeat1;
1350                         if (bh->b_count || !bh->b_this_page)
1351                                  continue;
1352                         if(size && bh->b_size != size) continue;
1353                         if (bh->b_lock)
1354                                  if (priority)
1355                                           continue;
1356                                  else
1357                                           wait_on_buffer(bh);
1358                         if (bh->b_dirt) {
1359                                 bh->b_count++;
1360                                 bh->b_flushtime = 0;
1361                                 ll_rw_block(WRITEA, 1, &bh);
1362                                 bh->b_count--;
1363                                 continue;
1364                         }
1365                         if (try_to_free(bh, &bh))
1366                                  return 1;
1367                         if(!bh) break;
1368                 }
1369         }
1370         return 0;
1371 }
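/*
 * The scan budget above is nr_buffers_type[nlist] >> priority, so priority
 * directly bounds the work done: with, say, 4096 buffers on a list (an
 * illustrative figure), priority 3 examines at most 512 of them, while
 * priority 0 walks the entire list.
 */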
1372 
1373 
1374 void show_buffers(void)
1375 {
1376         struct buffer_head * bh;
1377         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1378         int shared;
1379         int nlist, isize;
1380 
1381         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1382         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1383         printk("Buffer blocks:   %6d\n",nr_buffers);
1384 
1385         for(nlist = 0; nlist < NR_LIST; nlist++) {
1386           shared = found = locked = dirty = used = lastused = 0;
1387           bh = lru_list[nlist];
1388           if(!bh) continue;
1389           do {
1390                 found++;
1391                 if (bh->b_lock)
1392                         locked++;
1393                 if (bh->b_dirt)
1394                         dirty++;
1395                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1396                 if (bh->b_count)
1397                         used++, lastused = found;
1398                 bh = bh->b_next_free;
1399               } while (bh != lru_list[nlist]);
1400         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1401                 nlist, found, used, lastused, locked, dirty, shared);
1402         };
1403         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1404         for(isize = 0; isize<NR_SIZES; isize++){
1405                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1406                        buffers_lav[isize], nr_free[isize]);
1407                 for(nlist = 0; nlist < NR_LIST; nlist++)
1408                          printk("%7d ", nr_buffers_st[isize][nlist]);
1409                 printk("\n");
1410         }
1411 }
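/*
 * With the printk formats above, the per-list output looks something like
 * the following (numbers purely illustrative):
 *
 *      Buffer[0] mem: 371 buffers, 12 used (last=280), 0 locked, 0 dirty 3 shrd
 *
 * followed by one row per buffer size giving its load average, free count
 * and per-list totals.
 */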
1412 
1413 /*
1414  * try_to_reassign() checks if all the buffers on this particular page
1415  * are unused, and reassigns them to a new cluster if this is true.
1416  */
1417 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1418                            dev_t dev, unsigned int starting_block)
1419 {
1420         unsigned long page;
1421         struct buffer_head * tmp, * p;
1422 
1423         *bhp = bh;
1424         page = (unsigned long) bh->b_data;
1425         page &= PAGE_MASK;
1426         if(mem_map[MAP_NR(page)] != 1) return 0;
1427         tmp = bh;
1428         do {
1429                 if (!tmp)
1430                          return 0;
1431                 
1432                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1433                          return 0;
1434                 tmp = tmp->b_this_page;
1435         } while (tmp != bh);
1436         tmp = bh;
1437         
1438         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1439                  tmp = tmp->b_this_page;
1440         
1441         /* This is the buffer at the head of the page */
1442         bh = tmp;
1443         do {
1444                 p = tmp;
1445                 tmp = tmp->b_this_page;
1446                 remove_from_queues(p);
1447                 p->b_dev=dev;
1448                 p->b_uptodate = 0;
1449                 p->b_req = 0;
1450                 p->b_blocknr=starting_block++;
1451                 insert_into_queues(p);
1452         } while (tmp != bh);
1453         return 1;
1454 }
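/*
 * Sketch of the effect, assuming 1kB buffers on a 4kB page: the alignment
 * loop walks b_this_page until b_data is page aligned, which identifies the
 * first buffer on the page, and the final loop then rethreads all four
 * buffers to (dev, starting_block) .. (dev, starting_block + 3), marking
 * them not uptodate so they can be read back as a fresh, contiguous cluster.
 */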
1455 
1456 /*
1457  * Try to find a free cluster by locating a page where
1458  * all of the buffers are unused.  We would like this function
1459  * to be atomic, so we do not call anything that might cause
1460  * the process to sleep.  The priority is somewhat similar to
1461  * the priority used in shrink_buffers.
1462  * 
1463  * My thinking is that the kernel should end up using whole
1464  * pages for the buffer cache as much of the time as possible.
1465  * This way the other buffers on a particular page are likely
1466  * to be very near each other on the free list, and we will not
1467  * be expiring data prematurely.  For now we only cannibalize buffers
1468  * of the same size to keep the code simpler.
1469  */
1470 static int reassign_cluster(dev_t dev, 
1471                      unsigned int starting_block, int size)
1472 {
1473         struct buffer_head *bh;
1474         int isize = BUFSIZE_INDEX(size);
1475         int i;
1476 
1477         /* We want to give ourselves a really good shot at generating
1478            a cluster, and since we only take buffers from the free
1479            list, we "overfill" it a little. */
1480 
1481         while(nr_free[isize] < 32) refill_freelist(size);
1482 
1483         bh = free_list[isize];
1484         if(bh)
1485                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1486                          if (!bh->b_this_page)  continue;
1487                          if (try_to_reassign(bh, &bh, dev, starting_block))
1488                                  return 4;
1489                  }
1490         return 0;
1491 }
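/*
 * The "overfill" above keeps at least 32 buffers of this size on the free
 * list before scanning; for 1kB buffers on a 4kB page (illustrative sizes)
 * that is eight pages' worth, which gives a reasonable chance that at least
 * one of those pages is wholly unused and can be handed to try_to_reassign().
 */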
1492 
1493 /* This function tries to generate a new cluster of buffers
1494  * from a new page in memory.  We should only do this if we have
1495  * not expanded the buffer cache to the maximum size that we allow.
1496  */
1497 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1498 {
1499         struct buffer_head * bh, * tmp, * arr[8];
1500         int isize = BUFSIZE_INDEX(size);
1501         unsigned long offset;
1502         unsigned long page;
1503         int nblock;
1504 
1505         page = get_free_page(GFP_NOBUFFER);
1506         if(!page) return 0;
1507 
1508         bh = create_buffers(page, size);
1509         if (!bh) {
1510                 free_page(page);
1511                 return 0;
1512         };
1513         nblock = block;
1514         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1515                 if (find_buffer(dev, nblock++, size))
1516                          goto not_aligned;
1517         }
1518         tmp = bh;
1519         nblock = 0;
1520         while (1) {
1521                 arr[nblock++] = bh;
1522                 bh->b_count = 1;
1523                 bh->b_dirt = 0;
1524                 bh->b_flushtime = 0;
1525                 bh->b_lock = 0;
1526                 bh->b_uptodate = 0;
1527                 bh->b_req = 0;
1528                 bh->b_dev = dev;
1529                 bh->b_list = BUF_CLEAN;
1530                 bh->b_blocknr = block++;
1531                 nr_buffers++;
1532                 nr_buffers_size[isize]++;
1533                 insert_into_queues(bh);
1534                 if (bh->b_this_page)
1535                         bh = bh->b_this_page;
1536                 else
1537                         break;
1538         }
1539         buffermem += PAGE_SIZE;
1540         buffer_pages[MAP_NR(page)] = bh;
1541         bh->b_this_page = tmp;
1542         while (nblock-- > 0)
1543                 brelse(arr[nblock]);
1544         return 4;
1545 not_aligned:
1546         while ((tmp = bh) != NULL) {
1547                 bh = bh->b_this_page;
1548                 put_unused_buffer_head(tmp);
1549         }
1550         free_page(page);
1551         return 0;
1552 }
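/*
 * Worked example, assuming a 4kB page: with size 1024 the page is carved
 * into four buffers covering blocks block .. block+3; with size 512 it is
 * carved into eight, which is why arr[] above has eight slots.  If any of
 * those blocks already has a buffer in the hash table, the whole page is
 * given back rather than creating a partially aliased cluster.
 */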
1553 
1554 unsigned long generate_cluster(dev_t dev, int b[], int size)
1555 {
1556         int i, offset;
1557         
1558         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1559                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1560                 if(find_buffer(dev, b[i], size)) return 0;
1561         };
1562 
1563         /* OK, we have a candidate for a new cluster */
1564         
1565         /* See if one size of buffer is over-represented in the buffer cache,
1566            if so reduce the numbers of buffers */
1567         if(maybe_shrink_lav_buffers(size))
1568          {
1569                  int retval;
1570                  retval = try_to_generate_cluster(dev, b[0], size);
1571                  if(retval) return retval;
1572          };
1573         
1574         if (nr_free_pages > min_free_pages*2) 
1575                  return try_to_generate_cluster(dev, b[0], size);
1576         else
1577                  return reassign_cluster(dev, b[0], size);
1578 }
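/*
 * The adjacency check means the block array must be strictly consecutive.
 * For example (illustrative numbers), b[] = { 200, 201, 202, 203 } is a
 * clustering candidate, whereas { 200, 201, 205, 206 } returns 0 at the
 * third element and no cluster is attempted.
 */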
1579 
1580 /*
1581  * This initializes the initial buffer free list.  nr_buffers_type is set
1582  * to one less than the actual number of buffers, as a sop to backwards
1583  * compatibility --- the old code did this (I think unintentionally,
1584  * but I'm not sure), and programs in the ps package expect it.
1585  *                                      - TYT 8/30/92
1586  */
1587 void buffer_init(void)
1588 {
1589         int i;
1590         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1591 
1592         if (high_memory >= 4*1024*1024) {
1593                 if(high_memory >= 16*1024*1024)
1594                          nr_hash = 16381;
1595                 else
1596                          nr_hash = 4093;
1597         } else {
1598                 nr_hash = 997;
1599         };
1600         
1601         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1602                                                      sizeof(struct buffer_head *));
1603 
1604 
1605         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1606                                                      sizeof(struct buffer_head *));
1607         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1608                 buffer_pages[i] = NULL;
1609 
1610         for (i = 0 ; i < nr_hash ; i++)
1611                 hash_table[i] = NULL;
1612         lru_list[BUF_CLEAN] = 0;
1613         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1614         if (!free_list[isize])
1615                 panic("VFS: Unable to initialize buffer free list!");
1616         return;
1617 }
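/*
 * The hash table sizes chosen above (997, 4093, 16381) are all prime, which
 * helps spread (dev, block) pairs evenly if, as is conventional, the hash
 * used by the lookup code earlier in this file reduces them modulo nr_hash,
 * regardless of the block-number stride a filesystem happens to use.
 */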
1618 
1619 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1620  * response to dirty buffers.  Once this process is activated, we write back
1621  * a limited number of buffers to the disks and then go back to sleep again.
1622  * In effect this is a process which never leaves kernel mode, and does not have
1623  * any user memory associated with it except for the stack.  There is also
1624  * a kernel stack page, which obviously must be separate from the user stack.
1625  */
1626 struct wait_queue * bdflush_wait = NULL;
1627 struct wait_queue * bdflush_done = NULL;
1628 
1629 static int bdflush_running = 0;
1630 
1631 static void wakeup_bdflush(int wait)
1632 {
1633         if(!bdflush_running){
1634                 printk("Warning - bdflush not running\n");
1635                 sync_buffers(0,0);
1636                 return;
1637         };
1638         wake_up(&bdflush_wait);
1639         if(wait) sleep_on(&bdflush_done);
1640 }
1641 
1642 
1643 
1644 /* 
1645  * Here we attempt to write back old buffers.  We also flush inodes 
1646  * and superblocks, since this function is essentially "update", and 
1647  * otherwise there would be no way of ensuring that these quantities ever 
1648  * get written back.  Ideally, we would have a timestamp on the inodes
1649  * and superblocks so that we could write back only the old ones as well.
1650  */
1651 
1652 asmlinkage int sync_old_buffers(void)
1653 {
1654         int i, isize;
1655         int ndirty, nwritten;
1656         int nlist;
1657         int ncount;
1658         struct buffer_head * bh, *next;
1659 
1660         sync_supers(0);
1661         sync_inodes(0);
1662 
1663         ncount = 0;
1664 #ifdef DEBUG
1665         for(nlist = 0; nlist < NR_LIST; nlist++)
1666 #else
1667         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1668 #endif
1669         {
1670                 ndirty = 0;
1671                 nwritten = 0;
1672         repeat:
1673                 bh = lru_list[nlist];
1674                 if(bh) 
1675                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1676                                  /* We may have stalled while waiting for I/O to complete. */
1677                                  if(bh->b_list != nlist) goto repeat;
1678                                  next = bh->b_next_free;
1679                                  if(!lru_list[nlist]) {
1680                                          printk("Dirty list empty %d\n", i);
1681                                          break;
1682                                  }
1683                                  
1684                                  /* Clean buffer on dirty list?  Refile it */
1685                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1686                                   {
1687                                           refile_buffer(bh);
1688                                           continue;
1689                                   }
1690                                  
1691                                  if (bh->b_lock || !bh->b_dirt)
1692                                           continue;
1693                                  ndirty++;
1694                                  if(bh->b_flushtime > jiffies) continue;
1695                                  nwritten++;
1696                                  bh->b_count++;
1697                                  bh->b_flushtime = 0;
1698 #ifdef DEBUG
1699                                  if(nlist != BUF_DIRTY) ncount++;
1700 #endif
1701                                  ll_rw_block(WRITE, 1, &bh);
1702                                  bh->b_count--;
1703                          }
1704         }
1705 #ifdef DEBUG
1706         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1707         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1708 #endif
1709         
1710         /* We assume that we only come through here on a regular
1711            schedule, like every 5 seconds.  Now update load averages.  
1712            Shift usage counts to prevent overflow. */
1713         for(isize = 0; isize<NR_SIZES; isize++){
1714                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1715                 buffer_usage[isize] = 0;
1716         };
1717         return 0;
1718 }
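/*
 * CALC_LOAD above is the usual fixed-point exponential decay (as defined in
 * <linux/sched.h>), roughly:
 *
 *      load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * so each ~5-second pass folds the getblk count accumulated in
 * buffer_usage[] into buffers_lav[] and then resets the counter, giving the
 * smoothed per-size load average that maybe_shrink_lav_buffers() compares.
 */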
1719 
1720 
1721 /* This is the interface to bdflush.  As we get more sophisticated, we can
1722  * pass tuning parameters to this "process", to adjust how it behaves.  Invoking
1723  * it again once the daemon is running simply modifies 
1724  * the tuning parameters.  Each parameter is verified, however,
1725  * to make sure that it is reasonable. */
1726 
1727 asmlinkage int sys_bdflush(int func, int data)
1728 {
1729         int i, error;
1730         int ndirty;
1731         int nlist;
1732         int ncount;
1733         struct buffer_head * bh, *next;
1734 
1735         if (!suser())
1736                 return -EPERM;
1737 
1738         if (func == 1)
1739                  return sync_old_buffers();
1740 
1741         /* Func 0 starts the daemon, func 1 flushes old buffers; for func >= 2, even values read param (func-2)/2 and odd values write it */
1742         if (func >= 2) {
1743                 i = (func-2) >> 1;
1744                 if (i < 0 || i >= N_PARAM)
1745                         return -EINVAL;
1746                 if((func & 1) == 0) {
1747                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1748                         if (error)
1749                                 return error;
1750                         put_fs_long(bdf_prm.data[i], data);
1751                         return 0;
1752                 };
1753                 if (data < bdflush_min[i] || data > bdflush_max[i])
1754                         return -EINVAL;
1755                 bdf_prm.data[i] = data;
1756                 return 0;
1757         };
1758         
1759         if (bdflush_running)
1760                 return -EBUSY; /* Only one copy of this running at one time */
1761         bdflush_running++;
1762         
1763         /* OK, from here on is the daemon */
1764         
1765         for (;;) {
1766 #ifdef DEBUG
1767                 printk("bdflush() activated...");
1768 #endif
1769                 
1770                 ncount = 0;
1771 #ifdef DEBUG
1772                 for(nlist = 0; nlist < NR_LIST; nlist++)
1773 #else
1774                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1775 #endif
1776                  {
1777                          ndirty = 0;
1778                  repeat:
1779                          bh = lru_list[nlist];
1780                          if(bh) 
1781                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1782                                        bh = next) {
1783                                           /* We may have stalled while waiting for I/O to complete. */
1784                                           if(bh->b_list != nlist) goto repeat;
1785                                           next = bh->b_next_free;
1786                                           if(!lru_list[nlist]) {
1787                                                   printk("Dirty list empty %d\n", i);
1788                                                   break;
1789                                           }
1790                                           
1791                                           /* Clean buffer on dirty list?  Refile it */
1792                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1793                                            {
1794                                                    refile_buffer(bh);
1795                                                    continue;
1796                                            }
1797                                           
1798                                           if (bh->b_lock || !bh->b_dirt)
1799                                                    continue;
1800                                           /* Should we write back buffers that are shared or not??
1801                                              currently dirty buffers are not shared, so it does not matter */
1802                                           bh->b_count++;
1803                                           ndirty++;
1804                                           bh->b_flushtime = 0;
1805                                           ll_rw_block(WRITE, 1, &bh);
1806 #ifdef DEBUG
1807                                           if(nlist != BUF_DIRTY) ncount++;
1808 #endif
1809                                           bh->b_count--;
1810                                   }
1811                  }
1812 #ifdef DEBUG
1813                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1814                 printk("sleeping again.\n");
1815 #endif
1816                 wake_up(&bdflush_done);
1817                 
1818                 /* If there are still a lot of dirty buffers around, skip the sleep
1819                    and flush some more */
1820                 
1821                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1822                    bdf_prm.b_un.nfract/100) {
1823                         if (current->signal & (1 << (SIGKILL-1))) {
1824                                 bdflush_running--;
1825                                 return 0;
1826                         }
1827                         current->signal = 0;
1828                         interruptible_sleep_on(&bdflush_wait);
1829                 }
1830         }
1831 }
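/*
 * A hypothetical user-space caller (the names and syscall wrapper are
 * assumptions for illustration; the real invoker is the separately shipped
 * bdflush/update daemon):
 *
 *      bdflush(1, 0);           -- flush old buffers, inodes and supers once
 *      bdflush(2, (int) &val);  -- read tuning parameter 0 (nfract) into val
 *      bdflush(3, 60);          -- set parameter 0 to 60, range-checked
 *      bdflush(0, 0);           -- turn the caller into the flush daemon
 *
 * For func >= 2, an even func reads parameter (func-2)/2 and the following
 * odd func writes it, subject to the bdflush_min/bdflush_max limits.
 */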
1832 
1833 
1834 /*
1835  * Overrides for Emacs so that we follow Linus's tabbing style.
1836  * Emacs will notice this stuff at the end of the file and automatically
1837  * adjust the settings for this buffer only.  This must remain at the end
1838  * of the file.
1839  * ---------------------------------------------------------------------------
1840  * Local variables:
1841  * c-indent-level: 8
1842  * c-brace-imaginary-offset: 0
1843  * c-brace-offset: -8
1844  * c-argdecl-indent: 8
1845  * c-label-offset: -8
1846  * c-continued-statement-offset: 8
1847  * c-continued-brace-offset: 0
1848  * End:
1849  */
