root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. check_aligned
  32. try_to_load_aligned
  33. try_to_share_buffers
  34. bread_page
  35. grow_buffers
  36. try_to_free
  37. maybe_shrink_lav_buffers
  38. shrink_buffers
  39. shrink_specific_buffers
  40. show_buffers
  41. try_to_reassign
  42. reassign_cluster
  43. try_to_generate_cluster
  44. generate_cluster
  45. buffer_init
  46. wakeup_bdflush
  47. sync_old_buffers
  48. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #define NR_SIZES 4
  33 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  35 
  36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
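      /*
       * BUFSIZE_INDEX maps a legal buffer size onto an index into the
       * per-size arrays: (size>>9) is 1, 2, 4 or 8, and buffersize_index[]
       * turns that into 0..3.  E.g. BUFSIZE_INDEX(1024) == buffersize_index[2]
       * == 1, so 1024-byte buffers use free_list[1], nr_free[1] and so on.
       */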
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
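      /*
       * The cache is threaded onto several lists at once: hash_table[]
       * chains buffers by (dev,block) through b_next/b_prev; lru_list[]
       * keeps one circular list per BUF_* type and free_list[] one circular
       * list of unattached buffers (b_dev == 0xffff) per size, both linked
       * through b_next_free/b_prev_free; unused_list holds spare buffer
       * heads that have no data page attached.
       */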
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
   59 static int min_free_pages = 20; /* nr of free pages needed before the buffer cache may grow */
  60 extern int *blksize_size[];
  61 
  62 /* Here is the parameter block for the bdflush process. */
  63 static void wakeup_bdflush(int);
  64 
  65 #define N_PARAM 9
  66 #define LAV
  67 
  68 static union bdflush_param{
  69         struct {
  70                 int nfract;  /* Percentage of buffer cache dirty to 
  71                                 activate bdflush */
  72                 int ndirty;  /* Maximum number of dirty blocks to write out per
  73                                 wake-cycle */
  74                 int nrefill; /* Number of clean buffers to try and obtain
  75                                 each time we call refill */
  76                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  77                                   when trying to refill buffers. */
  78                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  79                                     search for free clusters */
  80                 int age_buffer;  /* Time for normal buffer to age before 
  81                                     we flush it */
  82                 int age_super;  /* Time for superblock to age before we 
  83                                    flush it */
  84                 int lav_const;  /* Constant used for load average (time
   85                                    constant) */
  86                 int lav_ratio;  /* Used to determine how low a lav for a
  87                                    particular size can go before we start to
  88                                    trim back the buffers */
  89         } b_un;
  90         unsigned int data[N_PARAM];
  91 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
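      /* In the initializer above that is, in order: nfract=25%, ndirty=500,
         nrefill=64, nref_dirt=256, clu_nfract=15%, age_buffer=3000,
         age_super=500, lav_const=1884, lav_ratio=2. */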
  92 
  93 /* The lav constant is set for 1 minute, as long as the update process runs
  94    every 5 seconds.  If you change the frequency of update, the time
  95    constant will also change. */
  96 
  97 
  98 /* These are the min and max parameter values that we will allow to be assigned */
  99 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
 100 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
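      /* bdflush_min[i] and bdflush_max[i] use the same ordering as
         bdf_prm.data[i], i.e. one entry per tunable in the order the
         b_un fields are declared above. */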
 101 
 102 /*
 103  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  104  * and got rid of the cli-sti pairs. The wait-queue routines still
 105  * need cli-sti, but now it's just a couple of 386 instructions or so.
 106  *
 107  * Note that the real wait_on_buffer() is an inline function that checks
 108  * if 'b_wait' is set before calling this, so that the queues aren't set
 109  * up unnecessarily.
 110  */
 111 void __wait_on_buffer(struct buffer_head * bh)
 112 {
 113         struct wait_queue wait = { current, NULL };
 114 
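              /* Take an extra reference so the buffer cannot be reused
                 or freed while we sleep waiting for it to unlock. */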
 115         bh->b_count++;
 116         add_wait_queue(&bh->b_wait, &wait);
 117 repeat:
 118         current->state = TASK_UNINTERRUPTIBLE;
 119         if (bh->b_lock) {
 120                 schedule();
 121                 goto repeat;
 122         }
 123         remove_wait_queue(&bh->b_wait, &wait);
 124         bh->b_count--;
 125         current->state = TASK_RUNNING;
 126 }
 127 
 128 /* Call sync_buffers with wait!=0 to ensure that the call does not
 129    return until all buffer writes have completed.  Sync() may return
 130    before the writes have finished; fsync() may not. */
 131 
 132 
 133 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 134    spontaneously dirty themselves without ever brelse being called.
 135    We will ultimately want to put these in a separate list, but for
 136    now we search all of the lists for dirty buffers */
 137 
 138 static int sync_buffers(dev_t dev, int wait)
 139 {
 140         int i, retry, pass = 0, err = 0;
 141         int nlist, ncount;
 142         struct buffer_head * bh, *next;
 143 
 144         /* One pass for no-wait, three for wait:
 145            0) write out all dirty, unlocked buffers;
 146            1) write out all dirty buffers, waiting if locked;
 147            2) wait for completion by waiting for all buffers to unlock. */
 148  repeat:
 149         retry = 0;
 150  repeat2:
 151         ncount = 0;
 152         /* We search all lists as a failsafe mechanism, not because we expect
 153            there to be dirty buffers on any of the other lists. */
 154         for(nlist = 0; nlist < NR_LIST; nlist++)
 155          {
 156          repeat1:
 157                  bh = lru_list[nlist];
 158                  if(!bh) continue;
 159                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 160                          if(bh->b_list != nlist) goto repeat1;
 161                          next = bh->b_next_free;
 162                          if(!lru_list[nlist]) break;
 163                          if (dev && bh->b_dev != dev)
 164                                   continue;
 165                          if (bh->b_lock)
 166                           {
 167                                   /* Buffer is locked; skip it unless wait is
 168                                      requested AND pass > 0. */
 169                                   if (!wait || !pass) {
 170                                           retry = 1;
 171                                           continue;
 172                                   }
 173                                   wait_on_buffer (bh);
 174                                   goto repeat2;
 175                           }
 176                          /* If an unlocked buffer is not uptodate, there has
 177                              been an IO error. Skip it. */
 178                          if (wait && bh->b_req && !bh->b_lock &&
 179                              !bh->b_dirt && !bh->b_uptodate) {
 180                                   err = 1;
 181                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 182                                   continue;
 183                           }
 184                          /* Don't write clean buffers.  Don't write ANY buffers
 185                             on the third pass. */
 186                          if (!bh->b_dirt || pass>=2)
 187                                   continue;
 188                          /* don't bother about locked buffers */
 189                          if (bh->b_lock)
 190                                  continue;
 191                          bh->b_count++;
 192                          bh->b_flushtime = 0;
 193                          ll_rw_block(WRITE, 1, &bh);
 194 
 195                          if(nlist != BUF_DIRTY) { 
 196                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 197                                  ncount++;
 198                          };
 199                          bh->b_count--;
 200                          retry = 1;
 201                  }
 202          }
 203         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 204         
 205         /* If we are waiting for the sync to succeed, and if any dirty
 206            blocks were written, then repeat; on the second pass, only
 207            wait for buffers being written (do not pass to write any
 208            more buffers on the second pass). */
 209         if (wait && retry && ++pass<=2)
 210                  goto repeat;
 211         return err;
 212 }
 213 
 214 void sync_dev(dev_t dev)
 215 {
 216         sync_buffers(dev, 0);
 217         sync_supers(dev);
 218         sync_inodes(dev);
 219         sync_buffers(dev, 0);
 220 }
 221 
 222 int fsync_dev(dev_t dev)
 223 {
 224         sync_buffers(dev, 0);
 225         sync_supers(dev);
 226         sync_inodes(dev);
 227         return sync_buffers(dev, 1);
 228 }
 229 
 230 asmlinkage int sys_sync(void)
 231 {
 232         sync_dev(0);
 233         return 0;
 234 }
 235 
 236 int file_fsync (struct inode *inode, struct file *filp)
 237 {
 238         return fsync_dev(inode->i_dev);
 239 }
 240 
 241 asmlinkage int sys_fsync(unsigned int fd)
 242 {
 243         struct file * file;
 244         struct inode * inode;
 245 
 246         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 247                 return -EBADF;
 248         if (!file->f_op || !file->f_op->fsync)
 249                 return -EINVAL;
 250         if (file->f_op->fsync(inode,file))
 251                 return -EIO;
 252         return 0;
 253 }
 254 
 255 void invalidate_buffers(dev_t dev)
 256 {
 257         int i;
 258         int nlist;
 259         struct buffer_head * bh;
 260 
 261         for(nlist = 0; nlist < NR_LIST; nlist++) {
 262                 bh = lru_list[nlist];
 263                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 264                      bh = bh->b_next_free) {
 265                         if (bh->b_dev != dev)
 266                                  continue;
 267                         wait_on_buffer(bh);
 268                         if (bh->b_dev == dev)
 269                                  bh->b_flushtime = bh->b_uptodate = 
 270                                           bh->b_dirt = bh->b_req = 0;
 271                 }
 272         }
 273 }
 274 
 275 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 276 #define hash(dev,block) hash_table[_hashfn(dev,block)]
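      /*
       * Hash buckets are simple chains: a buffer hashes on (dev ^ block)
       * modulo nr_hash, and all buffers in a bucket are linked through
       * b_next/b_prev with hash(dev,block) pointing at the chain head.
       */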
 277 
 278 static inline void remove_from_hash_queue(struct buffer_head * bh)
 279 {
 280         if (bh->b_next)
 281                 bh->b_next->b_prev = bh->b_prev;
 282         if (bh->b_prev)
 283                 bh->b_prev->b_next = bh->b_next;
 284         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 285                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 286         bh->b_next = bh->b_prev = NULL;
 287 }
 288 
 289 static inline void remove_from_lru_list(struct buffer_head * bh)
 290 {
 291         if (!(bh->b_prev_free) || !(bh->b_next_free))
 292                 panic("VFS: LRU block list corrupted");
 293         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 294         bh->b_prev_free->b_next_free = bh->b_next_free;
 295         bh->b_next_free->b_prev_free = bh->b_prev_free;
 296 
 297         if (lru_list[bh->b_list] == bh)
 298                  lru_list[bh->b_list] = bh->b_next_free;
 299         if(lru_list[bh->b_list] == bh)
 300                  lru_list[bh->b_list] = NULL;
 301         bh->b_next_free = bh->b_prev_free = NULL;
 302 }
 303 
 304 static inline void remove_from_free_list(struct buffer_head * bh)
 305 {
 306         int isize = BUFSIZE_INDEX(bh->b_size);
 307         if (!(bh->b_prev_free) || !(bh->b_next_free))
 308                 panic("VFS: Free block list corrupted");
 309         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 310         if(!free_list[isize])
 311                  panic("Free list empty");
 312         nr_free[isize]--;
 313         if(bh->b_next_free == bh)
 314                  free_list[isize] = NULL;
 315         else {
 316                 bh->b_prev_free->b_next_free = bh->b_next_free;
 317                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 318                 if (free_list[isize] == bh)
 319                          free_list[isize] = bh->b_next_free;
 320         };
 321         bh->b_next_free = bh->b_prev_free = NULL;
 322 }
 323 
 324 static inline void remove_from_queues(struct buffer_head * bh)
 325 {
 326         if(bh->b_dev == 0xffff) {
 327                 remove_from_free_list(bh); /* Free list entries should not be
 328                                               in the hash queue */
 329                 return;
 330         };
 331         nr_buffers_type[bh->b_list]--;
 332         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 333         remove_from_hash_queue(bh);
 334         remove_from_lru_list(bh);
 335 }
 336 
 337 static inline void put_last_lru(struct buffer_head * bh)
 338 {
 339         if (!bh)
 340                 return;
 341         if (bh == lru_list[bh->b_list]) {
 342                 lru_list[bh->b_list] = bh->b_next_free;
 343                 return;
 344         }
 345         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 346         remove_from_lru_list(bh);
  347 /* add to back of the lru list */
 348 
 349         if(!lru_list[bh->b_list]) {
 350                 lru_list[bh->b_list] = bh;
 351                 lru_list[bh->b_list]->b_prev_free = bh;
 352         };
 353 
 354         bh->b_next_free = lru_list[bh->b_list];
 355         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 356         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 357         lru_list[bh->b_list]->b_prev_free = bh;
 358 }
 359 
 360 static inline void put_last_free(struct buffer_head * bh)
 361 {
 362         int isize;
 363         if (!bh)
 364                 return;
 365 
 366         isize = BUFSIZE_INDEX(bh->b_size);      
 367         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 368 /* add to back of free list */
 369 
 370         if(!free_list[isize]) {
 371                 free_list[isize] = bh;
 372                 bh->b_prev_free = bh;
 373         };
 374 
 375         nr_free[isize]++;
 376         bh->b_next_free = free_list[isize];
 377         bh->b_prev_free = free_list[isize]->b_prev_free;
 378         free_list[isize]->b_prev_free->b_next_free = bh;
 379         free_list[isize]->b_prev_free = bh;
 380 }
 381 
 382 static inline void insert_into_queues(struct buffer_head * bh)
 383 {
 384 /* put at end of free list */
 385 
 386         if(bh->b_dev == 0xffff) {
 387                 put_last_free(bh);
 388                 return;
 389         };
 390         if(!lru_list[bh->b_list]) {
 391                 lru_list[bh->b_list] = bh;
 392                 bh->b_prev_free = bh;
 393         };
 394         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 395         bh->b_next_free = lru_list[bh->b_list];
 396         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 397         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 398         lru_list[bh->b_list]->b_prev_free = bh;
 399         nr_buffers_type[bh->b_list]++;
 400         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 401 /* put the buffer in new hash-queue if it has a device */
 402         bh->b_prev = NULL;
 403         bh->b_next = NULL;
 404         if (!bh->b_dev)
 405                 return;
 406         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 407         hash(bh->b_dev,bh->b_blocknr) = bh;
 408         if (bh->b_next)
 409                 bh->b_next->b_prev = bh;
 410 }
 411 
 412 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 413 {               
 414         struct buffer_head * tmp;
 415 
 416         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 417                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 418                         if (tmp->b_size == size)
 419                                 return tmp;
 420                         else {
 421                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 422                                                         MAJOR(dev), MINOR(dev));
 423                                 return NULL;
 424                         }
 425         return NULL;
 426 }
 427 
 428 /*
 429  * Why like this, I hear you say... The reason is race-conditions.
 430  * As we don't lock buffers (unless we are reading them, that is),
 431  * something might happen to it while we sleep (ie a read-error
 432  * will force it bad). This shouldn't really happen currently, but
 433  * the code is ready.
 434  */
 435 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 436 {
 437         struct buffer_head * bh;
 438 
 439         for (;;) {
 440                 if (!(bh=find_buffer(dev,block,size)))
 441                         return NULL;
 442                 bh->b_count++;
 443                 wait_on_buffer(bh);
 444                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 445                         return bh;
 446                 bh->b_count--;
 447         }
 448 }
 449 
 450 void set_blocksize(dev_t dev, int size)
 451 {
 452         int i, nlist;
 453         struct buffer_head * bh, *bhnext;
 454 
 455         if (!blksize_size[MAJOR(dev)])
 456                 return;
 457 
 458         switch(size) {
 459                 default: panic("Invalid blocksize passed to set_blocksize");
 460                 case 512: case 1024: case 2048: case 4096:;
 461         }
 462 
 463         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 464                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 465                 return;
 466         }
 467         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 468                 return;
 469         sync_buffers(dev, 2);
 470         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 471 
 472   /* We need to be quite careful how we do this - we are moving entries
 473      around on the free list, and we can get in a loop if we are not careful.*/
 474 
 475         for(nlist = 0; nlist < NR_LIST; nlist++) {
 476                 bh = lru_list[nlist];
 477                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 478                         if(!bh) break;
 479                         bhnext = bh->b_next_free; 
 480                         if (bh->b_dev != dev)
 481                                  continue;
 482                         if (bh->b_size == size)
 483                                  continue;
 484                         
 485                         wait_on_buffer(bh);
 486                         if (bh->b_dev == dev && bh->b_size != size) {
 487                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 488                                          bh->b_flushtime = 0;
 489                         };
 490                         remove_from_hash_queue(bh);
 491                 }
 492         }
 493 }
 494 
 495 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
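      /* BADNESS() is 0 for a clean, unlocked buffer, 1 if it is locked,
         2 if dirty and 3 if both; refill_freelist() only takes buffers
         with BADNESS() == 0. */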
 496 
 497 void refill_freelist(int size)
 498 {
 499         struct buffer_head * bh, * tmp;
 500         struct buffer_head * candidate[NR_LIST];
 501         unsigned int best_time, winner;
 502         int isize = BUFSIZE_INDEX(size);
 503         int buffers[NR_LIST];
 504         int i;
 505         int needed;
 506 
 507         /* First see if we even need this.  Sometimes it is advantageous
  508          to request some blocks in a filesystem that we know we will
 509          be needing ahead of time. */
 510 
 511         if (nr_free[isize] > 100)
 512                 return;
 513 
 514         /* If there are too many dirty buffers, we wake up the update process
 515            now so as to ensure that there are still clean buffers available
 516            for user processes to use (and dirty) */
 517         
 518         /* We are going to try and locate this much memory */
  519         needed = bdf_prm.b_un.nrefill * size;
 520 
 521         while (nr_free_pages > min_free_pages && needed > 0 &&
 522                grow_buffers(GFP_BUFFER, size)) {
 523                 needed -= PAGE_SIZE;
 524         }
 525 
 526         if(needed <= 0) return;
 527 
 528         /* See if there are too many buffers of a different size.
 529            If so, victimize them */
 530 
 531         while(maybe_shrink_lav_buffers(size))
 532          {
 533                  if(!grow_buffers(GFP_BUFFER, size)) break;
 534                  needed -= PAGE_SIZE;
 535                  if(needed <= 0) return;
 536          };
 537 
 538         /* OK, we cannot grow the buffer cache, now try and get some
 539            from the lru list */
 540 
 541         /* First set the candidate pointers to usable buffers.  This
 542            should be quick nearly all of the time. */
 543 
 544 repeat0:
 545         for(i=0; i<NR_LIST; i++){
 546                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 547                    nr_buffers_type[i] == 0) {
 548                         candidate[i] = NULL;
 549                         buffers[i] = 0;
 550                         continue;
 551                 }
 552                 buffers[i] = nr_buffers_type[i];
 553                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 554                  {
 555                          if(buffers[i] < 0) panic("Here is the problem");
 556                          tmp = bh->b_next_free;
 557                          if (!bh) break;
 558                          
 559                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 560                              bh->b_dirt) {
 561                                  refile_buffer(bh);
 562                                  continue;
 563                          };
 564                          
 565                          if (bh->b_count || bh->b_size != size)
 566                                   continue;
 567                          
 568                          /* Buffers are written in the order they are placed 
 569                             on the locked list. If we encounter a locked
 570                             buffer here, this means that the rest of them
 571                             are also locked */
 572                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 573                                  buffers[i] = 0;
 574                                  break;
 575                          }
 576                          
 577                          if (BADNESS(bh)) continue;
 578                          break;
 579                  };
 580                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 581                 else candidate[i] = bh;
 582                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 583         }
 584         
 585  repeat:
 586         if(needed <= 0) return;
 587         
 588         /* Now see which candidate wins the election */
 589         
 590         winner = best_time = UINT_MAX;  
 591         for(i=0; i<NR_LIST; i++){
 592                 if(!candidate[i]) continue;
 593                 if(candidate[i]->b_lru_time < best_time){
 594                         best_time = candidate[i]->b_lru_time;
 595                         winner = i;
 596                 }
 597         }
 598         
 599         /* If we have a winner, use it, and then get a new candidate from that list */
 600         if(winner != UINT_MAX) {
 601                 i = winner;
 602                 bh = candidate[i];
 603                 candidate[i] = bh->b_next_free;
 604                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 605                 if (bh->b_count || bh->b_size != size)
 606                          panic("Busy buffer in candidate list\n");
 607                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 608                          panic("Shared buffer in candidate list\n");
 609                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 610                 
 611                 if(bh->b_dev == 0xffff) panic("Wrong list");
 612                 remove_from_queues(bh);
 613                 bh->b_dev = 0xffff;
 614                 put_last_free(bh);
 615                 needed -= bh->b_size;
 616                 buffers[i]--;
 617                 if(buffers[i] < 0) panic("Here is the problem");
 618                 
 619                 if(buffers[i] == 0) candidate[i] = NULL;
 620                 
 621                 /* Now all we need to do is advance the candidate pointer
 622                    from the winner list to the next usable buffer */
 623                 if(candidate[i] && buffers[i] > 0){
 624                         if(buffers[i] <= 0) panic("Here is another problem");
 625                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 626                                 if(buffers[i] < 0) panic("Here is the problem");
 627                                 tmp = bh->b_next_free;
 628                                 if (!bh) break;
 629                                 
 630                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 631                                     bh->b_dirt) {
 632                                         refile_buffer(bh);
 633                                         continue;
 634                                 };
 635                                 
 636                                 if (bh->b_count || bh->b_size != size)
 637                                          continue;
 638                                 
 639                                 /* Buffers are written in the order they are
 640                                    placed on the locked list.  If we encounter
 641                                    a locked buffer here, this means that the
 642                                    rest of them are also locked */
 643                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 644                                         buffers[i] = 0;
 645                                         break;
 646                                 }
 647               
 648                                 if (BADNESS(bh)) continue;
 649                                 break;
 650                         };
 651                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 652                         else candidate[i] = bh;
 653                         if(candidate[i] && candidate[i]->b_count) 
 654                                  panic("Here is the problem");
 655                 }
 656                 
 657                 goto repeat;
 658         }
 659         
 660         if(needed <= 0) return;
 661         
 662         /* Too bad, that was not enough. Try a little harder to grow some. */
 663         
 664         if (nr_free_pages > 5) {
 665                 if (grow_buffers(GFP_BUFFER, size)) {
 666                         needed -= PAGE_SIZE;
 667                         goto repeat0;
 668                 };
 669         }
 670         
 671         /* and repeat until we find something good */
 672         if (!grow_buffers(GFP_ATOMIC, size))
 673                 wakeup_bdflush(1);
 674         needed -= PAGE_SIZE;
 675         goto repeat0;
 676 }
 677 
 678 /*
 679  * Ok, this is getblk, and it isn't very clear, again to hinder
 680  * race-conditions. Most of the code is seldom used, (ie repeating),
 681  * so it should be much more efficient than it looks.
 682  *
 683  * The algorithm is changed: hopefully better, and an elusive bug removed.
 684  *
 685  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 686  * when the filesystem starts to get full of dirty blocks (I hope).
 687  */
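      /*
       * Note that getblk() only finds or allocates the buffer; a freshly
       * allocated one comes back with b_uptodate == 0 and it is up to the
       * caller (see bread() below) to schedule the actual read.
       */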
 688 struct buffer_head * getblk(dev_t dev, int block, int size)
 689 {
 690         struct buffer_head * bh;
 691         int isize = BUFSIZE_INDEX(size);
 692 
 693         /* Update this for the buffer size lav. */
 694         buffer_usage[isize]++;
 695 
 696         /* If there are too many dirty buffers, we wake up the update process
 697            now so as to ensure that there are still clean buffers available
 698            for user processes to use (and dirty) */
 699 repeat:
 700         bh = get_hash_table(dev, block, size);
 701         if (bh) {
 702                 if (bh->b_uptodate && !bh->b_dirt)
 703                          put_last_lru(bh);
 704                 if(!bh->b_dirt) bh->b_flushtime = 0;
 705                 return bh;
 706         }
 707 
 708         while(!free_list[isize]) refill_freelist(size);
 709         
 710         if (find_buffer(dev,block,size))
 711                  goto repeat;
 712 
 713         bh = free_list[isize];
 714         remove_from_free_list(bh);
 715 
  716 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 717 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 718         bh->b_count=1;
 719         bh->b_dirt=0;
 720         bh->b_lock=0;
 721         bh->b_uptodate=0;
 722         bh->b_flushtime = 0;
 723         bh->b_req=0;
 724         bh->b_dev=dev;
 725         bh->b_blocknr=block;
 726         insert_into_queues(bh);
 727         return bh;
 728 }
 729 
 730 void set_writetime(struct buffer_head * buf, int flag)
 731 {
 732         int newtime;
 733 
 734         if (buf->b_dirt){
  735                 /* Set the flush deadline, keeping any earlier deadline already set */
 736                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 737                                      bdf_prm.b_un.age_buffer);
 738                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 739                          buf->b_flushtime = newtime;
 740         } else {
 741                 buf->b_flushtime = 0;
 742         }
 743 }
 744 
 745 
 746 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 747                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
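      /* The index into buffer_disposition[] is built in refile_buffer()
         from three bits: bit 0 = page shared (mem_map count != 1),
         bit 1 = locked, bit 2 = dirty.  A dirty buffer always goes to
         BUF_DIRTY; otherwise a shared page wins over a lock. */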
 748 
 749 void refile_buffer(struct buffer_head * buf){
 750         int i, dispose;
 751         i = 0;
 752         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 753         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 754         if(buf->b_lock) i |= 2;
 755         if(buf->b_dirt) i |= 4;
 756         dispose = buffer_disposition[i];
 757         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 758                  dispose = BUF_UNSHARED;
 759         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 760         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 761         if(dispose != buf->b_list)  {
 762                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 763                          buf->b_lru_time = jiffies;
 764                 if(dispose == BUF_LOCKED && 
 765                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 766                          dispose = BUF_LOCKED1;
 767                 remove_from_queues(buf);
 768                 buf->b_list = dispose;
 769                 insert_into_queues(buf);
 770                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 771                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 772                    bdf_prm.b_un.nfract/100)
 773                          wakeup_bdflush(0);
 774         }
 775 }
 776 
 777 void brelse(struct buffer_head * buf)
 778 {
 779         if (!buf)
 780                 return;
 781         wait_on_buffer(buf);
 782 
 783         /* If dirty, mark the time this buffer should be written back */
 784         set_writetime(buf, 0);
 785         refile_buffer(buf);
 786 
 787         if (buf->b_count) {
 788                 if (--buf->b_count)
 789                         return;
 790                 wake_up(&buffer_wait);
 791                 return;
 792         }
 793         printk("VFS: brelse: Trying to free free buffer\n");
 794 }
 795 
 796 /*
 797  * bread() reads a specified block and returns the buffer that contains
 798  * it. It returns NULL if the block was unreadable.
 799  */
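      /*
       * A typical caller looks something like
       *
       *      bh = bread(dev, block, BLOCK_SIZE);
       *      if (bh) {
       *              ... use bh->b_data ...
       *              brelse(bh);
       *      }
       *
       * The buffer comes back with its b_count raised, so brelse() must be
       * called once the caller is done with the data.
       */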
 800 struct buffer_head * bread(dev_t dev, int block, int size)
 801 {
 802         struct buffer_head * bh;
 803 
 804         if (!(bh = getblk(dev, block, size))) {
 805                 printk("VFS: bread: READ error on device %d/%d\n",
 806                                                 MAJOR(dev), MINOR(dev));
 807                 return NULL;
 808         }
 809         if (bh->b_uptodate)
 810                 return bh;
 811         ll_rw_block(READ, 1, &bh);
 812         wait_on_buffer(bh);
 813         if (bh->b_uptodate)
 814                 return bh;
 815         brelse(bh);
 816         return NULL;
 817 }
 818 
 819 /*
  820  * Ok, breada can be used as bread, but additionally starts read-ahead
  821  * on the blocks that follow, limited by the file size and by the
  822  * per-device read_ahead[] setting.
 823  */
 824 
 825 #define NBUF 16
 826 
 827 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 828         unsigned int pos, unsigned int filesize)
 829 {
 830         struct buffer_head * bhlist[NBUF];
 831         unsigned int blocks;
 832         struct buffer_head * bh;
 833         int index;
 834         int i, j;
 835 
 836         if (pos >= filesize)
 837                 return NULL;
 838 
 839         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 840                 return NULL;
 841 
 842         index = BUFSIZE_INDEX(bh->b_size);
 843 
 844         if (bh->b_uptodate)
 845                 return bh;
 846 
 847         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 848 
 849         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 850                 blocks = read_ahead[MAJOR(dev)] >> index;
 851         if (blocks > NBUF)
 852                 blocks = NBUF;
 853         
 854         bhlist[0] = bh;
 855         j = 1;
 856         for(i=1; i<blocks; i++) {
 857                 bh = getblk(dev,block+i,bufsize);
 858                 if (bh->b_uptodate) {
 859                         brelse(bh);
 860                         break;
 861                 }
 862                 bhlist[j++] = bh;
 863         }
 864 
 865         /* Request the read for these buffers, and then release them */
 866         ll_rw_block(READ, j, bhlist);
 867 
 868         for(i=1; i<j; i++)
 869                 brelse(bhlist[i]);
 870 
 871         /* Wait for this buffer, and then continue on */
 872         bh = bhlist[0];
 873         wait_on_buffer(bh);
 874         if (bh->b_uptodate)
 875                 return bh;
 876         brelse(bh);
 877         return NULL;
 878 }
 879 
 880 /*
 881  * See fs/inode.c for the weird use of volatile..
 882  */
 883 static void put_unused_buffer_head(struct buffer_head * bh)
 884 {
 885         struct wait_queue * wait;
 886 
 887         wait = ((volatile struct buffer_head *) bh)->b_wait;
 888         memset(bh,0,sizeof(*bh));
 889         ((volatile struct buffer_head *) bh)->b_wait = wait;
 890         bh->b_next_free = unused_list;
 891         unused_list = bh;
 892 }
 893 
 894 static void get_more_buffer_heads(void)
 895 {
 896         int i;
 897         struct buffer_head * bh;
 898 
 899         if (unused_list)
 900                 return;
 901 
 902         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 903                 return;
 904 
 905         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 906                 bh->b_next_free = unused_list;  /* only make link */
 907                 unused_list = bh++;
 908         }
 909 }
 910 
 911 static struct buffer_head * get_unused_buffer_head(void)
 912 {
 913         struct buffer_head * bh;
 914 
 915         get_more_buffer_heads();
 916         if (!unused_list)
 917                 return NULL;
 918         bh = unused_list;
 919         unused_list = bh->b_next_free;
 920         bh->b_next_free = NULL;
 921         bh->b_data = NULL;
 922         bh->b_size = 0;
 923         bh->b_req = 0;
 924         return bh;
 925 }
 926 
 927 /*
 928  * Create the appropriate buffers when given a page for data area and
 929  * the size of each buffer.. Use the bh->b_this_page linked list to
 930  * follow the buffers created.  Return NULL if unable to create more
 931  * buffers.
 932  */
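      /*
       * The b_this_page chain built here ends with NULL; the callers
       * (grow_buffers, try_to_load_aligned) close it into a ring once the
       * whole page has been set up.
       */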
 933 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 934 {
 935         struct buffer_head *bh, *head;
 936         unsigned long offset;
 937 
 938         head = NULL;
 939         offset = PAGE_SIZE;
 940         while ((offset -= size) < PAGE_SIZE) {
 941                 bh = get_unused_buffer_head();
 942                 if (!bh)
 943                         goto no_grow;
 944                 bh->b_this_page = head;
 945                 head = bh;
 946                 bh->b_data = (char *) (page+offset);
 947                 bh->b_size = size;
 948                 bh->b_dev = 0xffff;  /* Flag as unused */
 949         }
 950         return head;
 951 /*
 952  * In case anything failed, we just free everything we got.
 953  */
 954 no_grow:
 955         bh = head;
 956         while (bh) {
 957                 head = bh;
 958                 bh = bh->b_this_page;
 959                 put_unused_buffer_head(head);
 960         }
 961         return NULL;
 962 }
 963 
 964 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 965 {
 966         int i;
 967         int bhnum = 0;
 968         struct buffer_head * bhr[8];
 969 
 970         for (i = 0 ; i < nrbuf ; i++) {
 971                 if (bh[i] && !bh[i]->b_uptodate)
 972                         bhr[bhnum++] = bh[i];
 973         }
 974         if (bhnum)
 975                 ll_rw_block(READ, bhnum, bhr);
 976         for (i = 0 ; i < nrbuf ; i++) {
 977                 if (bh[i]) {
 978                         wait_on_buffer(bh[i]);
 979                 }
 980         }
 981 }
 982 
 983 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 984         dev_t dev, int *b, int size)
 985 {
 986         struct buffer_head * bh[8];
 987         unsigned long page;
 988         unsigned long offset;
 989         int block;
 990         int nrbuf;
 991 
 992         page = (unsigned long) first->b_data;
 993         if (page & ~PAGE_MASK) {
 994                 brelse(first);
 995                 return 0;
 996         }
 997         mem_map[MAP_NR(page)]++;
 998         bh[0] = first;
 999         nrbuf = 1;
1000         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1001                 block = *++b;
1002                 if (!block)
1003                         goto no_go;
1004                 first = get_hash_table(dev, block, size);
1005                 if (!first)
1006                         goto no_go;
1007                 bh[nrbuf++] = first;
1008                 if (page+offset != (unsigned long) first->b_data)
1009                         goto no_go;
1010         }
1011         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1012         while (nrbuf-- > 0)
1013                 brelse(bh[nrbuf]);
1014         free_page(address);
1015         ++current->mm->min_flt;
1016         return page;
1017 no_go:
1018         while (nrbuf-- > 0)
1019                 brelse(bh[nrbuf]);
1020         free_page(page);
1021         return 0;
1022 }
1023 
1024 static unsigned long try_to_load_aligned(unsigned long address,
1025         dev_t dev, int b[], int size)
1026 {
1027         struct buffer_head * bh, * tmp, * arr[8];
1028         unsigned long offset;
1029         int isize = BUFSIZE_INDEX(size);
1030         int * p;
1031         int block;
1032 
1033         bh = create_buffers(address, size);
1034         if (!bh)
1035                 return 0;
1036         /* do any of the buffers already exist? punt if so.. */
1037         p = b;
1038         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1039                 block = *(p++);
1040                 if (!block)
1041                         goto not_aligned;
1042                 if (find_buffer(dev, block, size))
1043                         goto not_aligned;
1044         }
1045         tmp = bh;
1046         p = b;
1047         block = 0;
1048         while (1) {
1049                 arr[block++] = bh;
1050                 bh->b_count = 1;
1051                 bh->b_dirt = 0;
1052                 bh->b_flushtime = 0;
1053                 bh->b_uptodate = 0;
1054                 bh->b_req = 0;
1055                 bh->b_dev = dev;
1056                 bh->b_blocknr = *(p++);
1057                 bh->b_list = BUF_CLEAN;
1058                 nr_buffers++;
1059                 nr_buffers_size[isize]++;
1060                 insert_into_queues(bh);
1061                 if (bh->b_this_page)
1062                         bh = bh->b_this_page;
1063                 else
1064                         break;
1065         }
1066         buffermem += PAGE_SIZE;
1067         bh->b_this_page = tmp;
1068         mem_map[MAP_NR(address)]++;
1069         buffer_pages[MAP_NR(address)] = bh;
1070         read_buffers(arr,block);
1071         while (block-- > 0)
1072                 brelse(arr[block]);
1073         ++current->mm->maj_flt;
1074         return address;
1075 not_aligned:
1076         while ((tmp = bh) != NULL) {
1077                 bh = bh->b_this_page;
1078                 put_unused_buffer_head(tmp);
1079         }
1080         return 0;
1081 }
1082 
1083 /*
1084  * Try-to-share-buffers tries to minimize memory use by trying to keep
1085  * both code pages and the buffer area in the same page. This is done by
1086  * (a) checking if the buffers are already aligned correctly in memory and
1087  * (b) if none of the buffer heads are in memory at all, trying to load
1088  * them into memory the way we want them.
1089  *
1090  * This doesn't guarantee that the memory is shared, but should under most
1091  * circumstances work very well indeed (ie >90% sharing of code pages on
1092  * demand-loadable executables).
1093  */
1094 static inline unsigned long try_to_share_buffers(unsigned long address,
1095         dev_t dev, int *b, int size)
1096 {
1097         struct buffer_head * bh;
1098         int block;
1099 
1100         block = b[0];
1101         if (!block)
1102                 return 0;
1103         bh = get_hash_table(dev, block, size);
1104         if (bh)
1105                 return check_aligned(bh, address, dev, b, size);
1106         return try_to_load_aligned(address, dev, b, size);
1107 }
1108 
1109 /*
1110  * bread_page reads four buffers into memory at the desired address. It's
1111  * a function of its own, as there is some speed to be got by reading them
1112  * all at the same time, not waiting for one to be read, and then another
1113  * etc. This also allows us to optimize memory usage by sharing code pages
1114  * and filesystem buffers..
1115  */
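      /*
       * If no_share is set (or sharing fails), the blocks are read into
       * ordinary buffers and then copied into the target page with memcpy();
       * otherwise try_to_share_buffers() arranges for the buffer data itself
       * to live in that page.
       */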
1116 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1117 {
1118         struct buffer_head * bh[8];
1119         unsigned long where;
1120         int i, j;
1121 
1122         if (!no_share) {
1123                 where = try_to_share_buffers(address, dev, b, size);
1124                 if (where)
1125                         return where;
1126         }
1127         ++current->mm->maj_flt;
1128         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1129                 bh[i] = NULL;
1130                 if (b[i])
1131                         bh[i] = getblk(dev, b[i], size);
1132         }
1133         read_buffers(bh,i);
1134         where = address;
1135         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1136                 if (bh[i]) {
1137                         if (bh[i]->b_uptodate)
1138                                 memcpy((void *) where, bh[i]->b_data, size);
1139                         brelse(bh[i]);
1140                 }
1141         }
1142         return address;
1143 }
1144 
1145 /*
1146  * Try to increase the number of buffers available: the size argument
1147  * is used to determine what kind of buffers we want.
1148  */
1149 static int grow_buffers(int pri, int size)
1150 {
1151         unsigned long page;
1152         struct buffer_head *bh, *tmp;
1153         struct buffer_head * insert_point;
1154         int isize;
1155 
1156         if ((size & 511) || (size > PAGE_SIZE)) {
1157                 printk("VFS: grow_buffers: size = %d\n",size);
1158                 return 0;
1159         }
1160 
1161         isize = BUFSIZE_INDEX(size);
1162 
1163         if (!(page = __get_free_page(pri)))
1164                 return 0;
1165         bh = create_buffers(page, size);
1166         if (!bh) {
1167                 free_page(page);
1168                 return 0;
1169         }
1170 
1171         insert_point = free_list[isize];
1172 
1173         tmp = bh;
1174         while (1) {
1175                 nr_free[isize]++;
1176                 if (insert_point) {
1177                         tmp->b_next_free = insert_point->b_next_free;
1178                         tmp->b_prev_free = insert_point;
1179                         insert_point->b_next_free->b_prev_free = tmp;
1180                         insert_point->b_next_free = tmp;
1181                 } else {
1182                         tmp->b_prev_free = tmp;
1183                         tmp->b_next_free = tmp;
1184                 }
1185                 insert_point = tmp;
1186                 ++nr_buffers;
1187                 if (tmp->b_this_page)
1188                         tmp = tmp->b_this_page;
1189                 else
1190                         break;
1191         }
1192         free_list[isize] = bh;
1193         buffer_pages[MAP_NR(page)] = bh;
1194         tmp->b_this_page = bh;
1195         wake_up(&buffer_wait);
1196         buffermem += PAGE_SIZE;
1197         return 1;
1198 }
1199 
1200 /*
1201  * try_to_free() checks if all the buffers on this particular page
 1202  * are unused, and frees the page if so.
1203  */
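      /*
       * The return value is non-zero only if the page really became free,
       * i.e. its mem_map count dropped to zero.  *bhp is updated so that a
       * caller walking a list is left with a valid pointer (or NULL if the
       * list became empty).
       */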
1204 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1205 {
1206         unsigned long page;
1207         struct buffer_head * tmp, * p;
1208         int isize = BUFSIZE_INDEX(bh->b_size);
1209 
1210         *bhp = bh;
1211         page = (unsigned long) bh->b_data;
1212         page &= PAGE_MASK;
1213         tmp = bh;
1214         do {
1215                 if (!tmp)
1216                         return 0;
1217                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1218                         return 0;
1219                 tmp = tmp->b_this_page;
1220         } while (tmp != bh);
1221         tmp = bh;
1222         do {
1223                 p = tmp;
1224                 tmp = tmp->b_this_page;
1225                 nr_buffers--;
1226                 nr_buffers_size[isize]--;
1227                 if (p == *bhp)
1228                   {
1229                     *bhp = p->b_prev_free;
1230                     if (p == *bhp) /* Was this the last in the list? */
1231                       *bhp = NULL;
1232                   }
1233                 remove_from_queues(p);
1234                 put_unused_buffer_head(p);
1235         } while (tmp != bh);
1236         buffermem -= PAGE_SIZE;
1237         buffer_pages[MAP_NR(page)] = NULL;
1238         free_page(page);
1239         return !mem_map[MAP_NR(page)];
1240 }
1241 
1242 
1243 /*
1244  * Consult the load average for buffers and decide whether or not
1245  * we should shrink the buffers of one size or not.  If we decide yes,
 1246  * do it and return 1.  Else return 0.  Do not attempt to shrink the
 1247  * size that is specified.
1248  *
1249  * I would prefer not to use a load average, but the way things are now it
1250  * seems unavoidable.  The way to get rid of it would be to force clustering
1251  * universally, so that when we reclaim buffers we always reclaim an entire
1252  * page.  Doing this would mean that we all need to move towards QMAGIC.
1253  */
1254 
1255 static int maybe_shrink_lav_buffers(int size)
1256 {          
1257         int nlist;
1258         int isize;
1259         int total_lav, total_n_buffers, n_sizes;
1260         
1261         /* Do not consider the shared buffers since they would not tend
1262            to have getblk called very often, and this would throw off
1263            the lav.  They are not easily reclaimable anyway (let the swapper
1264            make the first move). */
1265   
1266         total_lav = total_n_buffers = n_sizes = 0;
1267         for(nlist = 0; nlist < NR_SIZES; nlist++)
1268          {
1269                  total_lav += buffers_lav[nlist];
1270                  if(nr_buffers_size[nlist]) n_sizes++;
1271                  total_n_buffers += nr_buffers_size[nlist];
1272                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1273          }
1274         
1275         /* See if we have an excessive number of buffers of a particular
1276            size - if so, victimize that bunch. */
1277   
1278         isize = (size ? BUFSIZE_INDEX(size) : -1);
1279         
1280         if (n_sizes > 1)
1281                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1282                   {
1283                           if(nlist == isize) continue;
1284                           if(nr_buffers_size[nlist] &&
1285                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1286                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1287                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1288                                             return 1;
1289                   }
1290         return 0;
1291 }
1292 /*
1293  * Try to free up some pages by shrinking the buffer-cache
1294  *
1295  * Priority tells the routine how hard to try to shrink the
1296  * buffers: 3 means "don't bother too much", while a value
1297  * of 0 means "we'd better get some free pages now".
1298  */
1299 int shrink_buffers(unsigned int priority)
1300 {
1301         if (priority < 2) {
1302                 sync_buffers(0,0);
1303         }
1304 
1305         if(priority == 2) wakeup_bdflush(1);
1306 
1307         if(maybe_shrink_lav_buffers(0)) return 1;
1308 
1309         /* No good candidate size - take any size we can find */
1310         return shrink_specific_buffers(priority, 0);
1311 }
1312 
1313 static int shrink_specific_buffers(unsigned int priority, int size)
1314 {
1315         struct buffer_head *bh;
1316         int nlist;
1317         int i, isize, isize1;
1318 
1319 #ifdef DEBUG
1320         if(size) printk("Shrinking buffers of size %d\n", size);
1321 #endif
1322         /* First try the free lists, and see if we can get a complete page
1323            from here */
1324         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1325 
1326         for(isize = 0; isize<NR_SIZES; isize++){
1327                 if(isize1 != -1 && isize1 != isize) continue;
1328                 bh = free_list[isize];
1329                 if(!bh) continue;
1330                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1331                         if (bh->b_count || !bh->b_this_page)
1332                                  continue;
1333                         if (try_to_free(bh, &bh))
1334                                  return 1;
1335                         if(!bh) break; /* Some interrupt must have used it after we
1336                                           freed the page.  No big deal - keep looking */
1337                 }
1338         }
1339         
1340         /* Not enough in the free lists, now try the lru list */
1341         
1342         for(nlist = 0; nlist < NR_LIST; nlist++) {
1343         repeat1:
1344                 if(priority > 3 && nlist == BUF_SHARED) continue;
1345                 bh = lru_list[nlist];
1346                 if(!bh) continue;
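                /* Only scan a fraction of the list: the lower the priority value
                   (i.e. the more urgent the request), the more buffers we look at. */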
1347                 i = nr_buffers_type[nlist] >> priority;
1348                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1349                         /* We may have stalled while waiting for I/O to complete. */
1350                         if(bh->b_list != nlist) goto repeat1;
1351                         if (bh->b_count || !bh->b_this_page)
1352                                  continue;
1353                         if(size && bh->b_size != size) continue;
1354                         if (bh->b_lock)
1355                                  if (priority)
1356                                           continue;
1357                                  else
1358                                           wait_on_buffer(bh);
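                        /* Dirty buffers are not freed here: queue an asynchronous
                           write-ahead and move on, so the buffer can be reclaimed
                           on a later pass once the write has completed. */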
1359                         if (bh->b_dirt) {
1360                                 bh->b_count++;
1361                                 bh->b_flushtime = 0;
1362                                 ll_rw_block(WRITEA, 1, &bh);
1363                                 bh->b_count--;
1364                                 continue;
1365                         }
1366                         if (try_to_free(bh, &bh))
1367                                  return 1;
1368                         if(!bh) break;
1369                 }
1370         }
1371         return 0;
1372 }
1373 
1374 
1375 void show_buffers(void)
1376 {
1377         struct buffer_head * bh;
1378         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1379         int shared;
1380         int nlist, isize;
1381 
1382         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1383         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1384         printk("Buffer blocks:   %6d\n",nr_buffers);
1385 
1386         for(nlist = 0; nlist < NR_LIST; nlist++) {
1387           shared = found = locked = dirty = used = lastused = 0;
1388           bh = lru_list[nlist];
1389           if(!bh) continue;
1390           do {
1391                 found++;
1392                 if (bh->b_lock)
1393                         locked++;
1394                 if (bh->b_dirt)
1395                         dirty++;
1396                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1397                 if (bh->b_count)
1398                         used++, lastused = found;
1399                 bh = bh->b_next_free;
1400               } while (bh != lru_list[nlist]);
1401         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1402                 nlist, found, used, lastused, locked, dirty, shared);
1403         };
1404         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1405         for(isize = 0; isize<NR_SIZES; isize++){
1406                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1407                        buffers_lav[isize], nr_free[isize]);
1408                 for(nlist = 0; nlist < NR_LIST; nlist++)
1409                          printk("%7d ", nr_buffers_st[isize][nlist]);
1410                 printk("\n");
1411         }
1412 }
1413 
1414 /*
1415  * try_to_reassign() checks if all the buffers on this particular page
1416  * are unused, and reassigns them to a new cluster if they are.
1417  */
1418 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1419                            dev_t dev, unsigned int starting_block)
1420 {
1421         unsigned long page;
1422         struct buffer_head * tmp, * p;
1423 
1424         *bhp = bh;
1425         page = (unsigned long) bh->b_data;
1426         page &= PAGE_MASK;
1427         if(mem_map[MAP_NR(page)] != 1) return 0;
1428         tmp = bh;
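        /* Walk the circular b_this_page ring: every buffer on this page must be
           idle (not in use, not dirty, not locked), otherwise give up. */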
1429         do {
1430                 if (!tmp)
1431                          return 0;
1432                 
1433                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1434                          return 0;
1435                 tmp = tmp->b_this_page;
1436         } while (tmp != bh);
1437         tmp = bh;
1438         
1439         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1440                  tmp = tmp->b_this_page;
1441         
1442         /* This is the buffer at the head of the page */
1443         bh = tmp;
1444         do {
1445                 p = tmp;
1446                 tmp = tmp->b_this_page;
1447                 remove_from_queues(p);
1448                 p->b_dev=dev;
1449                 p->b_uptodate = 0;
1450                 p->b_req = 0;
1451                 p->b_blocknr=starting_block++;
1452                 insert_into_queues(p);
1453         } while (tmp != bh);
1454         return 1;
1455 }
1456 
1457 /*
1458  * Try to find a free cluster by locating a page where
1459  * all of the buffers are unused.  We would like this function
1460  * to be atomic, so we do not call anything that might cause
1461  * the process to sleep.  The priority is somewhat similar to
1462  * the priority used in shrink_buffers.
1463  * 
1464  * My thinking is that the kernel should end up using whole
1465  * pages for the buffer cache as much of the time as possible.
1466  * This way the other buffers on a particular page are likely
1467  * to be very near each other on the free list, and we will not
1468  * be expiring data prematurely.  For now we only cannibalize buffers
1469  * of the same size to keep the code simpler.
1470  */
1471 static int reassign_cluster(dev_t dev, 
1472                      unsigned int starting_block, int size)
1473 {
1474         struct buffer_head *bh;
1475         int isize = BUFSIZE_INDEX(size);
1476         int i;
1477 
1478         /* We want to give ourselves a really good shot at generating
1479            a cluster, and since we only take buffers from the free
1480            list, we "overfill" it a little. */
1481 
1482         while(nr_free[isize] < 32) refill_freelist(size);
1483 
1484         bh = free_list[isize];
1485         if(bh)
1486                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1487                          if (!bh->b_this_page)  continue;
1488                          if (try_to_reassign(bh, &bh, dev, starting_block))
1489                                  return 4;
1490                  }
1491         return 0;
1492 }
1493 
1494 /* This function tries to generate a new cluster of buffers
1495  * from a new page in memory.  We should only do this if we have
1496  * not expanded the buffer cache to the maximum size that we allow.
1497  */
1498 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1499 {
1500         struct buffer_head * bh, * tmp, * arr[8];
1501         int isize = BUFSIZE_INDEX(size);
1502         unsigned long offset;
1503         unsigned long page;
1504         int nblock;
1505 
1506         page = get_free_page(GFP_NOBUFFER);
1507         if(!page) return 0;
1508 
1509         bh = create_buffers(page, size);
1510         if (!bh) {
1511                 free_page(page);
1512                 return 0;
1513         };
1514         nblock = block;
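        /* None of the blocks that would live on this page may already be in the
           buffer cache, otherwise we cannot build an aligned cluster here. */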
1515         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1516                 if (find_buffer(dev, nblock++, size))
1517                          goto not_aligned;
1518         }
1519         tmp = bh;
1520         nblock = 0;
1521         while (1) {
1522                 arr[nblock++] = bh;
1523                 bh->b_count = 1;
1524                 bh->b_dirt = 0;
1525                 bh->b_flushtime = 0;
1526                 bh->b_lock = 0;
1527                 bh->b_uptodate = 0;
1528                 bh->b_req = 0;
1529                 bh->b_dev = dev;
1530                 bh->b_list = BUF_CLEAN;
1531                 bh->b_blocknr = block++;
1532                 nr_buffers++;
1533                 nr_buffers_size[isize]++;
1534                 insert_into_queues(bh);
1535                 if (bh->b_this_page)
1536                         bh = bh->b_this_page;
1537                 else
1538                         break;
1539         }
1540         buffermem += PAGE_SIZE;
1541         buffer_pages[MAP_NR(page)] = bh;
1542         bh->b_this_page = tmp;
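        /* The buffers were set up with b_count = 1 so nothing could grab them
           while the page was being assembled; now that the ring is closed and
           the buffers are hashed, drop that extra reference on each of them. */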
1543         while (nblock-- > 0)
1544                 brelse(arr[nblock]);
1545         return 4;
1546 not_aligned:
1547         while ((tmp = bh) != NULL) {
1548                 bh = bh->b_this_page;
1549                 put_unused_buffer_head(tmp);
1550         }
1551         free_page(page);
1552         return 0;
1553 }
1554 
1555 unsigned long generate_cluster(dev_t dev, int b[], int size)
1556 {
1557         int i, offset;
1558         
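        /* b[] holds the block numbers that would occupy one page; they must be
           consecutive, and none may already be present in the buffer cache. */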
1559         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1560                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1561                 if(find_buffer(dev, b[i], size)) return 0;
1562         };
1563 
1564         /* OK, we have a candidate for a new cluster */
1565         
1566         /* See if one size of buffer is over-represented in the buffer cache,
1567            if so, reduce the number of buffers */
1568         if(maybe_shrink_lav_buffers(size))
1569          {
1570                  int retval;
1571                  retval = try_to_generate_cluster(dev, b[0], size);
1572                  if(retval) return retval;
1573          };
1574         
1575         if (nr_free_pages > min_free_pages) 
1576                  return try_to_generate_cluster(dev, b[0], size);
1577         else
1578                  return reassign_cluster(dev, b[0], size);
1579 }
1580 
1581 /*
1582  * This initializes the buffer free list.  nr_buffers_type is set
1583  * to one less than the actual number of buffers, as a sop to backwards
1584  * compatibility --- the old code did this (I think unintentionally,
1585  * but I'm not sure), and programs in the ps package expect it.
1586  *                                      - TYT 8/30/92
1587  */
1588 void buffer_init(void)
1589 {
1590         int i;
1591         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1592 
1593         if (high_memory >= 4*1024*1024) {
1594                 min_free_pages = 200;
1595                 if(high_memory >= 16*1024*1024)
1596                          nr_hash = 16381;
1597                 else
1598                          nr_hash = 4093;
1599         } else {
1600                 min_free_pages = 20;
1601                 nr_hash = 997;
1602         };
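        /* The hash table sizes used above (997, 4093, 16381) are primes,
           presumably so that block numbers spread evenly over the chains. */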
1603         
1604         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1605                                                      sizeof(struct buffer_head *));
1606 
1607 
1608         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1609                                                      sizeof(struct buffer_head *));
1610         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1611                 buffer_pages[i] = NULL;
1612 
1613         for (i = 0 ; i < nr_hash ; i++)
1614                 hash_table[i] = NULL;
1615         lru_list[BUF_CLEAN] = 0;
1616         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1617         if (!free_list[isize])
1618                 panic("VFS: Unable to initialize buffer free list!");
1619         return;
1620 }
1621 
1622 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1623  * response to dirty buffers.  Once this process is activated, we write back
1624  * a limited number of buffers to the disks and then go back to sleep again.
1625  * In effect this is a process which never leaves kernel mode, and does not have
1626  * any user memory associated with it except for the stack.  There is also
1627  * a kernel stack page, which obviously must be separate from the user stack.
1628  */
1629 struct wait_queue * bdflush_wait = NULL;
1630 struct wait_queue * bdflush_done = NULL;
1631 
1632 static int bdflush_running = 0;
1633 
1634 static void wakeup_bdflush(int wait)
1635 {
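        /* If the daemon is not running yet, fall back to a synchronous flush. */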
1636         if(!bdflush_running){
1637                 printk("Warning - bdflush not running\n");
1638                 sync_buffers(0,0);
1639                 return;
1640         };
1641         wake_up(&bdflush_wait);
1642         if(wait) sleep_on(&bdflush_done);
1643 }
1644 
1645 
1646 
1647 /* 
1648  * Here we attempt to write back old buffers.  We also try to flush inodes
1649  * and superblocks, since this function is essentially "update", and
1650  * otherwise there would be no way of ensuring that they ever
1651  * get written back.  Ideally, we would have a timestamp on the inodes
1652  * and superblocks so that we could write back only the old ones as well.
1653  */
1654 
1655 asmlinkage int sync_old_buffers(void)
1656 {
1657         int i, isize;
1658         int ndirty, nwritten;
1659         int nlist;
1660         int ncount;
1661         struct buffer_head * bh, *next;
1662 
1663         sync_supers(0);
1664         sync_inodes(0);
1665 
1666         ncount = 0;
1667 #ifdef DEBUG
1668         for(nlist = 0; nlist < NR_LIST; nlist++)
1669 #else
1670         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1671 #endif
1672         {
1673                 ndirty = 0;
1674                 nwritten = 0;
1675         repeat:
1676                 bh = lru_list[nlist];
1677                 if(bh) 
1678                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1679                                  /* We may have stalled while waiting for I/O to complete. */
1680                                  if(bh->b_list != nlist) goto repeat;
1681                                  next = bh->b_next_free;
1682                                  if(!lru_list[nlist]) {
1683                                          printk("Dirty list empty %d\n", i);
1684                                          break;
1685                                  }
1686                                  
1687                                  /* Clean buffer on dirty list?  Refile it */
1688                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1689                                   {
1690                                           refile_buffer(bh);
1691                                           continue;
1692                                   }
1693                                  
1694                                  if (bh->b_lock || !bh->b_dirt)
1695                                           continue;
1696                                  ndirty++;
1697                                  if(bh->b_flushtime > jiffies) continue;
1698                                  nwritten++;
1699                                  bh->b_count++;
1700                                  bh->b_flushtime = 0;
1701 #ifdef DEBUG
1702                                  if(nlist != BUF_DIRTY) ncount++;
1703 #endif
1704                                  ll_rw_block(WRITE, 1, &bh);
1705                                  bh->b_count--;
1706                          }
1707         }
1708 #ifdef DEBUG
1709         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1710         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1711 #endif
1712         
1713         /* We assume that we only come through here on a regular
1714            schedule, like every 5 seconds.  Now update load averages.  
1715            Fold the usage counts into the averages and reset them so they cannot overflow. */
1716         for(isize = 0; isize<NR_SIZES; isize++){
1717                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1718                 buffer_usage[isize] = 0;
1719         };
1720         return 0;
1721 }
1722 
1723 
1724 /* This is the interface to bdflush.  As we get more sophisticated, we can
1725  * pass tuning parameters to this "process", to adjust how it behaves.  If you
1726  * invoke this again after you have done this once, you would simply modify 
1727  * the tuning parameters.  We would want to verify each parameter, however,
1728  * to make sure that it is reasonable. */
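/* For illustration only (a hypothetical user-space wrapper around this system
 * call, not part of this file): starting the daemon would be  bdflush(0, 0);
 * flushing old buffers  bdflush(1, 0);  reading tuning parameter i
 * bdflush(2 + 2*i, (long) &value);  and setting it  bdflush(3 + 2*i, value). */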
1729 
1730 asmlinkage int sys_bdflush(int func, int data)
1731 {
1732         int i, error;
1733         int ndirty;
1734         int nlist;
1735         int ncount;
1736         struct buffer_head * bh, *next;
1737 
1738         if (!suser())
1739                 return -EPERM;
1740 
1741         if (func == 1)
1742                  return sync_old_buffers();
1743 
1744         /* Basically func 0 means start the daemon, 1 means flush old buffers; for func >= 2, even values read a tuning parameter and odd values set it */
1745         if (func >= 2) {
1746                 i = (func-2) >> 1;
1747                 if (i < 0 || i >= N_PARAM)
1748                         return -EINVAL;
1749                 if((func & 1) == 0) {
1750                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1751                         if (error)
1752                                 return error;
1753                         put_fs_long(bdf_prm.data[i], data);
1754                         return 0;
1755                 };
1756                 if (data < bdflush_min[i] || data > bdflush_max[i])
1757                         return -EINVAL;
1758                 bdf_prm.data[i] = data;
1759                 return 0;
1760         };
1761         
1762         if (bdflush_running)
1763                 return -EBUSY; /* Only one copy of this running at one time */
1764         bdflush_running++;
1765         
1766         /* OK, from here on is the daemon */
1767         
1768         for (;;) {
1769 #ifdef DEBUG
1770                 printk("bdflush() activated...");
1771 #endif
1772                 
1773                 ncount = 0;
1774 #ifdef DEBUG
1775                 for(nlist = 0; nlist < NR_LIST; nlist++)
1776 #else
1777                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1778 #endif
1779                  {
1780                          ndirty = 0;
1781                  repeat:
1782                          bh = lru_list[nlist];
1783                          if(bh) 
1784                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1785                                        bh = next) {
1786                                           /* We may have stalled while waiting for I/O to complete. */
1787                                           if(bh->b_list != nlist) goto repeat;
1788                                           next = bh->b_next_free;
1789                                           if(!lru_list[nlist]) {
1790                                                   printk("Dirty list empty %d\n", i);
1791                                                   break;
1792                                           }
1793                                           
1794                                           /* Clean buffer on dirty list?  Refile it */
1795                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1796                                            {
1797                                                    refile_buffer(bh);
1798                                                    continue;
1799                                            }
1800                                           
1801                                           if (bh->b_lock || !bh->b_dirt)
1802                                                    continue;
1803                                           /* Should we write back buffers that are shared or not?
1804                                              Currently dirty buffers are not shared, so it does not matter. */
1805                                           bh->b_count++;
1806                                           ndirty++;
1807                                           bh->b_flushtime = 0;
1808                                           ll_rw_block(WRITE, 1, &bh);
1809 #ifdef DEBUG
1810                                           if(nlist != BUF_DIRTY) ncount++;
1811 #endif
1812                                           bh->b_count--;
1813                                   }
1814                  }
1815 #ifdef DEBUG
1816                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1817                 printk("sleeping again.\n");
1818 #endif
1819                 wake_up(&bdflush_done);
1820                 
1821                 /* If there are still a lot of dirty buffers around, skip the sleep
1822                    and flush some more */
1823                 
1824                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1825                    bdf_prm.b_un.nfract/100) {
1826                         if (current->signal & (1 << (SIGKILL-1))) {
1827                                 bdflush_running--;
1828                                 return 0;
1829                         }
1830                         current->signal = 0;
1831                         interruptible_sleep_on(&bdflush_wait);
1832                 }
1833         }
1834 }
1835 
1836 
1837 /*
1838  * Overrides for Emacs so that we follow Linus's tabbing style.
1839  * Emacs will notice this stuff at the end of the file and automatically
1840  * adjust the settings for this buffer only.  This must remain at the end
1841  * of the file.
1842  * ---------------------------------------------------------------------------
1843  * Local variables:
1844  * c-indent-level: 8
1845  * c-brace-imaginary-offset: 0
1846  * c-brace-offset: -8
1847  * c-argdecl-indent: 8
1848  * c-label-offset: -8
1849  * c-continued-statement-offset: 8
1850  * c-continued-brace-offset: 0
1851  * End:
1852  */
