root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions:
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. try_to_align
  32. check_aligned
  33. try_to_load_aligned
  34. try_to_share_buffers
  35. bread_page
  36. grow_buffers
  37. try_to_free
  38. maybe_shrink_lav_buffers
  39. shrink_buffers
  40. shrink_specific_buffers
  41. show_buffers
  42. try_to_reassign
  43. reassign_cluster
  44. try_to_generate_cluster
  45. generate_cluster
  46. buffer_init
  47. wakeup_bdflush
  48. sync_old_buffers
  49. sys_bdflush
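
As a quick orientation, here is a hedged sketch (not part of buffer.c; the
function names and the -EIO error handling are illustrative) of how a
filesystem typically drives this cache: look a block up with bread(), use
bh->b_data, and pair every lookup with a brelse().

    #include <linux/fs.h>
    #include <linux/errno.h>

    /* Hypothetical caller, for illustration only. */
    static int example_read_block(dev_t dev, int block, int blocksize)
    {
            struct buffer_head *bh;

            bh = bread(dev, block, blocksize);  /* find or read the block */
            if (!bh)
                    return -EIO;                /* block was unreadable */
            /* ... consume blocksize bytes at bh->b_data ... */
            brelse(bh);                         /* drop the reference bread() took */
            return 0;
    }

    static int example_modify_block(dev_t dev, int block, int blocksize)
    {
            struct buffer_head *bh;

            bh = bread(dev, block, blocksize);
            if (!bh)
                    return -EIO;
            /* ... update bh->b_data ... */
            bh->b_dirt = 1;                     /* bdflush/sync will write it back later */
            brelse(bh);                         /* set_writetime()/refile_buffer() run here */
            return 0;
    }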

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/sched.h>
  20 #include <linux/kernel.h>
  21 #include <linux/major.h>
  22 #include <linux/string.h>
  23 #include <linux/locks.h>
  24 #include <linux/errno.h>
  25 #include <linux/malloc.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/segment.h>
  29 #include <asm/io.h>
  30 
  31 #define NR_SIZES 4
  32 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  33 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  34 
  35 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
  36 #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
  59 extern int *blksize_size[];
  60 
  61 /* Here is the parameter block for the bdflush process. */
  62 static void wakeup_bdflush(int);
  63 
  64 #define N_PARAM 9
  65 #define LAV
  66 
  67 static union bdflush_param{
  68         struct {
  69                 int nfract;  /* Percentage of buffer cache dirty to 
  70                                 activate bdflush */
  71                 int ndirty;  /* Maximum number of dirty blocks to write out per
  72                                 wake-cycle */
  73                 int nrefill; /* Number of clean buffers to try and obtain
  74                                 each time we call refill */
  75                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  76                                   when trying to refill buffers. */
  77                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  78                                     search for free clusters */
  79                 int age_buffer;  /* Time for normal buffer to age before 
  80                                     we flush it */
  81                 int age_super;  /* Time for superblock to age before we 
  82                                    flush it */
  83                 int lav_const;  /* Constant used for load average (time
   84                    constant) */
  85                 int lav_ratio;  /* Used to determine how low a lav for a
  86                                    particular size can go before we start to
  87                                    trim back the buffers */
  88         } b_un;
  89         unsigned int data[N_PARAM];
  90 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
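      /* With the defaults above: nfract=25%, ndirty=500, nrefill=64, nref_dirt=256,
         clu_nfract=15%, age_buffer=3000 and age_super=500 (both in jiffies, see
         set_writetime() below), lav_const=1884, lav_ratio=2. */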
  91 
  92 /* The lav constant is set for 1 minute, as long as the update process runs
  93    every 5 seconds.  If you change the frequency of update, the time
  94    constant will also change. */
  95 
  96 
  97 /* These are the min and max parameter values that we will allow to be assigned */
  98 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
  99 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
 100 
 101 /*
 102  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 103  * and getting rid of the cli-sti pairs. The wait-queue routines still
 104  * need cli-sti, but now it's just a couple of 386 instructions or so.
 105  *
 106  * Note that the real wait_on_buffer() is an inline function that checks
 107  * if 'b_wait' is set before calling this, so that the queues aren't set
 108  * up unnecessarily.
 109  */
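/* Note that __wait_on_buffer() below holds a temporary b_count reference
   around the sleep, so the buffer cannot be reclaimed while we wait for
   b_lock to clear. */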
 110 void __wait_on_buffer(struct buffer_head * bh)
 111 {
 112         struct wait_queue wait = { current, NULL };
 113 
 114         bh->b_count++;
 115         add_wait_queue(&bh->b_wait, &wait);
 116 repeat:
 117         current->state = TASK_UNINTERRUPTIBLE;
 118         if (bh->b_lock) {
 119                 schedule();
 120                 goto repeat;
 121         }
 122         remove_wait_queue(&bh->b_wait, &wait);
 123         bh->b_count--;
 124         current->state = TASK_RUNNING;
 125 }
 126 
 127 /* Call sync_buffers with wait!=0 to ensure that the call does not
 128    return until all buffer writes have completed.  Sync() may return
 129    before the writes have finished; fsync() may not. */
 130 
 131 
 132 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 133    spontaneously dirty themselves without ever brelse being called.
 134    We will ultimately want to put these in a separate list, but for
 135    now we search all of the lists for dirty buffers */
 136 
 137 static int sync_buffers(dev_t dev, int wait)
 138 {
 139         int i, retry, pass = 0, err = 0;
 140         int nlist, ncount;
 141         struct buffer_head * bh, *next;
 142 
 143         /* One pass for no-wait, three for wait:
 144            0) write out all dirty, unlocked buffers;
 145            1) write out all dirty buffers, waiting if locked;
 146            2) wait for completion by waiting for all buffers to unlock. */
 147  repeat:
 148         retry = 0;
 149  repeat2:
 150         ncount = 0;
 151         /* We search all lists as a failsafe mechanism, not because we expect
 152            there to be dirty buffers on any of the other lists. */
 153         for(nlist = 0; nlist < NR_LIST; nlist++)
 154          {
 155          repeat1:
 156                  bh = lru_list[nlist];
 157                  if(!bh) continue;
 158                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 159                          if(bh->b_list != nlist) goto repeat1;
 160                          next = bh->b_next_free;
 161                          if(!lru_list[nlist]) break;
 162                          if (dev && bh->b_dev != dev)
 163                                   continue;
 164                          if (bh->b_lock)
 165                           {
 166                                   /* Buffer is locked; skip it unless wait is
 167                                      requested AND pass > 0. */
 168                                   if (!wait || !pass) {
 169                                           retry = 1;
 170                                           continue;
 171                                   }
 172                                   wait_on_buffer (bh);
 173                                   goto repeat2;
 174                           }
 175                          /* If an unlocked buffer is not uptodate, there has
 176                              been an IO error. Skip it. */
 177                          if (wait && bh->b_req && !bh->b_lock &&
 178                              !bh->b_dirt && !bh->b_uptodate) {
 179                                   err = 1;
 180                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 181                                   continue;
 182                           }
 183                          /* Don't write clean buffers.  Don't write ANY buffers
 184                             on the third pass. */
 185                          if (!bh->b_dirt || pass>=2)
 186                                   continue;
 187                          /* don't bother about locked buffers */
 188                          if (bh->b_lock)
 189                                  continue;
 190                          bh->b_count++;
 191                          bh->b_flushtime = 0;
 192                          ll_rw_block(WRITE, 1, &bh);
 193 
 194                          if(nlist != BUF_DIRTY) { 
 195                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 196                                  ncount++;
 197                          };
 198                          bh->b_count--;
 199                          retry = 1;
 200                  }
 201          }
 202         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 203         
 204         /* If we are waiting for the sync to succeed, and if any dirty
 205            blocks were written, then repeat; on the second pass, only
 206            wait for buffers being written (do not pass to write any
 207            more buffers on the second pass). */
 208         if (wait && retry && ++pass<=2)
 209                  goto repeat;
 210         return err;
 211 }
 212 
 213 void sync_dev(dev_t dev)
 214 {
 215         sync_buffers(dev, 0);
 216         sync_supers(dev);
 217         sync_inodes(dev);
 218         sync_buffers(dev, 0);
 219 }
 220 
 221 int fsync_dev(dev_t dev)
 222 {
 223         sync_buffers(dev, 0);
 224         sync_supers(dev);
 225         sync_inodes(dev);
 226         return sync_buffers(dev, 1);
 227 }
 228 
 229 asmlinkage int sys_sync(void)
 230 {
 231         sync_dev(0);
 232         return 0;
 233 }
 234 
 235 int file_fsync (struct inode *inode, struct file *filp)
 236 {
 237         return fsync_dev(inode->i_dev);
 238 }
 239 
 240 asmlinkage int sys_fsync(unsigned int fd)
 241 {
 242         struct file * file;
 243         struct inode * inode;
 244 
 245         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 246                 return -EBADF;
 247         if (!file->f_op || !file->f_op->fsync)
 248                 return -EINVAL;
 249         if (file->f_op->fsync(inode,file))
 250                 return -EIO;
 251         return 0;
 252 }
 253 
 254 void invalidate_buffers(dev_t dev)
 255 {
 256         int i;
 257         int nlist;
 258         struct buffer_head * bh;
 259 
 260         for(nlist = 0; nlist < NR_LIST; nlist++) {
 261                 bh = lru_list[nlist];
 262                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
 263                         if (bh->b_dev != dev)
 264                                 continue;
 265                         wait_on_buffer(bh);
 266                         if (bh->b_dev != dev)
 267                                 continue;
 268                         if (bh->b_count)
 269                                 continue;
 270                         bh->b_flushtime = bh->b_uptodate = 
 271                                 bh->b_dirt = bh->b_req = 0;
 272                 }
 273         }
 274 }
 275 
 276 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 277 #define hash(dev,block) hash_table[_hashfn(dev,block)]
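/* Every cached buffer hangs off hash_table[(dev ^ block) % nr_hash] through
   its b_next/b_prev links; find_buffer() below walks one such chain. */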
 278 
 279 static inline void remove_from_hash_queue(struct buffer_head * bh)
 280 {
 281         if (bh->b_next)
 282                 bh->b_next->b_prev = bh->b_prev;
 283         if (bh->b_prev)
 284                 bh->b_prev->b_next = bh->b_next;
 285         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 286                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 287         bh->b_next = bh->b_prev = NULL;
 288 }
 289 
 290 static inline void remove_from_lru_list(struct buffer_head * bh)
 291 {
 292         if (!(bh->b_prev_free) || !(bh->b_next_free))
 293                 panic("VFS: LRU block list corrupted");
 294         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 295         bh->b_prev_free->b_next_free = bh->b_next_free;
 296         bh->b_next_free->b_prev_free = bh->b_prev_free;
 297 
 298         if (lru_list[bh->b_list] == bh)
 299                  lru_list[bh->b_list] = bh->b_next_free;
 300         if(lru_list[bh->b_list] == bh)
 301                  lru_list[bh->b_list] = NULL;
 302         bh->b_next_free = bh->b_prev_free = NULL;
 303 }
 304 
 305 static inline void remove_from_free_list(struct buffer_head * bh)
 306 {
 307         int isize = BUFSIZE_INDEX(bh->b_size);
 308         if (!(bh->b_prev_free) || !(bh->b_next_free))
 309                 panic("VFS: Free block list corrupted");
 310         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 311         if(!free_list[isize])
 312                  panic("Free list empty");
 313         nr_free[isize]--;
 314         if(bh->b_next_free == bh)
 315                  free_list[isize] = NULL;
 316         else {
 317                 bh->b_prev_free->b_next_free = bh->b_next_free;
 318                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 319                 if (free_list[isize] == bh)
 320                          free_list[isize] = bh->b_next_free;
 321         };
 322         bh->b_next_free = bh->b_prev_free = NULL;
 323 }
 324 
 325 static inline void remove_from_queues(struct buffer_head * bh)
 326 {
 327         if(bh->b_dev == 0xffff) {
 328                 remove_from_free_list(bh); /* Free list entries should not be
 329                                               in the hash queue */
 330                 return;
 331         };
 332         nr_buffers_type[bh->b_list]--;
 333         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 334         remove_from_hash_queue(bh);
 335         remove_from_lru_list(bh);
 336 }
 337 
 338 static inline void put_last_lru(struct buffer_head * bh)
 339 {
 340         if (!bh)
 341                 return;
 342         if (bh == lru_list[bh->b_list]) {
 343                 lru_list[bh->b_list] = bh->b_next_free;
 344                 return;
 345         }
 346         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 347         remove_from_lru_list(bh);
  348 /* add to back of lru list */
 349 
 350         if(!lru_list[bh->b_list]) {
 351                 lru_list[bh->b_list] = bh;
 352                 lru_list[bh->b_list]->b_prev_free = bh;
 353         };
 354 
 355         bh->b_next_free = lru_list[bh->b_list];
 356         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 357         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 358         lru_list[bh->b_list]->b_prev_free = bh;
 359 }
 360 
 361 static inline void put_last_free(struct buffer_head * bh)
 362 {
 363         int isize;
 364         if (!bh)
 365                 return;
 366 
 367         isize = BUFSIZE_INDEX(bh->b_size);      
 368         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 369 /* add to back of free list */
 370 
 371         if(!free_list[isize]) {
 372                 free_list[isize] = bh;
 373                 bh->b_prev_free = bh;
 374         };
 375 
 376         nr_free[isize]++;
 377         bh->b_next_free = free_list[isize];
 378         bh->b_prev_free = free_list[isize]->b_prev_free;
 379         free_list[isize]->b_prev_free->b_next_free = bh;
 380         free_list[isize]->b_prev_free = bh;
 381 }
 382 
 383 static inline void insert_into_queues(struct buffer_head * bh)
 384 {
 385 /* put at end of free list */
 386 
 387         if(bh->b_dev == 0xffff) {
 388                 put_last_free(bh);
 389                 return;
 390         };
 391         if(!lru_list[bh->b_list]) {
 392                 lru_list[bh->b_list] = bh;
 393                 bh->b_prev_free = bh;
 394         };
 395         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 396         bh->b_next_free = lru_list[bh->b_list];
 397         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 398         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 399         lru_list[bh->b_list]->b_prev_free = bh;
 400         nr_buffers_type[bh->b_list]++;
 401         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 402 /* put the buffer in new hash-queue if it has a device */
 403         bh->b_prev = NULL;
 404         bh->b_next = NULL;
 405         if (!bh->b_dev)
 406                 return;
 407         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 408         hash(bh->b_dev,bh->b_blocknr) = bh;
 409         if (bh->b_next)
 410                 bh->b_next->b_prev = bh;
 411 }
 412 
 413 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 414 {               
 415         struct buffer_head * tmp;
 416 
 417         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 418                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 419                         if (tmp->b_size == size)
 420                                 return tmp;
 421                         else {
 422                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 423                                                         MAJOR(dev), MINOR(dev));
 424                                 return NULL;
 425                         }
 426         return NULL;
 427 }
 428 
 429 /*
 430  * Why like this, I hear you say... The reason is race-conditions.
 431  * As we don't lock buffers (unless we are reading them, that is),
 432  * something might happen to it while we sleep (ie a read-error
 433  * will force it bad). This shouldn't really happen currently, but
 434  * the code is ready.
 435  */
 436 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 437 {
 438         struct buffer_head * bh;
 439 
 440         for (;;) {
 441                 if (!(bh=find_buffer(dev,block,size)))
 442                         return NULL;
 443                 bh->b_count++;
 444                 wait_on_buffer(bh);
 445                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 446                         return bh;
 447                 bh->b_count--;
 448         }
 449 }
 450 
 451 void set_blocksize(dev_t dev, int size)
 452 {
 453         int i, nlist;
 454         struct buffer_head * bh, *bhnext;
 455 
 456         if (!blksize_size[MAJOR(dev)])
 457                 return;
 458 
 459         switch(size) {
 460                 default: panic("Invalid blocksize passed to set_blocksize");
 461                 case 512: case 1024: case 2048: case 4096:;
 462         }
 463 
 464         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 465                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 466                 return;
 467         }
 468         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 469                 return;
 470         sync_buffers(dev, 2);
 471         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 472 
 473   /* We need to be quite careful how we do this - we are moving entries
 474      around on the free list, and we can get in a loop if we are not careful.*/
 475 
 476         for(nlist = 0; nlist < NR_LIST; nlist++) {
 477                 bh = lru_list[nlist];
 478                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 479                         if(!bh) break;
 480                         bhnext = bh->b_next_free; 
 481                         if (bh->b_dev != dev)
 482                                  continue;
 483                         if (bh->b_size == size)
 484                                  continue;
 485                         
 486                         wait_on_buffer(bh);
 487                         if (bh->b_dev == dev && bh->b_size != size) {
 488                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 489                                          bh->b_flushtime = 0;
 490                         };
 491                         remove_from_hash_queue(bh);
 492                 }
 493         }
 494 }
 495 
 496 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
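/* BADNESS ranks a buffer for reclaim: 0 = clean and unlocked, 1 = locked only,
   2 = dirty only, 3 = dirty and locked.  refill_freelist() only takes buffers
   with BADNESS == 0. */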
 497 
 498 void refill_freelist(int size)
 499 {
 500         struct buffer_head * bh, * tmp;
 501         struct buffer_head * candidate[NR_LIST];
 502         unsigned int best_time, winner;
 503         int isize = BUFSIZE_INDEX(size);
 504         int buffers[NR_LIST];
 505         int i;
 506         int needed;
 507 
 508         /* First see if we even need this.  Sometimes it is advantageous
  509          to request some blocks in a filesystem that we know we will
 510          be needing ahead of time. */
 511 
 512         if (nr_free[isize] > 100)
 513                 return;
 514 
 515         /* If there are too many dirty buffers, we wake up the update process
 516            now so as to ensure that there are still clean buffers available
 517            for user processes to use (and dirty) */
 518         
 519         /* We are going to try and locate this much memory */
 520         needed =bdf_prm.b_un.nrefill * size;  
 521 
 522         while (nr_free_pages > min_free_pages*2 && needed > 0 &&
 523                grow_buffers(GFP_BUFFER, size)) {
 524                 needed -= PAGE_SIZE;
 525         }
 526 
 527         if(needed <= 0) return;
 528 
 529         /* See if there are too many buffers of a different size.
 530            If so, victimize them */
 531 
 532         while(maybe_shrink_lav_buffers(size))
 533          {
 534                  if(!grow_buffers(GFP_BUFFER, size)) break;
 535                  needed -= PAGE_SIZE;
 536                  if(needed <= 0) return;
 537          };
 538 
 539         /* OK, we cannot grow the buffer cache, now try and get some
 540            from the lru list */
 541 
 542         /* First set the candidate pointers to usable buffers.  This
 543            should be quick nearly all of the time. */
 544 
 545 repeat0:
 546         for(i=0; i<NR_LIST; i++){
 547                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 548                    nr_buffers_type[i] == 0) {
 549                         candidate[i] = NULL;
 550                         buffers[i] = 0;
 551                         continue;
 552                 }
 553                 buffers[i] = nr_buffers_type[i];
 554                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 555                  {
 556                          if(buffers[i] < 0) panic("Here is the problem");
 557                          tmp = bh->b_next_free;
 558                          if (!bh) break;
 559                          
 560                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 561                              bh->b_dirt) {
 562                                  refile_buffer(bh);
 563                                  continue;
 564                          };
 565                          
 566                          if (bh->b_count || bh->b_size != size)
 567                                   continue;
 568                          
 569                          /* Buffers are written in the order they are placed 
 570                             on the locked list. If we encounter a locked
 571                             buffer here, this means that the rest of them
 572                             are also locked */
 573                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 574                                  buffers[i] = 0;
 575                                  break;
 576                          }
 577                          
 578                          if (BADNESS(bh)) continue;
 579                          break;
 580                  };
 581                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 582                 else candidate[i] = bh;
 583                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 584         }
 585         
 586  repeat:
 587         if(needed <= 0) return;
 588         
 589         /* Now see which candidate wins the election */
 590         
 591         winner = best_time = UINT_MAX;  
 592         for(i=0; i<NR_LIST; i++){
 593                 if(!candidate[i]) continue;
 594                 if(candidate[i]->b_lru_time < best_time){
 595                         best_time = candidate[i]->b_lru_time;
 596                         winner = i;
 597                 }
 598         }
 599         
 600         /* If we have a winner, use it, and then get a new candidate from that list */
 601         if(winner != UINT_MAX) {
 602                 i = winner;
 603                 bh = candidate[i];
 604                 candidate[i] = bh->b_next_free;
 605                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 606                 if (bh->b_count || bh->b_size != size)
 607                          panic("Busy buffer in candidate list\n");
 608                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 609                          panic("Shared buffer in candidate list\n");
 610                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 611                 
 612                 if(bh->b_dev == 0xffff) panic("Wrong list");
 613                 remove_from_queues(bh);
 614                 bh->b_dev = 0xffff;
 615                 put_last_free(bh);
 616                 needed -= bh->b_size;
 617                 buffers[i]--;
 618                 if(buffers[i] < 0) panic("Here is the problem");
 619                 
 620                 if(buffers[i] == 0) candidate[i] = NULL;
 621                 
 622                 /* Now all we need to do is advance the candidate pointer
 623                    from the winner list to the next usable buffer */
 624                 if(candidate[i] && buffers[i] > 0){
 625                         if(buffers[i] <= 0) panic("Here is another problem");
 626                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 627                                 if(buffers[i] < 0) panic("Here is the problem");
 628                                 tmp = bh->b_next_free;
 629                                 if (!bh) break;
 630                                 
 631                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 632                                     bh->b_dirt) {
 633                                         refile_buffer(bh);
 634                                         continue;
 635                                 };
 636                                 
 637                                 if (bh->b_count || bh->b_size != size)
 638                                          continue;
 639                                 
 640                                 /* Buffers are written in the order they are
 641                                    placed on the locked list.  If we encounter
 642                                    a locked buffer here, this means that the
 643                                    rest of them are also locked */
 644                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 645                                         buffers[i] = 0;
 646                                         break;
 647                                 }
 648               
 649                                 if (BADNESS(bh)) continue;
 650                                 break;
 651                         };
 652                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 653                         else candidate[i] = bh;
 654                         if(candidate[i] && candidate[i]->b_count) 
 655                                  panic("Here is the problem");
 656                 }
 657                 
 658                 goto repeat;
 659         }
 660         
 661         if(needed <= 0) return;
 662         
 663         /* Too bad, that was not enough. Try a little harder to grow some. */
 664         
 665         if (nr_free_pages > 5) {
 666                 if (grow_buffers(GFP_BUFFER, size)) {
 667                         needed -= PAGE_SIZE;
 668                         goto repeat0;
 669                 };
 670         }
 671         
 672         /* and repeat until we find something good */
 673         if (!grow_buffers(GFP_ATOMIC, size))
 674                 wakeup_bdflush(1);
 675         needed -= PAGE_SIZE;
 676         goto repeat0;
 677 }
 678 
 679 /*
 680  * Ok, this is getblk, and it isn't very clear, again to hinder
 681  * race-conditions. Most of the code is seldom used, (ie repeating),
 682  * so it should be much more efficient than it looks.
 683  *
 684  * The algorithm is changed: hopefully better, and an elusive bug removed.
 685  *
 686  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 687  * when the filesystem starts to get full of dirty blocks (I hope).
 688  */
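/* Note: getblk() never fails; it loops in refill_freelist() until a buffer of
   the right size is available.  The buffer it returns holds an extra b_count
   reference, so every getblk() must be paired with a brelse(). */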
 689 struct buffer_head * getblk(dev_t dev, int block, int size)
 690 {
 691         struct buffer_head * bh;
 692         int isize = BUFSIZE_INDEX(size);
 693 
 694         /* Update this for the buffer size lav. */
 695         buffer_usage[isize]++;
 696 
 697         /* If there are too many dirty buffers, we wake up the update process
 698            now so as to ensure that there are still clean buffers available
 699            for user processes to use (and dirty) */
 700 repeat:
 701         bh = get_hash_table(dev, block, size);
 702         if (bh) {
 703                 if (bh->b_uptodate && !bh->b_dirt)
 704                          put_last_lru(bh);
 705                 if(!bh->b_dirt) bh->b_flushtime = 0;
 706                 return bh;
 707         }
 708 
 709         while(!free_list[isize]) refill_freelist(size);
 710         
 711         if (find_buffer(dev,block,size))
 712                  goto repeat;
 713 
 714         bh = free_list[isize];
 715         remove_from_free_list(bh);
 716 
 717 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 718 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 719         bh->b_count=1;
 720         bh->b_dirt=0;
 721         bh->b_lock=0;
 722         bh->b_uptodate=0;
 723         bh->b_flushtime = 0;
 724         bh->b_req=0;
 725         bh->b_dev=dev;
 726         bh->b_blocknr=block;
 727         insert_into_queues(bh);
 728         return bh;
 729 }
 730 
 731 void set_writetime(struct buffer_head * buf, int flag)
 732 {
 733         int newtime;
 734 
 735         if (buf->b_dirt){
  736                 /* Set the flush deadline for a dirty buffer, never moving it later */
 737                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 738                                      bdf_prm.b_un.age_buffer);
 739                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 740                          buf->b_flushtime = newtime;
 741         } else {
 742                 buf->b_flushtime = 0;
 743         }
 744 }
 745 
 746 
 747 void refile_buffer(struct buffer_head * buf){
 748         int dispose;
 749         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 750         if (buf->b_dirt)
 751                 dispose = BUF_DIRTY;
 752         else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
 753                 dispose = BUF_SHARED;
 754         else if (buf->b_lock)
 755                 dispose = BUF_LOCKED;
 756         else if (buf->b_list == BUF_SHARED)
 757                 dispose = BUF_UNSHARED;
 758         else
 759                 dispose = BUF_CLEAN;
 760         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 761         if(dispose != buf->b_list)  {
 762                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 763                          buf->b_lru_time = jiffies;
 764                 if(dispose == BUF_LOCKED && 
 765                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 766                          dispose = BUF_LOCKED1;
 767                 remove_from_queues(buf);
 768                 buf->b_list = dispose;
 769                 insert_into_queues(buf);
 770                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 771                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 772                    bdf_prm.b_un.nfract/100)
 773                          wakeup_bdflush(0);
 774         }
 775 }
 776 
 777 void brelse(struct buffer_head * buf)
 778 {
 779         if (!buf)
 780                 return;
 781         wait_on_buffer(buf);
 782 
 783         /* If dirty, mark the time this buffer should be written back */
 784         set_writetime(buf, 0);
 785         refile_buffer(buf);
 786 
 787         if (buf->b_count) {
 788                 if (--buf->b_count)
 789                         return;
 790                 wake_up(&buffer_wait);
 791                 return;
 792         }
 793         printk("VFS: brelse: Trying to free free buffer\n");
 794 }
 795 
 796 /*
 797  * bread() reads a specified block and returns the buffer that contains
 798  * it. It returns NULL if the block was unreadable.
 799  */
 800 struct buffer_head * bread(dev_t dev, int block, int size)
 801 {
 802         struct buffer_head * bh;
 803 
 804         if (!(bh = getblk(dev, block, size))) {
 805                 printk("VFS: bread: READ error on device %d/%d\n",
 806                                                 MAJOR(dev), MINOR(dev));
 807                 return NULL;
 808         }
 809         if (bh->b_uptodate)
 810                 return bh;
 811         ll_rw_block(READ, 1, &bh);
 812         wait_on_buffer(bh);
 813         if (bh->b_uptodate)
 814                 return bh;
 815         brelse(bh);
 816         return NULL;
 817 }
 818 
 819 /*
  820  * Ok, breada can be used as bread, but additionally starts read-ahead
  821  * on the blocks that follow, bounded by the file size, the device's
  822  * read_ahead setting and NBUF.
 823  */
 824 
 825 #define NBUF 16
 826 
 827 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 828         unsigned int pos, unsigned int filesize)
 829 {
 830         struct buffer_head * bhlist[NBUF];
 831         unsigned int blocks;
 832         struct buffer_head * bh;
 833         int index;
 834         int i, j;
 835 
 836         if (pos >= filesize)
 837                 return NULL;
 838 
 839         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 840                 return NULL;
 841 
 842         index = BUFSIZE_INDEX(bh->b_size);
 843 
 844         if (bh->b_uptodate)
 845                 return bh;
 846 
 847         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 848 
 849         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 850                 blocks = read_ahead[MAJOR(dev)] >> index;
 851         if (blocks > NBUF)
 852                 blocks = NBUF;
 853         
 854         bhlist[0] = bh;
 855         j = 1;
 856         for(i=1; i<blocks; i++) {
 857                 bh = getblk(dev,block+i,bufsize);
 858                 if (bh->b_uptodate) {
 859                         brelse(bh);
 860                         break;
 861                 }
 862                 bhlist[j++] = bh;
 863         }
 864 
 865         /* Request the read for these buffers, and then release them */
 866         ll_rw_block(READ, j, bhlist);
 867 
 868         for(i=1; i<j; i++)
 869                 brelse(bhlist[i]);
 870 
 871         /* Wait for this buffer, and then continue on */
 872         bh = bhlist[0];
 873         wait_on_buffer(bh);
 874         if (bh->b_uptodate)
 875                 return bh;
 876         brelse(bh);
 877         return NULL;
 878 }
 879 
 880 /*
 881  * See fs/inode.c for the weird use of volatile..
 882  */
 883 static void put_unused_buffer_head(struct buffer_head * bh)
 884 {
 885         struct wait_queue * wait;
 886 
 887         wait = ((volatile struct buffer_head *) bh)->b_wait;
 888         memset(bh,0,sizeof(*bh));
 889         ((volatile struct buffer_head *) bh)->b_wait = wait;
 890         bh->b_next_free = unused_list;
 891         unused_list = bh;
 892 }
 893 
 894 static void get_more_buffer_heads(void)
 895 {
 896         int i;
 897         struct buffer_head * bh;
 898 
 899         if (unused_list)
 900                 return;
 901 
 902         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 903                 return;
 904 
 905         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 906                 bh->b_next_free = unused_list;  /* only make link */
 907                 unused_list = bh++;
 908         }
 909 }
 910 
 911 static struct buffer_head * get_unused_buffer_head(void)
 912 {
 913         struct buffer_head * bh;
 914 
 915         get_more_buffer_heads();
 916         if (!unused_list)
 917                 return NULL;
 918         bh = unused_list;
 919         unused_list = bh->b_next_free;
 920         bh->b_next_free = NULL;
 921         bh->b_data = NULL;
 922         bh->b_size = 0;
 923         bh->b_req = 0;
 924         return bh;
 925 }
 926 
 927 /*
 928  * Create the appropriate buffers when given a page for data area and
 929  * the size of each buffer.. Use the bh->b_this_page linked list to
 930  * follow the buffers created.  Return NULL if unable to create more
 931  * buffers.
 932  */
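/* The b_this_page chain returned here is NULL-terminated; grow_buffers() and
   try_to_load_aligned() close it into a circular ring once the buffers have
   been set up. */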
 933 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 934 {
 935         struct buffer_head *bh, *head;
 936         unsigned long offset;
 937 
 938         head = NULL;
 939         offset = PAGE_SIZE;
 940         while ((offset -= size) < PAGE_SIZE) {
 941                 bh = get_unused_buffer_head();
 942                 if (!bh)
 943                         goto no_grow;
 944                 bh->b_this_page = head;
 945                 head = bh;
 946                 bh->b_data = (char *) (page+offset);
 947                 bh->b_size = size;
 948                 bh->b_dev = 0xffff;  /* Flag as unused */
 949         }
 950         return head;
 951 /*
 952  * In case anything failed, we just free everything we got.
 953  */
 954 no_grow:
 955         bh = head;
 956         while (bh) {
 957                 head = bh;
 958                 bh = bh->b_this_page;
 959                 put_unused_buffer_head(head);
 960         }
 961         return NULL;
 962 }
 963 
 964 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 965 {
 966         int i;
 967         int bhnum = 0;
 968         struct buffer_head * bhr[MAX_BUF_PER_PAGE];
 969 
 970         for (i = 0 ; i < nrbuf ; i++) {
 971                 if (bh[i] && !bh[i]->b_uptodate)
 972                         bhr[bhnum++] = bh[i];
 973         }
 974         if (bhnum)
 975                 ll_rw_block(READ, bhnum, bhr);
 976         for (i = 0 ; i < nrbuf ; i++) {
 977                 if (bh[i]) {
 978                         wait_on_buffer(bh[i]);
 979                 }
 980         }
 981 }
 982 
 983 /*
 984  * This actually gets enough info to try to align the stuff,
 985  * but we don't bother yet.. We'll have to check that nobody
 986  * else uses the buffers etc.
 987  *
 988  * "address" points to the new page we can use to move things
 989  * around..
 990  */
 991 static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
 992         unsigned long address)
 993 {
 994         while (nrbuf-- > 0)
 995                 brelse(bh[nrbuf]);
 996         return 0;
 997 }
 998 
 999 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
1000         dev_t dev, int *b, int size)
1001 {
1002         struct buffer_head * bh[MAX_BUF_PER_PAGE];
1003         unsigned long page;
1004         unsigned long offset;
1005         int block;
1006         int nrbuf;
1007         int aligned = 1;
1008 
1009         bh[0] = first;
1010         nrbuf = 1;
1011         page = (unsigned long) first->b_data;
1012         if (page & ~PAGE_MASK)
1013                 aligned = 0;
1014         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1015                 block = *++b;
1016                 if (!block)
1017                         goto no_go;
1018                 first = get_hash_table(dev, block, size);
1019                 if (!first)
1020                         goto no_go;
1021                 bh[nrbuf++] = first;
1022                 if (page+offset != (unsigned long) first->b_data)
1023                         aligned = 0;
1024         }
1025         if (!aligned)
1026                 return try_to_align(bh, nrbuf, address);
1027         mem_map[MAP_NR(page)]++;
1028         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1029         while (nrbuf-- > 0)
1030                 brelse(bh[nrbuf]);
1031         free_page(address);
1032         ++current->mm->min_flt;
1033         return page;
1034 no_go:
1035         while (nrbuf-- > 0)
1036                 brelse(bh[nrbuf]);
1037         return 0;
1038 }
1039 
1040 static unsigned long try_to_load_aligned(unsigned long address,
1041         dev_t dev, int b[], int size)
1042 {
1043         struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
1044         unsigned long offset;
1045         int isize = BUFSIZE_INDEX(size);
1046         int * p;
1047         int block;
1048 
1049         bh = create_buffers(address, size);
1050         if (!bh)
1051                 return 0;
1052         /* do any of the buffers already exist? punt if so.. */
1053         p = b;
1054         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1055                 block = *(p++);
1056                 if (!block)
1057                         goto not_aligned;
1058                 if (find_buffer(dev, block, size))
1059                         goto not_aligned;
1060         }
1061         tmp = bh;
1062         p = b;
1063         block = 0;
1064         while (1) {
1065                 arr[block++] = bh;
1066                 bh->b_count = 1;
1067                 bh->b_dirt = 0;
1068                 bh->b_flushtime = 0;
1069                 bh->b_uptodate = 0;
1070                 bh->b_req = 0;
1071                 bh->b_dev = dev;
1072                 bh->b_blocknr = *(p++);
1073                 bh->b_list = BUF_CLEAN;
1074                 nr_buffers++;
1075                 nr_buffers_size[isize]++;
1076                 insert_into_queues(bh);
1077                 if (bh->b_this_page)
1078                         bh = bh->b_this_page;
1079                 else
1080                         break;
1081         }
1082         buffermem += PAGE_SIZE;
1083         bh->b_this_page = tmp;
1084         mem_map[MAP_NR(address)]++;
1085         buffer_pages[MAP_NR(address)] = bh;
1086         read_buffers(arr,block);
1087         while (block-- > 0)
1088                 brelse(arr[block]);
1089         ++current->mm->maj_flt;
1090         return address;
1091 not_aligned:
1092         while ((tmp = bh) != NULL) {
1093                 bh = bh->b_this_page;
1094                 put_unused_buffer_head(tmp);
1095         }
1096         return 0;
1097 }
1098 
1099 /*
1100  * Try-to-share-buffers tries to minimize memory use by trying to keep
1101  * both code pages and the buffer area in the same page. This is done by
1102  * (a) checking if the buffers are already aligned correctly in memory and
1103  * (b) if none of the buffer heads are in memory at all, trying to load
1104  * them into memory the way we want them.
1105  *
1106  * This doesn't guarantee that the memory is shared, but should under most
1107  * circumstances work very well indeed (ie >90% sharing of code pages on
1108  * demand-loadable executables).
1109  */
1110 static inline unsigned long try_to_share_buffers(unsigned long address,
1111         dev_t dev, int *b, int size)
1112 {
1113         struct buffer_head * bh;
1114         int block;
1115 
1116         block = b[0];
1117         if (!block)
1118                 return 0;
1119         bh = get_hash_table(dev, block, size);
1120         if (bh)
1121                 return check_aligned(bh, address, dev, b, size);
1122         return try_to_load_aligned(address, dev, b, size);
1123 }
1124 
1125 /*
1126  * bread_page reads four buffers into memory at the desired address. It's
1127  * a function of its own, as there is some speed to be got by reading them
1128  * all at the same time, not waiting for one to be read, and then another
1129  * etc. This also allows us to optimize memory usage by sharing code pages
1130  * and filesystem buffers..
1131  */
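/* b[] holds one block number per buffer slot in the page (PAGE_SIZE/size
   entries); a zero entry is a hole and that part of the page is simply left
   untouched. */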
1132 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1133 {
1134         struct buffer_head * bh[MAX_BUF_PER_PAGE];
1135         unsigned long where;
1136         int i, j;
1137 
1138         if (!no_share) {
1139                 where = try_to_share_buffers(address, dev, b, size);
1140                 if (where)
1141                         return where;
1142         }
1143         ++current->mm->maj_flt;
1144         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1145                 bh[i] = NULL;
1146                 if (b[i])
1147                         bh[i] = getblk(dev, b[i], size);
1148         }
1149         read_buffers(bh,i);
1150         where = address;
1151         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1152                 if (bh[i]) {
1153                         if (bh[i]->b_uptodate)
1154                                 memcpy((void *) where, bh[i]->b_data, size);
1155                         brelse(bh[i]);
1156                 }
1157         }
1158         return address;
1159 }
1160 
1161 /*
1162  * Try to increase the number of buffers available: the size argument
1163  * is used to determine what kind of buffers we want.
1164  */
1165 static int grow_buffers(int pri, int size)
1166 {
1167         unsigned long page;
1168         struct buffer_head *bh, *tmp;
1169         struct buffer_head * insert_point;
1170         int isize;
1171 
1172         if ((size & 511) || (size > PAGE_SIZE)) {
1173                 printk("VFS: grow_buffers: size = %d\n",size);
1174                 return 0;
1175         }
1176 
1177         isize = BUFSIZE_INDEX(size);
1178 
1179         if (!(page = __get_free_page(pri)))
1180                 return 0;
1181         bh = create_buffers(page, size);
1182         if (!bh) {
1183                 free_page(page);
1184                 return 0;
1185         }
1186 
1187         insert_point = free_list[isize];
1188 
1189         tmp = bh;
1190         while (1) {
1191                 nr_free[isize]++;
1192                 if (insert_point) {
1193                         tmp->b_next_free = insert_point->b_next_free;
1194                         tmp->b_prev_free = insert_point;
1195                         insert_point->b_next_free->b_prev_free = tmp;
1196                         insert_point->b_next_free = tmp;
1197                 } else {
1198                         tmp->b_prev_free = tmp;
1199                         tmp->b_next_free = tmp;
1200                 }
1201                 insert_point = tmp;
1202                 ++nr_buffers;
1203                 if (tmp->b_this_page)
1204                         tmp = tmp->b_this_page;
1205                 else
1206                         break;
1207         }
1208         free_list[isize] = bh;
1209         buffer_pages[MAP_NR(page)] = bh;
1210         tmp->b_this_page = bh;
1211         wake_up(&buffer_wait);
1212         buffermem += PAGE_SIZE;
1213         return 1;
1214 }
1215 
1216 
1217 /* =========== Reduce the buffer memory ============= */
1218 
1219 /*
1220  * try_to_free() checks if all the buffers on this particular page
 1221  * are unused, and frees the page if so.
1222  */
1223 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1224 {
1225         unsigned long page;
1226         struct buffer_head * tmp, * p;
1227         int isize = BUFSIZE_INDEX(bh->b_size);
1228 
1229         *bhp = bh;
1230         page = (unsigned long) bh->b_data;
1231         page &= PAGE_MASK;
1232         tmp = bh;
1233         do {
1234                 if (!tmp)
1235                         return 0;
1236                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1237                         return 0;
1238                 tmp = tmp->b_this_page;
1239         } while (tmp != bh);
1240         tmp = bh;
1241         do {
1242                 p = tmp;
1243                 tmp = tmp->b_this_page;
1244                 nr_buffers--;
1245                 nr_buffers_size[isize]--;
1246                 if (p == *bhp)
1247                   {
1248                     *bhp = p->b_prev_free;
1249                     if (p == *bhp) /* Was this the last in the list? */
1250                       *bhp = NULL;
1251                   }
1252                 remove_from_queues(p);
1253                 put_unused_buffer_head(p);
1254         } while (tmp != bh);
1255         buffermem -= PAGE_SIZE;
1256         buffer_pages[MAP_NR(page)] = NULL;
1257         free_page(page);
1258         return !mem_map[MAP_NR(page)];
1259 }
1260 
1261 
1262 /*
1263  * Consult the load average for buffers and decide whether or not
1264  * we should shrink the buffers of one size or not.  If we decide yes,
1265  * do it and return 1.  Else return 0.  Do not attempt to shrink size
1266  * that is specified.
1267  *
1268  * I would prefer not to use a load average, but the way things are now it
1269  * seems unavoidable.  The way to get rid of it would be to force clustering
1270  * universally, so that when we reclaim buffers we always reclaim an entire
1271  * page.  Doing this would mean that we all need to move towards QMAGIC.
1272  */
1273 
1274 static int maybe_shrink_lav_buffers(int size)
1275 {          
1276         int nlist;
1277         int isize;
1278         int total_lav, total_n_buffers, n_sizes;
1279         
1280         /* Do not consider the shared buffers since they would not tend
1281            to have getblk called very often, and this would throw off
1282            the lav.  They are not easily reclaimable anyway (let the swapper
1283            make the first move). */
1284   
1285         total_lav = total_n_buffers = n_sizes = 0;
1286         for(nlist = 0; nlist < NR_SIZES; nlist++)
1287          {
1288                  total_lav += buffers_lav[nlist];
1289                  if(nr_buffers_size[nlist]) n_sizes++;
1290                  total_n_buffers += nr_buffers_size[nlist];
1291                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1292          }
1293         
1294         /* See if we have an excessive number of buffers of a particular
1295            size - if so, victimize that bunch. */
1296   
1297         isize = (size ? BUFSIZE_INDEX(size) : -1);
1298         
1299         if (n_sizes > 1)
1300                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1301                   {
1302                           if(nlist == isize) continue;
1303                           if(nr_buffers_size[nlist] &&
1304                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1305                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1306                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1307                                             return 1;
1308                   }
1309         return 0;
1310 }
1311 /*
1312  * Try to free up some pages by shrinking the buffer-cache
1313  *
1314  * Priority tells the routine how hard to try to shrink the
1315  * buffers: 3 means "don't bother too much", while a value
1316  * of 0 means "we'd better get some free pages now".
1317  */
1318 int shrink_buffers(unsigned int priority)
1319 {
1320         if (priority < 2) {
1321                 sync_buffers(0,0);
1322         }
1323 
1324         if(priority == 2) wakeup_bdflush(1);
1325 
1326         if(maybe_shrink_lav_buffers(0)) return 1;
1327 
1328         /* No good candidate size - take any size we can find */
1329         return shrink_specific_buffers(priority, 0);
1330 }
1331 
1332 static int shrink_specific_buffers(unsigned int priority, int size)
1333 {
1334         struct buffer_head *bh;
1335         int nlist;
1336         int i, isize, isize1;
1337 
1338 #ifdef DEBUG
1339         if(size) printk("Shrinking buffers of size %d\n", size);
1340 #endif
1341         /* First try the free lists, and see if we can get a complete page
1342            from here */
1343         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1344 
1345         for(isize = 0; isize<NR_SIZES; isize++){
1346                 if(isize1 != -1 && isize1 != isize) continue;
1347                 bh = free_list[isize];
1348                 if(!bh) continue;
1349                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1350                         if (bh->b_count || !bh->b_this_page)
1351                                  continue;
1352                         if (try_to_free(bh, &bh))
1353                                  return 1;
1354                         if(!bh) break; /* Some interrupt must have used it after we
1355                                           freed the page.  No big deal - keep looking */
1356                 }
1357         }
1358         
1359         /* Not enough in the free lists, now try the lru list */
1360         
1361         for(nlist = 0; nlist < NR_LIST; nlist++) {
1362         repeat1:
1363                 if(priority > 3 && nlist == BUF_SHARED) continue;
1364                 bh = lru_list[nlist];
1365                 if(!bh) continue;
1366                 i = 2*nr_buffers_type[nlist] >> priority;
1367                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1368                         /* We may have stalled while waiting for I/O to complete. */
1369                         if(bh->b_list != nlist) goto repeat1;
1370                         if (bh->b_count || !bh->b_this_page)
1371                                  continue;
1372                         if(size && bh->b_size != size) continue;
1373                         if (bh->b_lock)
1374                                  if (priority)
1375                                           continue;
1376                                  else
1377                                           wait_on_buffer(bh);
1378                         if (bh->b_dirt) {
1379                                 bh->b_count++;
1380                                 bh->b_flushtime = 0;
1381                                 ll_rw_block(WRITEA, 1, &bh);
1382                                 bh->b_count--;
1383                                 continue;
1384                         }
1385                         if (try_to_free(bh, &bh))
1386                                  return 1;
1387                         if(!bh) break;
1388                 }
1389         }
1390         return 0;
1391 }
1392 
1393 
1394 /* ================== Debugging =================== */
1395 
1396 void show_buffers(void)
1397 {
1398         struct buffer_head * bh;
1399         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1400         int shared;
1401         int nlist, isize;
1402 
1403         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1404         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1405         printk("Buffer blocks:   %6d\n",nr_buffers);
1406 
1407         for(nlist = 0; nlist < NR_LIST; nlist++) {
1408           shared = found = locked = dirty = used = lastused = 0;
1409           bh = lru_list[nlist];
1410           if(!bh) continue;
1411           do {
1412                 found++;
1413                 if (bh->b_lock)
1414                         locked++;
1415                 if (bh->b_dirt)
1416                         dirty++;
1417                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1418                 if (bh->b_count)
1419                         used++, lastused = found;
1420                 bh = bh->b_next_free;
1421               } while (bh != lru_list[nlist]);
1422         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1423                 nlist, found, used, lastused, locked, dirty, shared);
1424         };
1425         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1426         for(isize = 0; isize<NR_SIZES; isize++){
1427                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1428                        buffers_lav[isize], nr_free[isize]);
1429                 for(nlist = 0; nlist < NR_LIST; nlist++)
1430                          printk("%7d ", nr_buffers_st[isize][nlist]);
1431                 printk("\n");
1432         }
1433 }
1434 
1435 
1436 /* ====================== Cluster patches for ext2 ==================== */
1437 
1438 /*
1439  * try_to_reassign() checks if all the buffers on this particular page
1440  * are unused, and reassign to a new cluster them if this is true.
1441  */
1442 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1443                            dev_t dev, unsigned int starting_block)
1444 {
1445         unsigned long page;
1446         struct buffer_head * tmp, * p;
1447 
1448         *bhp = bh;
1449         page = (unsigned long) bh->b_data;
1450         page &= PAGE_MASK;
1451         if(mem_map[MAP_NR(page)] != 1) return 0;
1452         tmp = bh;
1453         do {
1454                 if (!tmp)
1455                          return 0;
1456                 
1457                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1458                          return 0;
1459                 tmp = tmp->b_this_page;
1460         } while (tmp != bh);
1461         tmp = bh;
1462         
1463         while((unsigned long) tmp->b_data & (PAGE_SIZE - 1)) 
1464                  tmp = tmp->b_this_page;
1465         
1466         /* This is the buffer at the head of the page */
1467         bh = tmp;
1468         do {
1469                 p = tmp;
1470                 tmp = tmp->b_this_page;
1471                 remove_from_queues(p);
1472                 p->b_dev=dev;
1473                 p->b_uptodate = 0;
1474                 p->b_req = 0;
1475                 p->b_blocknr=starting_block++;
1476                 insert_into_queues(p);
1477         } while (tmp != bh);
1478         return 1;
1479 }
1480 
1481 /*
1482  * Try to find a free cluster by locating a page where
1483  * all of the buffers are unused.  We would like this function
1484  * to be atomic, so we do not call anything that might cause
1485  * the process to sleep.  The priority is somewhat similar to
1486  * the priority used in shrink_buffers.
1487  * 
1488  * My thinking is that the kernel should end up using whole
1489  * pages for the buffer cache as much of the time as possible.
1490  * This way the other buffers on a particular page are likely
1491  * to be very near each other on the free list, and we will not
1492  * be expiring data prematurely.  For now we only cannibalize buffers
1493  * of the same size to keep the code simpler.
1494  */
1495 static int reassign_cluster(dev_t dev, 
1496                      unsigned int starting_block, int size)
1497 {
1498         struct buffer_head *bh;
1499         int isize = BUFSIZE_INDEX(size);
1500         int i;
1501 
1502         /* We want to give ourselves a really good shot at generating
1503            a cluster, and since we only take buffers from the free
1504            list, we "overfill" it a little. */
1505 
1506         while(nr_free[isize] < 32) refill_freelist(size);
1507 
1508         bh = free_list[isize];
1509         if(bh)
1510                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1511                          if (!bh->b_this_page)  continue;
1512                          if (try_to_reassign(bh, &bh, dev, starting_block))
1513                                  return 4;
1514                  }
1515         return 0;
1516 }
1517 
1518 /* This function tries to generate a new cluster of buffers
1519  * from a new page in memory.  We should only do this if we have
1520  * not expanded the buffer cache to the maximum size that we allow.
1521  */
1522 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1523 {
1524         struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
1525         int isize = BUFSIZE_INDEX(size);
1526         unsigned long offset;
1527         unsigned long page;
1528         int nblock;
1529 
1530         page = get_free_page(GFP_NOBUFFER);
1531         if(!page) return 0;
1532 
1533         bh = create_buffers(page, size);
1534         if (!bh) {
1535                 free_page(page);
1536                 return 0;
1537         };
1538         nblock = block;
1539         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1540                 if (find_buffer(dev, nblock++, size))
1541                          goto not_aligned;
1542         }
1543         tmp = bh;
1544         nblock = 0;
1545         while (1) {
1546                 arr[nblock++] = bh;
1547                 bh->b_count = 1;
1548                 bh->b_dirt = 0;
1549                 bh->b_flushtime = 0;
1550                 bh->b_lock = 0;
1551                 bh->b_uptodate = 0;
1552                 bh->b_req = 0;
1553                 bh->b_dev = dev;
1554                 bh->b_list = BUF_CLEAN;
1555                 bh->b_blocknr = block++;
1556                 nr_buffers++;
1557                 nr_buffers_size[isize]++;
1558                 insert_into_queues(bh);
1559                 if (bh->b_this_page)
1560                         bh = bh->b_this_page;
1561                 else
1562                         break;
1563         }
1564         buffermem += PAGE_SIZE;
1565         buffer_pages[MAP_NR(page)] = bh;
1566         bh->b_this_page = tmp;
1567         while (nblock-- > 0)
1568                 brelse(arr[nblock]);
1569         return 4; /* ?? */
1570 not_aligned:
1571         while ((tmp = bh) != NULL) {
1572                 bh = bh->b_this_page;
1573                 put_unused_buffer_head(tmp);
1574         }
1575         free_page(page);
1576         return 0;
1577 }
1578 
1579 unsigned long generate_cluster(dev_t dev, int b[], int size)
1580 {
1581         int i, offset;
1582         
1583         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1584                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1585                 if(find_buffer(dev, b[i], size)) return 0;
1586         };
1587 
1588         /* OK, we have a candidate for a new cluster */
1589         
1590         /* See if one size of buffer is over-represented in the buffer cache,
1591            if so, reduce the number of buffers */
1592         if(maybe_shrink_lav_buffers(size))
1593          {
1594                  int retval;
1595                  retval = try_to_generate_cluster(dev, b[0], size);
1596                  if(retval) return retval;
1597          };
1598         
1599         if (nr_free_pages > min_free_pages*2) 
1600                  return try_to_generate_cluster(dev, b[0], size);
1601         else
1602                  return reassign_cluster(dev, b[0], size);
1603 }
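/*
 * A hedged usage sketch (hypothetical caller; the real callers are in the
 * ext2 block-allocation code): with 1024-byte blocks and 4096-byte pages,
 * the filesystem passes in the PAGE_SIZE/size = 4 consecutive block numbers
 * that would share one page:
 *
 *      int b[4] = { goal, goal + 1, goal + 2, goal + 3 };
 *      generate_cluster(dev, b, 1024);
 *
 * A zero return simply means no cluster was built; the caller can go on
 * allocating the blocks individually through getblk().
 */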
1604 
1605 
1606 /* ===================== Init ======================= */
1607 
1608 /*
1609  * This initializes the initial buffer free list.  nr_buffers_type is set
1610  * to one less than the actual number of buffers, as a sop to backwards
1611  * compatibility --- the old code did this (I think unintentionally,
1612  * but I'm not sure), and programs in the ps package expect it.
1613  *                                      - TYT 8/30/92
1614  */
1615 void buffer_init(void)
1616 {
1617         int i;
1618         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1619 
1620         if (high_memory >= 4*1024*1024) {
1621                 if(high_memory >= 16*1024*1024)
1622                          nr_hash = 16381;
1623                 else
1624                          nr_hash = 4093;
1625         } else {
1626                 nr_hash = 997;
1627         };
1628         
1629         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1630                                                      sizeof(struct buffer_head *));
1631 
1632 
1633         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1634                                                      sizeof(struct buffer_head *));
1635         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1636                 buffer_pages[i] = NULL;
1637 
1638         for (i = 0 ; i < nr_hash ; i++)
1639                 hash_table[i] = NULL;
1640         lru_list[BUF_CLEAN] = 0;
1641         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1642         if (!free_list[isize])
1643                 panic("VFS: Unable to initialize buffer free list!");
1644         return;
1645 }
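/*
 * A rough sizing example for the two tables allocated above (assuming
 * 32-bit pointers and 4 kB pages, the common i386 case): on a 16 MB
 * machine nr_hash is 16381, so the hash table costs 16381 * 4 bytes,
 * i.e. about 64 kB, while buffer_pages holds one pointer per physical
 * page, 16 MB / 4 kB * 4 bytes = 16 kB.  Both live in vmalloc() space,
 * not in the buffer cache itself.
 */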
1646 
1647 
1648 /* ====================== bdflush support =================== */
1649 
1650 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1651  * response to dirty buffers.  Once this process is activated, we write back
1652  * a limited number of buffers to the disks and then go back to sleep again.
1653  * In effect this is a process which never leaves kernel mode, and does not have
1654  * any user memory associated with it except for the stack.  There is also
1655  * a kernel stack page, which obviously must be separate from the user stack.
1656  */
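/*
 * How the daemon comes to life is hedged here, since it depends on the
 * user-space side: one common arrangement is that a small helper program
 * (an "update"-style daemon) forks, and the child issues the bdflush
 * system call with func == 0.  That call falls into the endless loop in
 * sys_bdflush() below and never returns (until the process is killed),
 * at which point that thread of control is the bdflush daemon.
 */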
1657 struct wait_queue * bdflush_wait = NULL;
1658 struct wait_queue * bdflush_done = NULL;
1659 
1660 static int bdflush_running = 0;
1661 
1662 static void wakeup_bdflush(int wait)
1663 {
1664         if(!bdflush_running){
1665                 printk("Warning - bdflush not running\n");
1666                 sync_buffers(0,0);
1667                 return;
1668         };
1669         wake_up(&bdflush_wait);
1670         if(wait) sleep_on(&bdflush_done);
1671 }
1672 
1673 
1674 
1675 /* 
1676  * Here we attempt to write back old buffers.  We also try to flush inodes
1677  * and superblocks, since this function is essentially "update", and
1678  * otherwise there would be no way of ensuring that these quantities ever
1679  * get written back.  Ideally, we would have a timestamp on the inodes
1680  * and superblocks so that we could write back only the old ones.
1681  */
1682 
1683 asmlinkage int sync_old_buffers(void)
1684 {
1685         int i, isize;
1686         int ndirty, nwritten;
1687         int nlist;
1688         int ncount;
1689         struct buffer_head * bh, *next;
1690 
1691         sync_supers(0);
1692         sync_inodes(0);
1693 
1694         ncount = 0;
1695 #ifdef DEBUG
1696         for(nlist = 0; nlist < NR_LIST; nlist++)
1697 #else
1698         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1699 #endif
1700         {
1701                 ndirty = 0;
1702                 nwritten = 0;
1703         repeat:
1704                 bh = lru_list[nlist];
1705                 if(bh) 
1706                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1707                                  /* We may have stalled while waiting for I/O to complete. */
1708                                  if(bh->b_list != nlist) goto repeat;
1709                                  next = bh->b_next_free;
1710                                  if(!lru_list[nlist]) {
1711                                          printk("Dirty list empty %d\n", i);
1712                                          break;
1713                                  }
1714                                  
1715                                  /* Clean buffer on dirty list?  Refile it */
1716                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1717                                   {
1718                                           refile_buffer(bh);
1719                                           continue;
1720                                   }
1721                                  
1722                                  if (bh->b_lock || !bh->b_dirt)
1723                                           continue;
1724                                  ndirty++;
1725                                  if(bh->b_flushtime > jiffies) continue;
1726                                  nwritten++;
1727                                  bh->b_count++;
1728                                  bh->b_flushtime = 0;
1729 #ifdef DEBUG
1730                                  if(nlist != BUF_DIRTY) ncount++;
1731 #endif
1732                                  ll_rw_block(WRITE, 1, &bh);
1733                                  bh->b_count--;
1734                          }
1735         }
1736 #ifdef DEBUG
1737         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1738         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1739 #endif
1740         
1741         /* We assume that we only come through here on a regular
1742            schedule, like every 5 seconds.  Now update load averages.  
1743            Shift usage counts to prevent overflow. */
1744         for(isize = 0; isize<NR_SIZES; isize++){
1745                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1746                 buffer_usage[isize] = 0;
1747         };
1748         return 0;
1749 }
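/*
 * The CALC_LOAD() step above maintains buffers_lav[] as an exponentially
 * decaying average of getblk() traffic per buffer size.  As defined in
 * <linux/sched.h> in kernels of this vintage, it expands to roughly
 *
 *      load *= exp;
 *      load += n * (FIXED_1 - exp);
 *      load >>= FSHIFT;
 *
 * i.e. new = (old * exp + count * (FIXED_1 - exp)) / FIXED_1 in fixed
 * point, with exp taken from bdf_prm.b_un.lav_const here.
 */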
1750 
1751 
1752 /* This is the interface to bdflush.  As we get more sophisticated, we can
1753  * pass tuning parameters to this "process", to adjust how it behaves.  If you
1754  * invoke this again once the daemon is running, you simply modify
1755  * the tuning parameters.  We would want to verify each parameter, however,
1756  * to make sure that it is reasonable. */
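/*
 * A hedged user-space sketch (the bdflush() wrapper is assumed for
 * illustration; parameter numbering follows the code below): func 2*i+2
 * reads tuning parameter i back through the pointer passed in 'data',
 * and func 2*i+3 stores 'data' as its new value after range-checking
 * against bdflush_min[i]/bdflush_max[i]:
 *
 *      long val;
 *      bdflush(2, (long) &val);        reads bdf_prm.data[0] into val
 *      bdflush(3, new_value);          sets bdf_prm.data[0] = new_value
 */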
1757 
1758 asmlinkage int sys_bdflush(int func, long data)
1759 {
1760         int i, error;
1761         int ndirty;
1762         int nlist;
1763         int ncount;
1764         struct buffer_head * bh, *next;
1765 
1766         if (!suser())
1767                 return -EPERM;
1768 
1769         if (func == 1)
1770                  return sync_old_buffers();
1771 
1772         /* Basically func 0 starts the daemon, 1 flushes old buffers, 2 reads param 1, 3 writes param 1, 4 reads param 2, etc */
1773         if (func >= 2) {
1774                 i = (func-2) >> 1;
1775                 if (i < 0 || i >= N_PARAM)
1776                         return -EINVAL;
1777                 if((func & 1) == 0) {
1778                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1779                         if (error)
1780                                 return error;
1781                         put_fs_long(bdf_prm.data[i], data);
1782                         return 0;
1783                 };
1784                 if (data < bdflush_min[i] || data > bdflush_max[i])
1785                         return -EINVAL;
1786                 bdf_prm.data[i] = data;
1787                 return 0;
1788         };
1789         
1790         if (bdflush_running)
1791                 return -EBUSY; /* Only one copy of this running at one time */
1792         bdflush_running++;
1793         
1794         /* OK, from here on is the daemon */
1795         
1796         for (;;) {
1797 #ifdef DEBUG
1798                 printk("bdflush() activated...");
1799 #endif
1800                 
1801                 ncount = 0;
1802 #ifdef DEBUG
1803                 for(nlist = 0; nlist < NR_LIST; nlist++)
1804 #else
1805                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1806 #endif
1807                  {
1808                          ndirty = 0;
1809                  repeat:
1810                          bh = lru_list[nlist];
1811                          if(bh) 
1812                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1813                                        bh = next) {
1814                                           /* We may have stalled while waiting for I/O to complete. */
1815                                           if(bh->b_list != nlist) goto repeat;
1816                                           next = bh->b_next_free;
1817                                           if(!lru_list[nlist]) {
1818                                                   printk("Dirty list empty %d\n", i);
1819                                                   break;
1820                                           }
1821                                           
1822                                           /* Clean buffer on dirty list?  Refile it */
1823                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1824                                            {
1825                                                    refile_buffer(bh);
1826                                                    continue;
1827                                            }
1828                                           
1829                                           if (bh->b_lock || !bh->b_dirt)
1830                                                    continue;
1831                                           /* Should we write back buffers that are shared or not??
1832                                              currently dirty buffers are not shared, so it does not matter */
1833                                           bh->b_count++;
1834                                           ndirty++;
1835                                           bh->b_flushtime = 0;
1836                                           ll_rw_block(WRITE, 1, &bh);
1837 #ifdef DEBUG
1838                                           if(nlist != BUF_DIRTY) ncount++;
1839 #endif
1840                                           bh->b_count--;
1841                                   }
1842                  }
1843 #ifdef DEBUG
1844                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1845                 printk("sleeping again.\n");
1846 #endif
1847                 wake_up(&bdflush_done);
1848                 
1849                 /* If there are still a lot of dirty buffers around, skip the sleep
1850                    and flush some more */
1851                 
1852                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1853                    bdf_prm.b_un.nfract/100) {
1854                         if (current->signal & (1 << (SIGKILL-1))) {
1855                                 bdflush_running--;
1856                                 return 0;
1857                         }
1858                         current->signal = 0;
1859                         interruptible_sleep_on(&bdflush_wait);
1860                 }
1861         }
1862 }
1863 
1864 
1865 /*
1866  * Overrides for Emacs so that we follow Linus's tabbing style.
1867  * Emacs will notice this stuff at the end of the file and automatically
1868  * adjust the settings for this buffer only.  This must remain at the end
1869  * of the file.
1870  * ---------------------------------------------------------------------------
1871  * Local variables:
1872  * c-indent-level: 8
1873  * c-brace-imaginary-offset: 0
1874  * c-brace-offset: -8
1875  * c-argdecl-indent: 8
1876  * c-label-offset: -8
1877  * c-continued-statement-offset: 8
1878  * c-continued-brace-offset: 0
1879  * End:
1880  */
