root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. try_to_align
  32. check_aligned
  33. try_to_load_aligned
  34. try_to_share_buffers
  35. bread_page
  36. grow_buffers
  37. try_to_free
  38. maybe_shrink_lav_buffers
  39. shrink_buffers
  40. shrink_specific_buffers
  41. show_buffers
  42. try_to_reassign
  43. reassign_cluster
  44. try_to_generate_cluster
  45. generate_cluster
  46. buffer_init
  47. wakeup_bdflush
  48. sync_old_buffers
  49. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/sched.h>
  20 #include <linux/kernel.h>
  21 #include <linux/major.h>
  22 #include <linux/string.h>
  23 #include <linux/locks.h>
  24 #include <linux/errno.h>
  25 #include <linux/malloc.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/segment.h>
  29 #include <asm/io.h>
  30 
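/*
 * buffersize_index[] maps (size >> 9) to a small table index: 512 bytes
 * gives 0, 1024 gives 1, 2048 gives 2 and 4096 gives 3; any other size
 * maps to -1.  BUFSIZE_INDEX() performs that lookup, and
 * bufferindex_size[] is the reverse mapping from index back to size.
 */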
  31 #define NR_SIZES 4
  32 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  33 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  34 
  35 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
  36 
  37 static int grow_buffers(int pri, int size);
  38 static int shrink_specific_buffers(unsigned int priority, int size);
  39 static int maybe_shrink_lav_buffers(int);
  40 
  41 static int nr_hash = 0;  /* Size of hash table */
  42 static struct buffer_head ** hash_table;
  43 struct buffer_head ** buffer_pages;
  44 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  45 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  46 static struct buffer_head * unused_list = NULL;
  47 static struct wait_queue * buffer_wait = NULL;
  48 
  49 int nr_buffers = 0;
  50 int nr_buffers_type[NR_LIST] = {0,};
  51 int nr_buffers_size[NR_SIZES] = {0,};
  52 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  53 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  54 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  55 int nr_free[NR_SIZES] = {0,};
  56 int buffermem = 0;
  57 int nr_buffer_heads = 0;
  58 extern int *blksize_size[];
  59 
  60 /* Here is the parameter block for the bdflush process. */
  61 static void wakeup_bdflush(int);
  62 
  63 #define N_PARAM 9
  64 #define LAV
  65 
  66 static union bdflush_param{
  67         struct {
  68                 int nfract;  /* Percentage of buffer cache dirty to 
  69                                 activate bdflush */
  70                 int ndirty;  /* Maximum number of dirty blocks to write out per
  71                                 wake-cycle */
  72                 int nrefill; /* Number of clean buffers to try and obtain
  73                                 each time we call refill */
  74                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  75                                   when trying to refill buffers. */
  76                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  77                                     search for free clusters */
  78                 int age_buffer;  /* Time for normal buffer to age before 
  79                                     we flush it */
  80                 int age_super;  /* Time for superblock to age before we 
  81                                    flush it */
  82                 int lav_const;  /* Constant used for load average (time
  83                                    constant) */
  84                 int lav_ratio;  /* Used to determine how low a lav for a
  85                                    particular size can go before we start to
  86                                    trim back the buffers */
  87         } b_un;
  88         unsigned int data[N_PARAM];
  89 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
  90 
  91 /* The lav constant is set for 1 minute, as long as the update process runs
  92    every 5 seconds.  If you change the frequency of update, the time
  93    constant will also change. */
  94 
  95 
  96 /* These are the min and max parameter values that we will allow to be assigned */
  97 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
  98 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
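
/*
 * A minimal sketch (not the real sys_bdflush(), which appears later in
 * this file) of how a single tuning value can be range-checked against
 * bdflush_min[]/bdflush_max[] and stored through the data[] view of the
 * union, which aliases the named b_un fields:
 */
#if 0
static int example_set_bdflush_param(int index, int value)
{
        if (index < 0 || index >= N_PARAM)
                return -EINVAL;
        if (value < bdflush_min[index] || value > bdflush_max[index])
                return -EINVAL;
        bdf_prm.data[index] = value;    /* same storage as bdf_prm.b_un.* */
        return 0;
}
#endif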
  99 
 100 /*
 101  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 102  * and getting rid of the cli-sti pairs. The wait-queue routines still
 103  * need cli-sti, but now it's just a couple of 386 instructions or so.
 104  *
 105  * Note that the real wait_on_buffer() is an inline function that checks
  106  * if 'b_lock' is set before calling this, so that the queues aren't set
 107  * up unnecessarily.
 108  */
 109 void __wait_on_buffer(struct buffer_head * bh)
 110 {
 111         struct wait_queue wait = { current, NULL };
 112 
 113         bh->b_count++;
 114         add_wait_queue(&bh->b_wait, &wait);
 115 repeat:
 116         current->state = TASK_UNINTERRUPTIBLE;
 117         if (bh->b_lock) {
 118                 schedule();
 119                 goto repeat;
 120         }
 121         remove_wait_queue(&bh->b_wait, &wait);
 122         bh->b_count--;
 123         current->state = TASK_RUNNING;
 124 }
 125 
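/*
 * For reference, the inline wrapper mentioned above lives in
 * <linux/locks.h>; it is presumably along these lines (a sketch, not a
 * verbatim copy), so the wait-queue setup above is only paid for when
 * the buffer really is locked:
 *
 *      extern inline void wait_on_buffer(struct buffer_head * bh)
 *      {
 *              if (bh->b_lock)
 *                      __wait_on_buffer(bh);
 *      }
 */
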
 126 /* Call sync_buffers with wait!=0 to ensure that the call does not
 127    return until all buffer writes have completed.  Sync() may return
 128    before the writes have finished; fsync() may not. */
 129 
 130 
 131 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 132    spontaneously dirty themselves without ever brelse being called.
 133    We will ultimately want to put these in a separate list, but for
 134    now we search all of the lists for dirty buffers */
 135 
 136 static int sync_buffers(dev_t dev, int wait)
 137 {
 138         int i, retry, pass = 0, err = 0;
 139         int nlist, ncount;
 140         struct buffer_head * bh, *next;
 141 
 142         /* One pass for no-wait, three for wait:
 143            0) write out all dirty, unlocked buffers;
 144            1) write out all dirty buffers, waiting if locked;
 145            2) wait for completion by waiting for all buffers to unlock. */
 146  repeat:
 147         retry = 0;
 148  repeat2:
 149         ncount = 0;
 150         /* We search all lists as a failsafe mechanism, not because we expect
 151            there to be dirty buffers on any of the other lists. */
 152         for(nlist = 0; nlist < NR_LIST; nlist++)
 153          {
 154          repeat1:
 155                  bh = lru_list[nlist];
 156                  if(!bh) continue;
 157                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 158                          if(bh->b_list != nlist) goto repeat1;
 159                          next = bh->b_next_free;
 160                          if(!lru_list[nlist]) break;
 161                          if (dev && bh->b_dev != dev)
 162                                   continue;
 163                          if (bh->b_lock)
 164                           {
 165                                   /* Buffer is locked; skip it unless wait is
 166                                      requested AND pass > 0. */
 167                                   if (!wait || !pass) {
 168                                           retry = 1;
 169                                           continue;
 170                                   }
 171                                   wait_on_buffer (bh);
 172                                   goto repeat2;
 173                           }
 174                          /* If an unlocked buffer is not uptodate, there has
 175                              been an IO error. Skip it. */
 176                          if (wait && bh->b_req && !bh->b_lock &&
 177                              !bh->b_dirt && !bh->b_uptodate) {
 178                                   err = 1;
 179                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 180                                   continue;
 181                           }
 182                          /* Don't write clean buffers.  Don't write ANY buffers
 183                             on the third pass. */
 184                          if (!bh->b_dirt || pass>=2)
 185                                   continue;
 186                          /* don't bother about locked buffers */
 187                          if (bh->b_lock)
 188                                  continue;
 189                          bh->b_count++;
 190                          bh->b_flushtime = 0;
 191                          ll_rw_block(WRITE, 1, &bh);
 192 
 193                          if(nlist != BUF_DIRTY) { 
 194                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 195                                  ncount++;
 196                          };
 197                          bh->b_count--;
 198                          retry = 1;
 199                  }
 200          }
 201         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 202         
 203         /* If we are waiting for the sync to succeed, and if any dirty
 204            blocks were written, then repeat; on the second pass, only
 205            wait for buffers being written (do not pass to write any
 206            more buffers on the second pass). */
 207         if (wait && retry && ++pass<=2)
 208                  goto repeat;
 209         return err;
 210 }
 211 
 212 void sync_dev(dev_t dev)
 213 {
 214         sync_buffers(dev, 0);
 215         sync_supers(dev);
 216         sync_inodes(dev);
 217         sync_buffers(dev, 0);
 218 }
 219 
 220 int fsync_dev(dev_t dev)
 221 {
 222         sync_buffers(dev, 0);
 223         sync_supers(dev);
 224         sync_inodes(dev);
 225         return sync_buffers(dev, 1);
 226 }
 227 
 228 asmlinkage int sys_sync(void)
 229 {
 230         sync_dev(0);
 231         return 0;
 232 }
 233 
 234 int file_fsync (struct inode *inode, struct file *filp)
 235 {
 236         return fsync_dev(inode->i_dev);
 237 }
 238 
 239 asmlinkage int sys_fsync(unsigned int fd)
 240 {
 241         struct file * file;
 242         struct inode * inode;
 243 
 244         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 245                 return -EBADF;
 246         if (!file->f_op || !file->f_op->fsync)
 247                 return -EINVAL;
 248         if (file->f_op->fsync(inode,file))
 249                 return -EIO;
 250         return 0;
 251 }
 252 
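/*
 * Forget everything cached for a device: wait on each of its buffers and
 * then clear the dirty, uptodate and request flags, so stale contents are
 * never handed back (needed e.g. after a floppy disk change).
 */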
 253 void invalidate_buffers(dev_t dev)
 254 {
 255         int i;
 256         int nlist;
 257         struct buffer_head * bh;
 258 
 259         for(nlist = 0; nlist < NR_LIST; nlist++) {
 260                 bh = lru_list[nlist];
 261                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 262                      bh = bh->b_next_free) {
 263                         if (bh->b_dev != dev)
 264                                  continue;
 265                         wait_on_buffer(bh);
 266                         if (bh->b_dev == dev)
 267                                  bh->b_flushtime = bh->b_uptodate = 
 268                                           bh->b_dirt = bh->b_req = 0;
 269                 }
 270         }
 271 }
 272 
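/* The hash table maps a (dev,block) pair onto one of nr_hash chains;
   buffers on the same chain are linked through b_next/b_prev. */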
 273 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 274 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 275 
 276 static inline void remove_from_hash_queue(struct buffer_head * bh)
 277 {
 278         if (bh->b_next)
 279                 bh->b_next->b_prev = bh->b_prev;
 280         if (bh->b_prev)
 281                 bh->b_prev->b_next = bh->b_next;
 282         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 283                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 284         bh->b_next = bh->b_prev = NULL;
 285 }
 286 
 287 static inline void remove_from_lru_list(struct buffer_head * bh)
 288 {
 289         if (!(bh->b_prev_free) || !(bh->b_next_free))
 290                 panic("VFS: LRU block list corrupted");
 291         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 292         bh->b_prev_free->b_next_free = bh->b_next_free;
 293         bh->b_next_free->b_prev_free = bh->b_prev_free;
 294 
 295         if (lru_list[bh->b_list] == bh)
 296                  lru_list[bh->b_list] = bh->b_next_free;
 297         if(lru_list[bh->b_list] == bh)
 298                  lru_list[bh->b_list] = NULL;
 299         bh->b_next_free = bh->b_prev_free = NULL;
 300 }
 301 
 302 static inline void remove_from_free_list(struct buffer_head * bh)
 303 {
 304         int isize = BUFSIZE_INDEX(bh->b_size);
 305         if (!(bh->b_prev_free) || !(bh->b_next_free))
 306                 panic("VFS: Free block list corrupted");
 307         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 308         if(!free_list[isize])
 309                  panic("Free list empty");
 310         nr_free[isize]--;
 311         if(bh->b_next_free == bh)
 312                  free_list[isize] = NULL;
 313         else {
 314                 bh->b_prev_free->b_next_free = bh->b_next_free;
 315                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 316                 if (free_list[isize] == bh)
 317                          free_list[isize] = bh->b_next_free;
 318         };
 319         bh->b_next_free = bh->b_prev_free = NULL;
 320 }
 321 
 322 static inline void remove_from_queues(struct buffer_head * bh)
 323 {
 324         if(bh->b_dev == 0xffff) {
 325                 remove_from_free_list(bh); /* Free list entries should not be
 326                                               in the hash queue */
 327                 return;
 328         };
 329         nr_buffers_type[bh->b_list]--;
 330         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 331         remove_from_hash_queue(bh);
 332         remove_from_lru_list(bh);
 333 }
 334 
 335 static inline void put_last_lru(struct buffer_head * bh)
 336 {
 337         if (!bh)
 338                 return;
 339         if (bh == lru_list[bh->b_list]) {
 340                 lru_list[bh->b_list] = bh->b_next_free;
 341                 return;
 342         }
 343         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 344         remove_from_lru_list(bh);
  345 /* add to back of the LRU list */
 346 
 347         if(!lru_list[bh->b_list]) {
 348                 lru_list[bh->b_list] = bh;
 349                 lru_list[bh->b_list]->b_prev_free = bh;
 350         };
 351 
 352         bh->b_next_free = lru_list[bh->b_list];
 353         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 354         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 355         lru_list[bh->b_list]->b_prev_free = bh;
 356 }
 357 
 358 static inline void put_last_free(struct buffer_head * bh)
 359 {
 360         int isize;
 361         if (!bh)
 362                 return;
 363 
 364         isize = BUFSIZE_INDEX(bh->b_size);      
 365         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 366 /* add to back of free list */
 367 
 368         if(!free_list[isize]) {
 369                 free_list[isize] = bh;
 370                 bh->b_prev_free = bh;
 371         };
 372 
 373         nr_free[isize]++;
 374         bh->b_next_free = free_list[isize];
 375         bh->b_prev_free = free_list[isize]->b_prev_free;
 376         free_list[isize]->b_prev_free->b_next_free = bh;
 377         free_list[isize]->b_prev_free = bh;
 378 }
 379 
 380 static inline void insert_into_queues(struct buffer_head * bh)
 381 {
  382 /* an unused buffer goes at the end of the free list; anything else goes on the LRU list and into the hash queue */
 383 
 384         if(bh->b_dev == 0xffff) {
 385                 put_last_free(bh);
 386                 return;
 387         };
 388         if(!lru_list[bh->b_list]) {
 389                 lru_list[bh->b_list] = bh;
 390                 bh->b_prev_free = bh;
 391         };
 392         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 393         bh->b_next_free = lru_list[bh->b_list];
 394         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 395         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 396         lru_list[bh->b_list]->b_prev_free = bh;
 397         nr_buffers_type[bh->b_list]++;
 398         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 399 /* put the buffer in new hash-queue if it has a device */
 400         bh->b_prev = NULL;
 401         bh->b_next = NULL;
 402         if (!bh->b_dev)
 403                 return;
 404         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 405         hash(bh->b_dev,bh->b_blocknr) = bh;
 406         if (bh->b_next)
 407                 bh->b_next->b_prev = bh;
 408 }
 409 
 410 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 411 {               
 412         struct buffer_head * tmp;
 413 
 414         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 415                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 416                         if (tmp->b_size == size)
 417                                 return tmp;
 418                         else {
 419                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 420                                                         MAJOR(dev), MINOR(dev));
 421                                 return NULL;
 422                         }
 423         return NULL;
 424 }
 425 
 426 /*
 427  * Why like this, I hear you say... The reason is race-conditions.
 428  * As we don't lock buffers (unless we are reading them, that is),
 429  * something might happen to it while we sleep (ie a read-error
 430  * will force it bad). This shouldn't really happen currently, but
 431  * the code is ready.
 432  */
 433 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 434 {
 435         struct buffer_head * bh;
 436 
 437         for (;;) {
 438                 if (!(bh=find_buffer(dev,block,size)))
 439                         return NULL;
 440                 bh->b_count++;
 441                 wait_on_buffer(bh);
 442                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 443                         return bh;
 444                 bh->b_count--;
 445         }
 446 }
 447 
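/*
 * Switch a device over to a new block size: write back what we can and
 * then invalidate and unhash any cached buffers of the old size, so they
 * can never be found again with a stale b_size.
 */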
 448 void set_blocksize(dev_t dev, int size)
 449 {
 450         int i, nlist;
 451         struct buffer_head * bh, *bhnext;
 452 
 453         if (!blksize_size[MAJOR(dev)])
 454                 return;
 455 
 456         switch(size) {
 457                 default: panic("Invalid blocksize passed to set_blocksize");
 458                 case 512: case 1024: case 2048: case 4096:;
 459         }
 460 
 461         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 462                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 463                 return;
 464         }
 465         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 466                 return;
 467         sync_buffers(dev, 2);
 468         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 469 
 470   /* We need to be quite careful how we do this - we are moving entries
 471      around on the free list, and we can get in a loop if we are not careful.*/
 472 
 473         for(nlist = 0; nlist < NR_LIST; nlist++) {
 474                 bh = lru_list[nlist];
 475                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 476                         if(!bh) break;
 477                         bhnext = bh->b_next_free; 
 478                         if (bh->b_dev != dev)
 479                                  continue;
 480                         if (bh->b_size == size)
 481                                  continue;
 482                         
 483                         wait_on_buffer(bh);
 484                         if (bh->b_dev == dev && bh->b_size != size) {
 485                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 486                                          bh->b_flushtime = 0;
 487                         };
 488                         remove_from_hash_queue(bh);
 489                 }
 490         }
 491 }
 492 
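/* BADNESS() ranks how expensive a buffer is to reclaim: 0 for a clean,
   unlocked buffer, 1 if it is locked, 2 or 3 if it is dirty. */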
 493 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
 494 
 495 void refill_freelist(int size)
 496 {
 497         struct buffer_head * bh, * tmp;
 498         struct buffer_head * candidate[NR_LIST];
 499         unsigned int best_time, winner;
 500         int isize = BUFSIZE_INDEX(size);
 501         int buffers[NR_LIST];
 502         int i;
 503         int needed;
 504 
 505         /* First see if we even need this.  Sometimes it is advantageous
  506          to request some blocks in a filesystem that we know we will
  507          be needing ahead of time. */
 508 
 509         if (nr_free[isize] > 100)
 510                 return;
 511 
 512         /* If there are too many dirty buffers, we wake up the update process
 513            now so as to ensure that there are still clean buffers available
 514            for user processes to use (and dirty) */
 515         
 516         /* We are going to try and locate this much memory */
  517         needed = bdf_prm.b_un.nrefill * size;
 518 
 519         while (nr_free_pages > min_free_pages*2 && needed > 0 &&
 520                grow_buffers(GFP_BUFFER, size)) {
 521                 needed -= PAGE_SIZE;
 522         }
 523 
 524         if(needed <= 0) return;
 525 
 526         /* See if there are too many buffers of a different size.
 527            If so, victimize them */
 528 
 529         while(maybe_shrink_lav_buffers(size))
 530          {
 531                  if(!grow_buffers(GFP_BUFFER, size)) break;
 532                  needed -= PAGE_SIZE;
 533                  if(needed <= 0) return;
 534          };
 535 
 536         /* OK, we cannot grow the buffer cache, now try and get some
 537            from the lru list */
 538 
 539         /* First set the candidate pointers to usable buffers.  This
 540            should be quick nearly all of the time. */
 541 
 542 repeat0:
 543         for(i=0; i<NR_LIST; i++){
 544                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 545                    nr_buffers_type[i] == 0) {
 546                         candidate[i] = NULL;
 547                         buffers[i] = 0;
 548                         continue;
 549                 }
 550                 buffers[i] = nr_buffers_type[i];
 551                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 552                  {
 553                          if(buffers[i] < 0) panic("Here is the problem");
 554                          tmp = bh->b_next_free;
 555                          if (!bh) break;
 556                          
 557                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 558                              bh->b_dirt) {
 559                                  refile_buffer(bh);
 560                                  continue;
 561                          };
 562                          
 563                          if (bh->b_count || bh->b_size != size)
 564                                   continue;
 565                          
 566                          /* Buffers are written in the order they are placed 
 567                             on the locked list. If we encounter a locked
 568                             buffer here, this means that the rest of them
 569                             are also locked */
 570                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 571                                  buffers[i] = 0;
 572                                  break;
 573                          }
 574                          
 575                          if (BADNESS(bh)) continue;
 576                          break;
 577                  };
 578                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 579                 else candidate[i] = bh;
 580                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 581         }
 582         
 583  repeat:
 584         if(needed <= 0) return;
 585         
 586         /* Now see which candidate wins the election */
 587         
 588         winner = best_time = UINT_MAX;  
 589         for(i=0; i<NR_LIST; i++){
 590                 if(!candidate[i]) continue;
 591                 if(candidate[i]->b_lru_time < best_time){
 592                         best_time = candidate[i]->b_lru_time;
 593                         winner = i;
 594                 }
 595         }
 596         
 597         /* If we have a winner, use it, and then get a new candidate from that list */
 598         if(winner != UINT_MAX) {
 599                 i = winner;
 600                 bh = candidate[i];
 601                 candidate[i] = bh->b_next_free;
 602                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 603                 if (bh->b_count || bh->b_size != size)
 604                          panic("Busy buffer in candidate list\n");
 605                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 606                          panic("Shared buffer in candidate list\n");
 607                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 608                 
 609                 if(bh->b_dev == 0xffff) panic("Wrong list");
 610                 remove_from_queues(bh);
 611                 bh->b_dev = 0xffff;
 612                 put_last_free(bh);
 613                 needed -= bh->b_size;
 614                 buffers[i]--;
 615                 if(buffers[i] < 0) panic("Here is the problem");
 616                 
 617                 if(buffers[i] == 0) candidate[i] = NULL;
 618                 
 619                 /* Now all we need to do is advance the candidate pointer
 620                    from the winner list to the next usable buffer */
 621                 if(candidate[i] && buffers[i] > 0){
 622                         if(buffers[i] <= 0) panic("Here is another problem");
 623                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 624                                 if(buffers[i] < 0) panic("Here is the problem");
 625                                 tmp = bh->b_next_free;
 626                                 if (!bh) break;
 627                                 
 628                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 629                                     bh->b_dirt) {
 630                                         refile_buffer(bh);
 631                                         continue;
 632                                 };
 633                                 
 634                                 if (bh->b_count || bh->b_size != size)
 635                                          continue;
 636                                 
 637                                 /* Buffers are written in the order they are
 638                                    placed on the locked list.  If we encounter
 639                                    a locked buffer here, this means that the
 640                                    rest of them are also locked */
 641                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 642                                         buffers[i] = 0;
 643                                         break;
 644                                 }
 645               
 646                                 if (BADNESS(bh)) continue;
 647                                 break;
 648                         };
 649                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 650                         else candidate[i] = bh;
 651                         if(candidate[i] && candidate[i]->b_count) 
 652                                  panic("Here is the problem");
 653                 }
 654                 
 655                 goto repeat;
 656         }
 657         
 658         if(needed <= 0) return;
 659         
 660         /* Too bad, that was not enough. Try a little harder to grow some. */
 661         
 662         if (nr_free_pages > 5) {
 663                 if (grow_buffers(GFP_BUFFER, size)) {
 664                         needed -= PAGE_SIZE;
 665                         goto repeat0;
 666                 };
 667         }
 668         
 669         /* and repeat until we find something good */
 670         if (!grow_buffers(GFP_ATOMIC, size))
 671                 wakeup_bdflush(1);
 672         needed -= PAGE_SIZE;
 673         goto repeat0;
 674 }
 675 
 676 /*
 677  * Ok, this is getblk, and it isn't very clear, again to hinder
 678  * race-conditions. Most of the code is seldom used, (ie repeating),
 679  * so it should be much more efficient than it looks.
 680  *
 681  * The algorithm is changed: hopefully better, and an elusive bug removed.
 682  *
 683  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 684  * when the filesystem starts to get full of dirty blocks (I hope).
 685  */
 686 struct buffer_head * getblk(dev_t dev, int block, int size)
 687 {
 688         struct buffer_head * bh;
 689         int isize = BUFSIZE_INDEX(size);
 690 
 691         /* Update this for the buffer size lav. */
 692         buffer_usage[isize]++;
 693 
 694         /* If there are too many dirty buffers, we wake up the update process
 695            now so as to ensure that there are still clean buffers available
 696            for user processes to use (and dirty) */
 697 repeat:
 698         bh = get_hash_table(dev, block, size);
 699         if (bh) {
 700                 if (bh->b_uptodate && !bh->b_dirt)
 701                          put_last_lru(bh);
 702                 if(!bh->b_dirt) bh->b_flushtime = 0;
 703                 return bh;
 704         }
 705 
 706         while(!free_list[isize]) refill_freelist(size);
 707         
 708         if (find_buffer(dev,block,size))
 709                  goto repeat;
 710 
 711         bh = free_list[isize];
 712         remove_from_free_list(bh);
 713 
  714 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 715 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 716         bh->b_count=1;
 717         bh->b_dirt=0;
 718         bh->b_lock=0;
 719         bh->b_uptodate=0;
 720         bh->b_flushtime = 0;
 721         bh->b_req=0;
 722         bh->b_dev=dev;
 723         bh->b_blocknr=block;
 724         insert_into_queues(bh);
 725         return bh;
 726 }
 727 
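/*
 * A sketch (illustrative only, not code from this file) of the usual
 * write-side idiom built on getblk(): claim the cache block, fill it,
 * mark it uptodate and dirty, and let bdflush or a sync write it back.
 */
#if 0
static void example_write_block(dev_t dev, int block, int size)
{
        struct buffer_head * bh = getblk(dev, block, size);

        memset(bh->b_data, 0, size);    /* fill in the new contents */
        bh->b_uptodate = 1;             /* contents are now valid */
        bh->b_dirt = 1;                 /* must be written back */
        brelse(bh);                     /* drop our reference */
}
#endif
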
 728 void set_writetime(struct buffer_head * buf, int flag)
 729 {
 730         int newtime;
 731 
 732         if (buf->b_dirt){
  733                 /* Record when this dirty buffer should be flushed, but never push back an earlier deadline */
 734                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 735                                      bdf_prm.b_un.age_buffer);
 736                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 737                          buf->b_flushtime = newtime;
 738         } else {
 739                 buf->b_flushtime = 0;
 740         }
 741 }
 742 
 743 
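/*
 * Put a buffer back on whichever LRU list matches its current state
 * (dirty, shared, locked or clean), and wake bdflush if too large a
 * fraction of the cache has gone dirty.
 */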
 744 void refile_buffer(struct buffer_head * buf){
 745         int dispose;
 746         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 747         if (buf->b_dirt)
 748                 dispose = BUF_DIRTY;
 749         else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
 750                 dispose = BUF_SHARED;
 751         else if (buf->b_lock)
 752                 dispose = BUF_LOCKED;
 753         else if (buf->b_list == BUF_SHARED)
 754                 dispose = BUF_UNSHARED;
 755         else
 756                 dispose = BUF_CLEAN;
 757         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 758         if(dispose != buf->b_list)  {
 759                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 760                          buf->b_lru_time = jiffies;
 761                 if(dispose == BUF_LOCKED && 
 762                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 763                          dispose = BUF_LOCKED1;
 764                 remove_from_queues(buf);
 765                 buf->b_list = dispose;
 766                 insert_into_queues(buf);
 767                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 768                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 769                    bdf_prm.b_un.nfract/100)
 770                          wakeup_bdflush(0);
 771         }
 772 }
 773 
 774 void brelse(struct buffer_head * buf)
 775 {
 776         if (!buf)
 777                 return;
 778         wait_on_buffer(buf);
 779 
 780         /* If dirty, mark the time this buffer should be written back */
 781         set_writetime(buf, 0);
 782         refile_buffer(buf);
 783 
 784         if (buf->b_count) {
 785                 if (--buf->b_count)
 786                         return;
 787                 wake_up(&buffer_wait);
 788                 return;
 789         }
 790         printk("VFS: brelse: Trying to free free buffer\n");
 791 }
 792 
 793 /*
 794  * bread() reads a specified block and returns the buffer that contains
 795  * it. It returns NULL if the block was unreadable.
 796  */
 797 struct buffer_head * bread(dev_t dev, int block, int size)
 798 {
 799         struct buffer_head * bh;
 800 
 801         if (!(bh = getblk(dev, block, size))) {
 802                 printk("VFS: bread: READ error on device %d/%d\n",
 803                                                 MAJOR(dev), MINOR(dev));
 804                 return NULL;
 805         }
 806         if (bh->b_uptodate)
 807                 return bh;
 808         ll_rw_block(READ, 1, &bh);
 809         wait_on_buffer(bh);
 810         if (bh->b_uptodate)
 811                 return bh;
 812         brelse(bh);
 813         return NULL;
 814 }
 815 
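/*
 * A typical caller of bread() looks roughly like this (illustrative
 * sketch only; the block number and 1kB blocksize are assumptions, not
 * taken from any particular filesystem):
 */
#if 0
static void example_bread_usage(dev_t dev)
{
        struct buffer_head * bh = bread(dev, 1, 1024);

        if (!bh)
                return;                 /* the block was unreadable */
        /* ... look at bh->b_data here ... */
        brelse(bh);                     /* release the reference */
}
#endif
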
 816 /*
  817  * Ok, breada can be used just like bread, but it additionally starts
  818  * read-ahead on up to NBUF of the following blocks, limited by the
  819  * device's read_ahead setting and by the end of the file.
 820  */
 821 
 822 #define NBUF 16
 823 
 824 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 825         unsigned int pos, unsigned int filesize)
 826 {
 827         struct buffer_head * bhlist[NBUF];
 828         unsigned int blocks;
 829         struct buffer_head * bh;
 830         int index;
 831         int i, j;
 832 
 833         if (pos >= filesize)
 834                 return NULL;
 835 
 836         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 837                 return NULL;
 838 
 839         index = BUFSIZE_INDEX(bh->b_size);
 840 
 841         if (bh->b_uptodate)
 842                 return bh;
 843 
 844         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 845 
 846         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 847                 blocks = read_ahead[MAJOR(dev)] >> index;
 848         if (blocks > NBUF)
 849                 blocks = NBUF;
 850         
 851         bhlist[0] = bh;
 852         j = 1;
 853         for(i=1; i<blocks; i++) {
 854                 bh = getblk(dev,block+i,bufsize);
 855                 if (bh->b_uptodate) {
 856                         brelse(bh);
 857                         break;
 858                 }
 859                 bhlist[j++] = bh;
 860         }
 861 
 862         /* Request the read for these buffers, and then release them */
 863         ll_rw_block(READ, j, bhlist);
 864 
 865         for(i=1; i<j; i++)
 866                 brelse(bhlist[i]);
 867 
 868         /* Wait for this buffer, and then continue on */
 869         bh = bhlist[0];
 870         wait_on_buffer(bh);
 871         if (bh->b_uptodate)
 872                 return bh;
 873         brelse(bh);
 874         return NULL;
 875 }
 876 
 877 /*
 878  * See fs/inode.c for the weird use of volatile..
 879  */
 880 static void put_unused_buffer_head(struct buffer_head * bh)
 881 {
 882         struct wait_queue * wait;
 883 
 884         wait = ((volatile struct buffer_head *) bh)->b_wait;
 885         memset(bh,0,sizeof(*bh));
 886         ((volatile struct buffer_head *) bh)->b_wait = wait;
 887         bh->b_next_free = unused_list;
 888         unused_list = bh;
 889 }
 890 
 891 static void get_more_buffer_heads(void)
 892 {
 893         int i;
 894         struct buffer_head * bh;
 895 
 896         if (unused_list)
 897                 return;
 898 
 899         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 900                 return;
 901 
 902         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 903                 bh->b_next_free = unused_list;  /* only make link */
 904                 unused_list = bh++;
 905         }
 906 }
 907 
 908 static struct buffer_head * get_unused_buffer_head(void)
 909 {
 910         struct buffer_head * bh;
 911 
 912         get_more_buffer_heads();
 913         if (!unused_list)
 914                 return NULL;
 915         bh = unused_list;
 916         unused_list = bh->b_next_free;
 917         bh->b_next_free = NULL;
 918         bh->b_data = NULL;
 919         bh->b_size = 0;
 920         bh->b_req = 0;
 921         return bh;
 922 }
 923 
 924 /*
 925  * Create the appropriate buffers when given a page for data area and
 926  * the size of each buffer.. Use the bh->b_this_page linked list to
 927  * follow the buffers created.  Return NULL if unable to create more
 928  * buffers.
 929  */
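/* Note that the b_this_page chain built below is NULL-terminated; callers
   such as grow_buffers() close it into a ring once every head is linked. */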
 930 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 931 {
 932         struct buffer_head *bh, *head;
 933         unsigned long offset;
 934 
 935         head = NULL;
 936         offset = PAGE_SIZE;
 937         while ((offset -= size) < PAGE_SIZE) {
 938                 bh = get_unused_buffer_head();
 939                 if (!bh)
 940                         goto no_grow;
 941                 bh->b_this_page = head;
 942                 head = bh;
 943                 bh->b_data = (char *) (page+offset);
 944                 bh->b_size = size;
 945                 bh->b_dev = 0xffff;  /* Flag as unused */
 946         }
 947         return head;
 948 /*
 949  * In case anything failed, we just free everything we got.
 950  */
 951 no_grow:
 952         bh = head;
 953         while (bh) {
 954                 head = bh;
 955                 bh = bh->b_this_page;
 956                 put_unused_buffer_head(head);
 957         }
 958         return NULL;
 959 }
 960 
 961 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 962 {
 963         int i;
 964         int bhnum = 0;
 965         struct buffer_head * bhr[8];
 966 
 967         for (i = 0 ; i < nrbuf ; i++) {
 968                 if (bh[i] && !bh[i]->b_uptodate)
 969                         bhr[bhnum++] = bh[i];
 970         }
 971         if (bhnum)
 972                 ll_rw_block(READ, bhnum, bhr);
 973         for (i = 0 ; i < nrbuf ; i++) {
 974                 if (bh[i]) {
 975                         wait_on_buffer(bh[i]);
 976                 }
 977         }
 978 }
 979 
 980 /*
 981  * This actually gets enough info to try to align the stuff,
 982  * but we don't bother yet.. We'll have to check that nobody
 983  * else uses the buffers etc.
 984  *
 985  * "address" points to the new page we can use to move things
 986  * around..
 987  */
 988 static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
 989         unsigned long address)
 990 {
 991         while (nrbuf-- > 0)
 992                 brelse(bh[nrbuf]);
 993         return 0;
 994 }
 995 
 996 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 997         dev_t dev, int *b, int size)
 998 {
 999         struct buffer_head * bh[8];
1000         unsigned long page;
1001         unsigned long offset;
1002         int block;
1003         int nrbuf;
1004         int aligned = 1;
1005 
1006         bh[0] = first;
1007         nrbuf = 1;
1008         page = (unsigned long) first->b_data;
1009         if (page & ~PAGE_MASK)
1010                 aligned = 0;
1011         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1012                 block = *++b;
1013                 if (!block)
1014                         goto no_go;
1015                 first = get_hash_table(dev, block, size);
1016                 if (!first)
1017                         goto no_go;
1018                 bh[nrbuf++] = first;
1019                 if (page+offset != (unsigned long) first->b_data)
1020                         aligned = 0;
1021         }
1022         if (!aligned)
1023                 return try_to_align(bh, nrbuf, address);
1024         mem_map[MAP_NR(page)]++;
1025         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1026         while (nrbuf-- > 0)
1027                 brelse(bh[nrbuf]);
1028         free_page(address);
1029         ++current->mm->min_flt;
1030         return page;
1031 no_go:
1032         while (nrbuf-- > 0)
1033                 brelse(bh[nrbuf]);
1034         return 0;
1035 }
1036 
1037 static unsigned long try_to_load_aligned(unsigned long address,
1038         dev_t dev, int b[], int size)
1039 {
1040         struct buffer_head * bh, * tmp, * arr[8];
1041         unsigned long offset;
1042         int isize = BUFSIZE_INDEX(size);
1043         int * p;
1044         int block;
1045 
1046         bh = create_buffers(address, size);
1047         if (!bh)
1048                 return 0;
1049         /* do any of the buffers already exist? punt if so.. */
1050         p = b;
1051         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1052                 block = *(p++);
1053                 if (!block)
1054                         goto not_aligned;
1055                 if (find_buffer(dev, block, size))
1056                         goto not_aligned;
1057         }
1058         tmp = bh;
1059         p = b;
1060         block = 0;
1061         while (1) {
1062                 arr[block++] = bh;
1063                 bh->b_count = 1;
1064                 bh->b_dirt = 0;
1065                 bh->b_flushtime = 0;
1066                 bh->b_uptodate = 0;
1067                 bh->b_req = 0;
1068                 bh->b_dev = dev;
1069                 bh->b_blocknr = *(p++);
1070                 bh->b_list = BUF_CLEAN;
1071                 nr_buffers++;
1072                 nr_buffers_size[isize]++;
1073                 insert_into_queues(bh);
1074                 if (bh->b_this_page)
1075                         bh = bh->b_this_page;
1076                 else
1077                         break;
1078         }
1079         buffermem += PAGE_SIZE;
1080         bh->b_this_page = tmp;
1081         mem_map[MAP_NR(address)]++;
1082         buffer_pages[MAP_NR(address)] = bh;
1083         read_buffers(arr,block);
1084         while (block-- > 0)
1085                 brelse(arr[block]);
1086         ++current->mm->maj_flt;
1087         return address;
1088 not_aligned:
1089         while ((tmp = bh) != NULL) {
1090                 bh = bh->b_this_page;
1091                 put_unused_buffer_head(tmp);
1092         }
1093         return 0;
1094 }
1095 
1096 /*
1097  * Try-to-share-buffers tries to minimize memory use by trying to keep
1098  * both code pages and the buffer area in the same page. This is done by
1099  * (a) checking if the buffers are already aligned correctly in memory and
1100  * (b) if none of the buffer heads are in memory at all, trying to load
1101  * them into memory the way we want them.
1102  *
1103  * This doesn't guarantee that the memory is shared, but should under most
1104  * circumstances work very well indeed (ie >90% sharing of code pages on
1105  * demand-loadable executables).
1106  */
1107 static inline unsigned long try_to_share_buffers(unsigned long address,
1108         dev_t dev, int *b, int size)
1109 {
1110         struct buffer_head * bh;
1111         int block;
1112 
1113         block = b[0];
1114         if (!block)
1115                 return 0;
1116         bh = get_hash_table(dev, block, size);
1117         if (bh)
1118                 return check_aligned(bh, address, dev, b, size);
1119         return try_to_load_aligned(address, dev, b, size);
1120 }
1121 
1122 /*
 1123  * bread_page reads a page's worth of buffers into memory at the desired address. It's
1124  * a function of its own, as there is some speed to be got by reading them
1125  * all at the same time, not waiting for one to be read, and then another
1126  * etc. This also allows us to optimize memory usage by sharing code pages
1127  * and filesystem buffers..
1128  */
1129 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1130 {
1131         struct buffer_head * bh[8];
1132         unsigned long where;
1133         int i, j;
1134 
1135         if (!no_share) {
1136                 where = try_to_share_buffers(address, dev, b, size);
1137                 if (where)
1138                         return where;
1139         }
1140         ++current->mm->maj_flt;
1141         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1142                 bh[i] = NULL;
1143                 if (b[i])
1144                         bh[i] = getblk(dev, b[i], size);
1145         }
1146         read_buffers(bh,i);
1147         where = address;
1148         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1149                 if (bh[i]) {
1150                         if (bh[i]->b_uptodate)
1151                                 memcpy((void *) where, bh[i]->b_data, size);
1152                         brelse(bh[i]);
1153                 }
1154         }
1155         return address;
1156 }
1157 
1158 /*
1159  * Try to increase the number of buffers available: the size argument
1160  * is used to determine what kind of buffers we want.
1161  */
1162 static int grow_buffers(int pri, int size)
1163 {
1164         unsigned long page;
1165         struct buffer_head *bh, *tmp;
1166         struct buffer_head * insert_point;
1167         int isize;
1168 
1169         if ((size & 511) || (size > PAGE_SIZE)) {
1170                 printk("VFS: grow_buffers: size = %d\n",size);
1171                 return 0;
1172         }
1173 
1174         isize = BUFSIZE_INDEX(size);
1175 
1176         if (!(page = __get_free_page(pri)))
1177                 return 0;
1178         bh = create_buffers(page, size);
1179         if (!bh) {
1180                 free_page(page);
1181                 return 0;
1182         }
1183 
1184         insert_point = free_list[isize];
1185 
1186         tmp = bh;
1187         while (1) {
1188                 nr_free[isize]++;
1189                 if (insert_point) {
1190                         tmp->b_next_free = insert_point->b_next_free;
1191                         tmp->b_prev_free = insert_point;
1192                         insert_point->b_next_free->b_prev_free = tmp;
1193                         insert_point->b_next_free = tmp;
1194                 } else {
1195                         tmp->b_prev_free = tmp;
1196                         tmp->b_next_free = tmp;
1197                 }
1198                 insert_point = tmp;
1199                 ++nr_buffers;
1200                 if (tmp->b_this_page)
1201                         tmp = tmp->b_this_page;
1202                 else
1203                         break;
1204         }
1205         free_list[isize] = bh;
1206         buffer_pages[MAP_NR(page)] = bh;
1207         tmp->b_this_page = bh;
1208         wake_up(&buffer_wait);
1209         buffermem += PAGE_SIZE;
1210         return 1;
1211 }
1212 
1213 
1214 /* =========== Reduce the buffer memory ============= */
1215 
1216 /*
1217  * try_to_free() checks if all the buffers on this particular page
 1218  * are unused, and frees the page if so.
1219  */
1220 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1221 {
1222         unsigned long page;
1223         struct buffer_head * tmp, * p;
1224         int isize = BUFSIZE_INDEX(bh->b_size);
1225 
1226         *bhp = bh;
1227         page = (unsigned long) bh->b_data;
1228         page &= PAGE_MASK;
1229         tmp = bh;
1230         do {
1231                 if (!tmp)
1232                         return 0;
1233                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1234                         return 0;
1235                 tmp = tmp->b_this_page;
1236         } while (tmp != bh);
1237         tmp = bh;
1238         do {
1239                 p = tmp;
1240                 tmp = tmp->b_this_page;
1241                 nr_buffers--;
1242                 nr_buffers_size[isize]--;
1243                 if (p == *bhp)
1244                   {
1245                     *bhp = p->b_prev_free;
1246                     if (p == *bhp) /* Was this the last in the list? */
1247                       *bhp = NULL;
1248                   }
1249                 remove_from_queues(p);
1250                 put_unused_buffer_head(p);
1251         } while (tmp != bh);
1252         buffermem -= PAGE_SIZE;
1253         buffer_pages[MAP_NR(page)] = NULL;
1254         free_page(page);
1255         return !mem_map[MAP_NR(page)];
1256 }
1257 
1258 
1259 /*
1260  * Consult the load average for buffers and decide whether or not
 1261  * we should shrink the buffers of one size.  If we decide yes,
 1262  * do it and return 1.  Else return 0.  Do not attempt to shrink
 1263  * the size that is specified.
1264  *
1265  * I would prefer not to use a load average, but the way things are now it
1266  * seems unavoidable.  The way to get rid of it would be to force clustering
1267  * universally, so that when we reclaim buffers we always reclaim an entire
1268  * page.  Doing this would mean that we all need to move towards QMAGIC.
1269  */
1270 
1271 static int maybe_shrink_lav_buffers(int size)
1272 {          
1273         int nlist;
1274         int isize;
1275         int total_lav, total_n_buffers, n_sizes;
1276         
1277         /* Do not consider the shared buffers since they would not tend
1278            to have getblk called very often, and this would throw off
1279            the lav.  They are not easily reclaimable anyway (let the swapper
1280            make the first move). */
1281   
1282         total_lav = total_n_buffers = n_sizes = 0;
1283         for(nlist = 0; nlist < NR_SIZES; nlist++)
1284          {
1285                  total_lav += buffers_lav[nlist];
1286                  if(nr_buffers_size[nlist]) n_sizes++;
1287                  total_n_buffers += nr_buffers_size[nlist];
1288                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1289          }
1290         
1291         /* See if we have an excessive number of buffers of a particular
1292            size - if so, victimize that bunch. */
1293   
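        /* The test below victimizes a size whose share of the total load
           average, even scaled up by lav_const, is smaller than its share
           of the unshared buffer population - i.e. it holds more buffers
           than its recent usage justifies. */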
1294         isize = (size ? BUFSIZE_INDEX(size) : -1);
1295         
1296         if (n_sizes > 1)
1297                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1298                   {
1299                           if(nlist == isize) continue;
1300                           if(nr_buffers_size[nlist] &&
1301                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1302                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1303                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1304                                             return 1;
1305                   }
1306         return 0;
1307 }
1308 /*
1309  * Try to free up some pages by shrinking the buffer-cache
1310  *
1311  * Priority tells the routine how hard to try to shrink the
1312  * buffers: 3 means "don't bother too much", while a value
1313  * of 0 means "we'd better get some free pages now".
1314  */
1315 int shrink_buffers(unsigned int priority)
1316 {
1317         if (priority < 2) {
1318                 sync_buffers(0,0);
1319         }
1320 
1321         if(priority == 2) wakeup_bdflush(1);
1322 
1323         if(maybe_shrink_lav_buffers(0)) return 1;
1324 
1325         /* No good candidate size - take any size we can find */
1326         return shrink_specific_buffers(priority, 0);
1327 }
1328 
1329 static int shrink_specific_buffers(unsigned int priority, int size)
1330 {
1331         struct buffer_head *bh;
1332         int nlist;
1333         int i, isize, isize1;
1334 
1335 #ifdef DEBUG
1336         if(size) printk("Shrinking buffers of size %d\n", size);
1337 #endif
1338         /* First try the free lists, and see if we can get a complete page
1339            from here */
1340         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1341 
1342         for(isize = 0; isize<NR_SIZES; isize++){
1343                 if(isize1 != -1 && isize1 != isize) continue;
1344                 bh = free_list[isize];
1345                 if(!bh) continue;
1346                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1347                         if (bh->b_count || !bh->b_this_page)
1348                                  continue;
1349                         if (try_to_free(bh, &bh))
1350                                  return 1;
1351                         if(!bh) break; /* Some interrupt must have used it after we
1352                                           freed the page.  No big deal - keep looking */
1353                 }
1354         }
1355         
1356         /* Not enough in the free lists, now try the lru list */
1357         
1358         for(nlist = 0; nlist < NR_LIST; nlist++) {
1359         repeat1:
1360                 if(priority > 3 && nlist == BUF_SHARED) continue;
1361                 bh = lru_list[nlist];
1362                 if(!bh) continue;
1363                 i = nr_buffers_type[nlist] >> priority;
1364                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1365                         /* We may have stalled while waiting for I/O to complete. */
1366                         if(bh->b_list != nlist) goto repeat1;
1367                         if (bh->b_count || !bh->b_this_page)
1368                                  continue;
1369                         if(size && bh->b_size != size) continue;
1370                         if (bh->b_lock)
1371                                  if (priority)
1372                                           continue;
1373                                  else
1374                                           wait_on_buffer(bh);
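                        /* A dirty buffer cannot be freed yet: start an
                           asynchronous write-ahead (WRITEA) so that it
                           becomes reclaimable once the write completes. */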
1375                         if (bh->b_dirt) {
1376                                 bh->b_count++;
1377                                 bh->b_flushtime = 0;
1378                                 ll_rw_block(WRITEA, 1, &bh);
1379                                 bh->b_count--;
1380                                 continue;
1381                         }
1382                         if (try_to_free(bh, &bh))
1383                                  return 1;
1384                         if(!bh) break;
1385                 }
1386         }
1387         return 0;
1388 }
1389 
1390 
1391 /* ================== Debugging =================== */
1392 
1393 void show_buffers(void)
1394 {
1395         struct buffer_head * bh;
1396         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1397         int shared;
1398         int nlist, isize;
1399 
1400         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1401         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1402         printk("Buffer blocks:   %6d\n",nr_buffers);
1403 
1404         for(nlist = 0; nlist < NR_LIST; nlist++) {
1405           shared = found = locked = dirty = used = lastused = 0;
1406           bh = lru_list[nlist];
1407           if(!bh) continue;
1408           do {
1409                 found++;
1410                 if (bh->b_lock)
1411                         locked++;
1412                 if (bh->b_dirt)
1413                         dirty++;
1414                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1415                 if (bh->b_count)
1416                         used++, lastused = found;
1417                 bh = bh->b_next_free;
1418               } while (bh != lru_list[nlist]);
1419         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1420                 nlist, found, used, lastused, locked, dirty, shared);
1421         };
1422         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1423         for(isize = 0; isize<NR_SIZES; isize++){
1424                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1425                        buffers_lav[isize], nr_free[isize]);
1426                 for(nlist = 0; nlist < NR_LIST; nlist++)
1427                          printk("%7d ", nr_buffers_st[isize][nlist]);
1428                 printk("\n");
1429         }
1430 }
1431 
1432 
1433 /* ====================== Cluster patches for ext2 ==================== */
1434 
1435 /*
1436  * try_to_reassign() checks if all the buffers on this particular page
1437  * are unused, and reassigns them to a new cluster if this is true.
1438  */
1439 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1440                            dev_t dev, unsigned int starting_block)
1441 {
1442         unsigned long page;
1443         struct buffer_head * tmp, * p;
1444 
1445         *bhp = bh;
1446         page = (unsigned long) bh->b_data;
1447         page &= PAGE_MASK;
1448         if(mem_map[MAP_NR(page)] != 1) return 0;
1449         tmp = bh;
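        /* First pass: the page can only be reassigned if every buffer on it
           is idle - not in use, not dirty and not locked. */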
1450         do {
1451                 if (!tmp)
1452                          return 0;
1453                 
1454                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1455                          return 0;
1456                 tmp = tmp->b_this_page;
1457         } while (tmp != bh);
1458         tmp = bh;
1459         
1460         while((unsigned long) tmp->b_data & (PAGE_SIZE - 1)) 
1461                  tmp = tmp->b_this_page;
1462         
1463         /* This is the buffer at the head of the page */
1464         bh = tmp;
1465         do {
1466                 p = tmp;
1467                 tmp = tmp->b_this_page;
1468                 remove_from_queues(p);
1469                 p->b_dev=dev;
1470                 p->b_uptodate = 0;
1471                 p->b_req = 0;
1472                 p->b_blocknr=starting_block++;
1473                 insert_into_queues(p);
1474         } while (tmp != bh);
1475         return 1;
1476 }
1477 
1478 /*
1479  * Try to find a free cluster by locating a page where
1480  * all of the buffers are unused.  We would like this function
1481  * to be atomic, so we do not call anything that might cause
1482  * the process to sleep.  The priority is somewhat similar to
1483  * the priority used in shrink_buffers.
1484  * 
1485  * My thinking is that the kernel should end up using whole
1486  * pages for the buffer cache as much of the time as possible.
1487  * This way the other buffers on a particular page are likely
1488  * to be very near each other on the free list, and we will not
1489  * be expiring data prematurely.  For now we only cannibalize buffers
1490  * of the same size to keep the code simpler.
1491  */
1492 static int reassign_cluster(dev_t dev, 
1493                      unsigned int starting_block, int size)
1494 {
1495         struct buffer_head *bh;
1496         int isize = BUFSIZE_INDEX(size);
1497         int i;
1498 
1499         /* We want to give ourselves a really good shot at generating
1500            a cluster, and since we only take buffers from the free
1501            list, we "overfill" it a little. */
1502 
1503         while(nr_free[isize] < 32) refill_freelist(size);
1504 
1505         bh = free_list[isize];
1506         if(bh)
1507                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1508                          if (!bh->b_this_page)  continue;
1509                          if (try_to_reassign(bh, &bh, dev, starting_block))
1510                                  return 4;
1511                  }
1512         return 0;
1513 }
1514 
1515 /* This function tries to generate a new cluster of buffers
1516  * from a new page in memory.  We should only do this if we have
1517  * not expanded the buffer cache to the maximum size that we allow.
1518  */
1519 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1520 {
1521         struct buffer_head * bh, * tmp, * arr[8];
1522         int isize = BUFSIZE_INDEX(size);
1523         unsigned long offset;
1524         unsigned long page;
1525         int nblock;
1526 
1527         page = get_free_page(GFP_NOBUFFER);
1528         if(!page) return 0;
1529 
1530         bh = create_buffers(page, size);
1531         if (!bh) {
1532                 free_page(page);
1533                 return 0;
1534         };
1535         nblock = block;
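        /* Give up if any block of the prospective cluster already has a
           buffer in the cache; we must not create duplicate buffers. */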
1536         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1537                 if (find_buffer(dev, nblock++, size))
1538                          goto not_aligned;
1539         }
1540         tmp = bh;
1541         nblock = 0;
1542         while (1) {
1543                 arr[nblock++] = bh;
1544                 bh->b_count = 1;
1545                 bh->b_dirt = 0;
1546                 bh->b_flushtime = 0;
1547                 bh->b_lock = 0;
1548                 bh->b_uptodate = 0;
1549                 bh->b_req = 0;
1550                 bh->b_dev = dev;
1551                 bh->b_list = BUF_CLEAN;
1552                 bh->b_blocknr = block++;
1553                 nr_buffers++;
1554                 nr_buffers_size[isize]++;
1555                 insert_into_queues(bh);
1556                 if (bh->b_this_page)
1557                         bh = bh->b_this_page;
1558                 else
1559                         break;
1560         }
1561         buffermem += PAGE_SIZE;
1562         buffer_pages[MAP_NR(page)] = bh;
1563         bh->b_this_page = tmp;
1564         while (nblock-- > 0)
1565                 brelse(arr[nblock]);
1566         return 4; /* ?? */
1567 not_aligned:
1568         while ((tmp = bh) != NULL) {
1569                 bh = bh->b_this_page;
1570                 put_unused_buffer_head(tmp);
1571         }
1572         free_page(page);
1573         return 0;
1574 }
1575 
1576 unsigned long generate_cluster(dev_t dev, int b[], int size)
1577 {
1578         int i, offset;
1579         
1580         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1581                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1582                 if(find_buffer(dev, b[i], size)) return 0;
1583         };
1584 
1585         /* OK, we have a candidate for a new cluster */
1586         
1587         /* See if one size of buffer is over-represented in the buffer cache,
1588            if so reduce the numbers of buffers */
1589         if(maybe_shrink_lav_buffers(size))
1590          {
1591                  int retval;
1592                  retval = try_to_generate_cluster(dev, b[0], size);
1593                  if(retval) return retval;
1594          };
1595         
1596         if (nr_free_pages > min_free_pages*2) 
1597                  return try_to_generate_cluster(dev, b[0], size);
1598         else
1599                  return reassign_cluster(dev, b[0], size);
1600 }
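/*
 * Illustration (editor's sketch, not part of the original source): the
 * calling convention implied by the loop above.  A filesystem about to read
 * a page-aligned run of blocks fills b[] with the consecutive block numbers
 * and lets generate_cluster() try to set the buffers up on a single page; a
 * return value of 0 simply means no cluster was made and ordinary
 * getblk()/bread() still works.  "first_block" and the 1024-byte block size
 * are assumptions of the sketch.
 */
#if 0
        int b[PAGE_SIZE / 1024];
        int i;

        for (i = 0; i < PAGE_SIZE / 1024; i++)
                b[i] = first_block + i;         /* consecutive disk blocks */
        generate_cluster(dev, b, 1024);         /* best effort only        */
#endif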
1601 
1602 
1603 /* ===================== Init ======================= */
1604 
1605 /*
1606  * This initializes the buffer free list.  nr_buffers_type is set
1607  * to one less than the actual number of buffers, as a sop to backwards
1608  * compatibility --- the old code did this (I think unintentionally,
1609  * but I'm not sure), and programs in the ps package expect it.
1610  *                                      - TYT 8/30/92
1611  */
1612 void buffer_init(void)
1613 {
1614         int i;
1615         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1616 
1617         if (high_memory >= 4*1024*1024) {
1618                 if(high_memory >= 16*1024*1024)
1619                          nr_hash = 16381;
1620                 else
1621                          nr_hash = 4093;
1622         } else {
1623                 nr_hash = 997;
1624         };
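        /* 997, 4093 and 16381 are primes; the hash table is scaled with the
           amount of physical memory, and a prime size helps the block-number
           hash spread entries evenly. */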
1625         
1626         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1627                                                      sizeof(struct buffer_head *));
1628 
1629 
1630         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1631                                                      sizeof(struct buffer_head *));
1632         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1633                 buffer_pages[i] = NULL;
1634 
1635         for (i = 0 ; i < nr_hash ; i++)
1636                 hash_table[i] = NULL;
1637         lru_list[BUF_CLEAN] = 0;
1638         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1639         if (!free_list[isize])
1640                 panic("VFS: Unable to initialize buffer free list!");
1641         return;
1642 }
1643 
1644 
1645 /* ====================== bdflush support =================== */
1646 
1647 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1648  * response to dirty buffers.  Once this process is activated, we write back
1649  * a limited number of buffers to the disks and then go back to sleep again.
1650  * In effect this is a process which never leaves kernel mode, and does not have
1651  * any user memory associated with it except for the stack.  There is also
1652  * a kernel stack page, which obviously must be separate from the user stack.
1653  */
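/* The daemon body is the for (;;) loop near the bottom of sys_bdflush()
 * below; it is entered when sys_bdflush() is called with func == 0,
 * normally by a small user-level update/bdflush program at boot time.
 */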
1654 struct wait_queue * bdflush_wait = NULL;
1655 struct wait_queue * bdflush_done = NULL;
1656 
1657 static int bdflush_running = 0;
1658 
1659 static void wakeup_bdflush(int wait)
1660 {
1661         if(!bdflush_running){
1662                 printk("Warning - bdflush not running\n");
1663                 sync_buffers(0,0);
1664                 return;
1665         };
1666         wake_up(&bdflush_wait);
1667         if(wait) sleep_on(&bdflush_done);
1668 }
1669 
1670 
1671 
1672 /* 
1673  * Here we attempt to write back old buffers.  We also try and flush inodes 
1674  * and supers as well, since this function is essentially "update", and 
1675  * otherwise there would be no way of ensuring that these quantities ever 
1676  * get written back.  Ideally, we would have a timestamp on the inodes
1677  * and superblocks so that we could write back only the old ones as well
1678  */
1679 
1680 asmlinkage int sync_old_buffers(void)
1681 {
1682         int i, isize;
1683         int ndirty, nwritten;
1684         int nlist;
1685         int ncount;
1686         struct buffer_head * bh, *next;
1687 
1688         sync_supers(0);
1689         sync_inodes(0);
1690 
1691         ncount = 0;
1692 #ifdef DEBUG
1693         for(nlist = 0; nlist < NR_LIST; nlist++)
1694 #else
1695         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1696 #endif
1697         {
1698                 ndirty = 0;
1699                 nwritten = 0;
1700         repeat:
1701                 bh = lru_list[nlist];
1702                 if(bh) 
1703                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1704                                  /* We may have stalled while waiting for I/O to complete. */
1705                                  if(bh->b_list != nlist) goto repeat;
1706                                  next = bh->b_next_free;
1707                                  if(!lru_list[nlist]) {
1708                                          printk("Dirty list empty %d\n", i);
1709                                          break;
1710                                  }
1711                                  
1712                                  /* Clean buffer on dirty list?  Refile it */
1713                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1714                                   {
1715                                           refile_buffer(bh);
1716                                           continue;
1717                                   }
1718                                  
1719                                  if (bh->b_lock || !bh->b_dirt)
1720                                           continue;
1721                                  ndirty++;
1722                                  if(bh->b_flushtime > jiffies) continue;
1723                                  nwritten++;
1724                                  bh->b_count++;
1725                                  bh->b_flushtime = 0;
1726 #ifdef DEBUG
1727                                  if(nlist != BUF_DIRTY) ncount++;
1728 #endif
1729                                  ll_rw_block(WRITE, 1, &bh);
1730                                  bh->b_count--;
1731                          }
1732         }
1733 #ifdef DEBUG
1734         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1735         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1736 #endif
1737         
1738         /* We assume that we only come through here on a regular
1739            schedule, like every 5 seconds.  Now update load averages.  
1740            Shift usage counts to prevent overflow. */
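        /* Assuming the standard CALC_LOAD() macro from <linux/sched.h>, each
           pass computes roughly  lav = lav*e + usage*(1-e)  in fixed point,
           with e = bdf_prm.b_un.lav_const, so old activity decays
           exponentially between updates. */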
1741         for(isize = 0; isize<NR_SIZES; isize++){
1742                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1743                 buffer_usage[isize] = 0;
1744         };
1745         return 0;
1746 }
1747 
1748 
1749 /* This is the interface to bdflush.  As we get more sophisticated, we can
1750  * pass tuning parameters to this "process" to adjust how it behaves.
1751  * Invoking it again once it is already running simply updates the
1752  * tuning parameters.  Each parameter is checked, however, to make
1753  * sure that it is reasonable. */
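/*
 * Illustration (editor's sketch, not part of the original source): how a
 * user-level program could use this interface.  The bdflush() wrapper is
 * hypothetical - a real program would reach the syscall through syscall()
 * or a _syscall2() stub - and the value 60 is only an example that must lie
 * within bdflush_min/bdflush_max for the parameter.
 */
#if 0
        long nfract;

        bdflush(2, (long) &nfract);     /* func 2: read parameter 0 (nfract)   */
        bdflush(3, 60);                 /* func 3: set parameter 0 to 60%      */
        bdflush(1, 0);                  /* func 1: flush old buffers once      */
        bdflush(0, 0);                  /* func 0: become the daemon; does not */
                                        /* return unless killed                */
#endif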
1754 
1755 asmlinkage int sys_bdflush(int func, long data)
1756 {
1757         int i, error;
1758         int ndirty;
1759         int nlist;
1760         int ncount;
1761         struct buffer_head * bh, *next;
1762 
1763         if (!suser())
1764                 return -EPERM;
1765 
1766         if (func == 1)
1767                  return sync_old_buffers();
1768 
1769         /* func 0 starts the daemon, 1 flushes old buffers, 2n+2 reads param n, 2n+3 sets param n */
1770         if (func >= 2) {
1771                 i = (func-2) >> 1;
1772                 if (i < 0 || i >= N_PARAM)
1773                         return -EINVAL;
1774                 if((func & 1) == 0) {
1775                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1776                         if (error)
1777                                 return error;
1778                         put_fs_long(bdf_prm.data[i], data);
1779                         return 0;
1780                 };
1781                 if (data < bdflush_min[i] || data > bdflush_max[i])
1782                         return -EINVAL;
1783                 bdf_prm.data[i] = data;
1784                 return 0;
1785         };
1786         
1787         if (bdflush_running)
1788                 return -EBUSY; /* Only one copy of this running at one time */
1789         bdflush_running++;
1790         
1791         /* OK, from here on is the daemon */
1792         
1793         for (;;) {
1794 #ifdef DEBUG
1795                 printk("bdflush() activated...");
1796 #endif
1797                 
1798                 ncount = 0;
1799 #ifdef DEBUG
1800                 for(nlist = 0; nlist < NR_LIST; nlist++)
1801 #else
1802                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1803 #endif
1804                  {
1805                          ndirty = 0;
1806                  repeat:
1807                          bh = lru_list[nlist];
1808                          if(bh) 
1809                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1810                                        bh = next) {
1811                                           /* We may have stalled while waiting for I/O to complete. */
1812                                           if(bh->b_list != nlist) goto repeat;
1813                                           next = bh->b_next_free;
1814                                           if(!lru_list[nlist]) {
1815                                                   printk("Dirty list empty %d\n", i);
1816                                                   break;
1817                                           }
1818                                           
1819                                           /* Clean buffer on dirty list?  Refile it */
1820                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1821                                            {
1822                                                    refile_buffer(bh);
1823                                                    continue;
1824                                            }
1825                                           
1826                                           if (bh->b_lock || !bh->b_dirt)
1827                                                    continue;
1828                                           /* Should we write back buffers that are shared or not??
1829                                              currently dirty buffers are not shared, so it does not matter */
1830                                           bh->b_count++;
1831                                           ndirty++;
1832                                           bh->b_flushtime = 0;
1833                                           ll_rw_block(WRITE, 1, &bh);
1834 #ifdef DEBUG
1835                                           if(nlist != BUF_DIRTY) ncount++;
1836 #endif
1837                                           bh->b_count--;
1838                                   }
1839                  }
1840 #ifdef DEBUG
1841                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1842                 printk("sleeping again.\n");
1843 #endif
1844                 wake_up(&bdflush_done);
1845                 
1846                 /* If there are still a lot of dirty buffers around, skip the sleep
1847                    and flush some more */
1848                 
1849                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1850                    bdf_prm.b_un.nfract/100) {
1851                         if (current->signal & (1 << (SIGKILL-1))) {
1852                                 bdflush_running--;
1853                                 return 0;
1854                         }
1855                         current->signal = 0;
1856                         interruptible_sleep_on(&bdflush_wait);
1857                 }
1858         }
1859 }
1860 
1861 
1862 /*
1863  * Overrides for Emacs so that we follow Linus's tabbing style.
1864  * Emacs will notice this stuff at the end of the file and automatically
1865  * adjust the settings for this buffer only.  This must remain at the end
1866  * of the file.
1867  * ---------------------------------------------------------------------------
1868  * Local variables:
1869  * c-indent-level: 8
1870  * c-brace-imaginary-offset: 0
1871  * c-brace-offset: -8
1872  * c-argdecl-indent: 8
1873  * c-label-offset: -8
1874  * c-continued-statement-offset: 8
1875  * c-continued-brace-offset: 0
1876  * End:
1877  */
