root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. try_to_align
  32. check_aligned
  33. try_to_load_aligned
  34. try_to_share_buffers
  35. bread_page
  36. grow_buffers
  37. try_to_free
  38. maybe_shrink_lav_buffers
  39. shrink_buffers
  40. shrink_specific_buffers
  41. show_buffers
  42. try_to_reassign
  43. reassign_cluster
  44. try_to_generate_cluster
  45. generate_cluster
  46. buffer_init
  47. wakeup_bdflush
  48. sync_old_buffers
  49. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/sched.h>
  20 #include <linux/kernel.h>
  21 #include <linux/major.h>
  22 #include <linux/string.h>
  23 #include <linux/locks.h>
  24 #include <linux/errno.h>
  25 #include <linux/malloc.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/segment.h>
  29 #include <asm/io.h>
  30 
  31 #define NR_SIZES 4
  32 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  33 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  34 
  35 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
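/*
 * Worked example (derived from the two tables above): BUFSIZE_INDEX(1024)
 * is buffersize_index[1024>>9] == buffersize_index[2] == 1, and
 * bufferindex_size[1] == 1024 maps that index back to the block size.
 * Only 512, 1024, 2048 and 4096 byte buffers are supported; any other
 * size lands on a -1 slot.
 */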
  36 
  37 static int grow_buffers(int pri, int size);
  38 static int shrink_specific_buffers(unsigned int priority, int size);
  39 static int maybe_shrink_lav_buffers(int);
  40 
  41 static int nr_hash = 0;  /* Size of hash table */
  42 static struct buffer_head ** hash_table;
  43 struct buffer_head ** buffer_pages;
  44 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  45 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  46 static struct buffer_head * unused_list = NULL;
  47 static struct wait_queue * buffer_wait = NULL;
  48 
  49 int nr_buffers = 0;
  50 int nr_buffers_type[NR_LIST] = {0,};
  51 int nr_buffers_size[NR_SIZES] = {0,};
  52 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  53 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  54 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  55 int nr_free[NR_SIZES] = {0,};
  56 int buffermem = 0;
  57 int nr_buffer_heads = 0;
  58 extern int *blksize_size[];
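/*
 * A rough map of the lists managed below: hash_table[] chains buffers
 * through b_next/b_prev, the per-type lru_list[] and per-size free_list[]
 * are circular doubly-linked rings threaded through b_next_free and
 * b_prev_free, and unused_list is a simple singly-linked stack of spare
 * buffer heads.  A buffer sitting on a free list is marked by
 * b_dev == 0xffff.
 */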
  59 
  60 /* Here is the parameter block for the bdflush process. */
  61 static void wakeup_bdflush(int);
  62 
  63 #define N_PARAM 9
  64 #define LAV
  65 
  66 static union bdflush_param{
  67         struct {
  68                 int nfract;  /* Percentage of buffer cache dirty to 
  69                                 activate bdflush */
  70                 int ndirty;  /* Maximum number of dirty blocks to write out per
  71                                 wake-cycle */
  72                 int nrefill; /* Number of clean buffers to try and obtain
  73                                 each time we call refill */
  74                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  75                                   when trying to refill buffers. */
  76                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  77                                     search for free clusters */
  78                 int age_buffer;  /* Time for normal buffer to age before 
  79                                     we flush it */
  80                 int age_super;  /* Time for superblock to age before we 
  81                                    flush it */
  82                 int lav_const;  /* Constant used for load average (time
   83                                    constant) */
  84                 int lav_ratio;  /* Used to determine how low a lav for a
  85                                    particular size can go before we start to
  86                                    trim back the buffers */
  87         } b_un;
  88         unsigned int data[N_PARAM];
  89 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
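/* The initializer above fills the fields in declaration order: nfract=25%,
   ndirty=500, nrefill=64, nref_dirt=256, clu_nfract=15%, age_buffer=3000
   jiffies, age_super=500 jiffies, lav_const=1884, lav_ratio=2. */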
  90 
  91 /* The lav constant is set for 1 minute, as long as the update process runs
  92    every 5 seconds.  If you change the frequency of update, the time
  93    constant will also change. */
  94 
  95 
  96 /* These are the min and max parameter values that we will allow to be assigned */
  97 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
  98 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
  99 
 100 /*
 101  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 102  * and getting rid of the cli-sti pairs. The wait-queue routines still
 103  * need cli-sti, but now it's just a couple of 386 instructions or so.
 104  *
 105  * Note that the real wait_on_buffer() is an inline function that checks
 106  * if 'b_wait' is set before calling this, so that the queues aren't set
 107  * up unnecessarily.
 108  */
 109 void __wait_on_buffer(struct buffer_head * bh)
 110 {
 111         struct wait_queue wait = { current, NULL };
 112 
 113         bh->b_count++;
 114         add_wait_queue(&bh->b_wait, &wait);
 115 repeat:
 116         current->state = TASK_UNINTERRUPTIBLE;
 117         if (bh->b_lock) {
 118                 schedule();
 119                 goto repeat;
 120         }
 121         remove_wait_queue(&bh->b_wait, &wait);
 122         bh->b_count--;
 123         current->state = TASK_RUNNING;
 124 }
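/*
 * The inline wrapper mentioned above lives in <linux/locks.h>; in essence
 * it is something like
 *
 *	extern inline void wait_on_buffer(struct buffer_head * bh)
 *	{
 *		if (bh->b_lock)
 *			__wait_on_buffer(bh);
 *	}
 *
 * so the wait queue is only set up when the buffer is actually locked.
 */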
 125 
 126 /* Call sync_buffers with wait!=0 to ensure that the call does not
 127    return until all buffer writes have completed.  Sync() may return
 128    before the writes have finished; fsync() may not. */
 129 
 130 
 131 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 132    spontaneously dirty themselves without ever brelse being called.
 133    We will ultimately want to put these in a separate list, but for
 134    now we search all of the lists for dirty buffers */
 135 
 136 static int sync_buffers(dev_t dev, int wait)
 137 {
 138         int i, retry, pass = 0, err = 0;
 139         int nlist, ncount;
 140         struct buffer_head * bh, *next;
 141 
 142         /* One pass for no-wait, three for wait:
 143            0) write out all dirty, unlocked buffers;
 144            1) write out all dirty buffers, waiting if locked;
 145            2) wait for completion by waiting for all buffers to unlock. */
 146  repeat:
 147         retry = 0;
 148  repeat2:
 149         ncount = 0;
 150         /* We search all lists as a failsafe mechanism, not because we expect
 151            there to be dirty buffers on any of the other lists. */
 152         for(nlist = 0; nlist < NR_LIST; nlist++)
 153          {
 154          repeat1:
 155                  bh = lru_list[nlist];
 156                  if(!bh) continue;
 157                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 158                          if(bh->b_list != nlist) goto repeat1;
 159                          next = bh->b_next_free;
 160                          if(!lru_list[nlist]) break;
 161                          if (dev && bh->b_dev != dev)
 162                                   continue;
 163                          if (bh->b_lock)
 164                           {
 165                                   /* Buffer is locked; skip it unless wait is
 166                                      requested AND pass > 0. */
 167                                   if (!wait || !pass) {
 168                                           retry = 1;
 169                                           continue;
 170                                   }
 171                                   wait_on_buffer (bh);
 172                                   goto repeat2;
 173                           }
 174                          /* If an unlocked buffer is not uptodate, there has
 175                              been an IO error. Skip it. */
 176                          if (wait && bh->b_req && !bh->b_lock &&
 177                              !bh->b_dirt && !bh->b_uptodate) {
 178                                   err = 1;
 179                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 180                                   continue;
 181                           }
 182                          /* Don't write clean buffers.  Don't write ANY buffers
 183                             on the third pass. */
 184                          if (!bh->b_dirt || pass>=2)
 185                                   continue;
 186                          /* don't bother about locked buffers */
 187                          if (bh->b_lock)
 188                                  continue;
 189                          bh->b_count++;
 190                          bh->b_flushtime = 0;
 191                          ll_rw_block(WRITE, 1, &bh);
 192 
 193                          if(nlist != BUF_DIRTY) { 
 194                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 195                                  ncount++;
 196                          };
 197                          bh->b_count--;
 198                          retry = 1;
 199                  }
 200          }
 201         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 202         
 203         /* If we are waiting for the sync to succeed, and if any dirty
 204            blocks were written, then repeat; on the second pass, only
 205            wait for buffers being written (do not pass to write any
 206            more buffers on the second pass). */
 207         if (wait && retry && ++pass<=2)
 208                  goto repeat;
 209         return err;
 210 }
 211 
 212 void sync_dev(dev_t dev)
 213 {
 214         sync_buffers(dev, 0);
 215         sync_supers(dev);
 216         sync_inodes(dev);
 217         sync_buffers(dev, 0);
 218 }
 219 
 220 int fsync_dev(dev_t dev)
 221 {
 222         sync_buffers(dev, 0);
 223         sync_supers(dev);
 224         sync_inodes(dev);
 225         return sync_buffers(dev, 1);
 226 }
 227 
 228 asmlinkage int sys_sync(void)
 229 {
 230         sync_dev(0);
 231         return 0;
 232 }
 233 
 234 int file_fsync (struct inode *inode, struct file *filp)
 235 {
 236         return fsync_dev(inode->i_dev);
 237 }
 238 
 239 asmlinkage int sys_fsync(unsigned int fd)
 240 {
 241         struct file * file;
 242         struct inode * inode;
 243 
 244         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 245                 return -EBADF;
 246         if (!file->f_op || !file->f_op->fsync)
 247                 return -EINVAL;
 248         if (file->f_op->fsync(inode,file))
 249                 return -EIO;
 250         return 0;
 251 }
 252 
 253 void invalidate_buffers(dev_t dev)
 254 {
 255         int i;
 256         int nlist;
 257         struct buffer_head * bh;
 258 
 259         for(nlist = 0; nlist < NR_LIST; nlist++) {
 260                 bh = lru_list[nlist];
 261                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 262                      bh = bh->b_next_free) {
 263                         if (bh->b_dev != dev)
 264                                  continue;
 265                         wait_on_buffer(bh);
 266                         if (bh->b_dev == dev)
 267                                  bh->b_flushtime = bh->b_uptodate = 
 268                                           bh->b_dirt = bh->b_req = 0;
 269                 }
 270         }
 271 }
 272 
 273 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 274 #define hash(dev,block) hash_table[_hashfn(dev,block)]
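/*
 * Example (with made-up numbers): if nr_hash happened to be 997, the buffer
 * for block 1234 on device 0x0301 would live on the chain
 * hash_table[(0x0301 ^ 1234) % 997], which find_buffer() below walks via
 * b_next.
 */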
 275 
 276 static inline void remove_from_hash_queue(struct buffer_head * bh)
 277 {
 278         if (bh->b_next)
 279                 bh->b_next->b_prev = bh->b_prev;
 280         if (bh->b_prev)
 281                 bh->b_prev->b_next = bh->b_next;
 282         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 283                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 284         bh->b_next = bh->b_prev = NULL;
 285 }
 286 
 287 static inline void remove_from_lru_list(struct buffer_head * bh)
 288 {
 289         if (!(bh->b_prev_free) || !(bh->b_next_free))
 290                 panic("VFS: LRU block list corrupted");
 291         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 292         bh->b_prev_free->b_next_free = bh->b_next_free;
 293         bh->b_next_free->b_prev_free = bh->b_prev_free;
 294 
 295         if (lru_list[bh->b_list] == bh)
 296                  lru_list[bh->b_list] = bh->b_next_free;
 297         if(lru_list[bh->b_list] == bh)
 298                  lru_list[bh->b_list] = NULL;
 299         bh->b_next_free = bh->b_prev_free = NULL;
 300 }
 301 
 302 static inline void remove_from_free_list(struct buffer_head * bh)
 303 {
 304         int isize = BUFSIZE_INDEX(bh->b_size);
 305         if (!(bh->b_prev_free) || !(bh->b_next_free))
 306                 panic("VFS: Free block list corrupted");
 307         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 308         if(!free_list[isize])
 309                  panic("Free list empty");
 310         nr_free[isize]--;
 311         if(bh->b_next_free == bh)
 312                  free_list[isize] = NULL;
 313         else {
 314                 bh->b_prev_free->b_next_free = bh->b_next_free;
 315                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 316                 if (free_list[isize] == bh)
 317                          free_list[isize] = bh->b_next_free;
 318         };
 319         bh->b_next_free = bh->b_prev_free = NULL;
 320 }
 321 
 322 static inline void remove_from_queues(struct buffer_head * bh)
 323 {
 324         if(bh->b_dev == 0xffff) {
 325                 remove_from_free_list(bh); /* Free list entries should not be
 326                                               in the hash queue */
 327                 return;
 328         };
 329         nr_buffers_type[bh->b_list]--;
 330         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 331         remove_from_hash_queue(bh);
 332         remove_from_lru_list(bh);
 333 }
 334 
 335 static inline void put_last_lru(struct buffer_head * bh)
 336 {
 337         if (!bh)
 338                 return;
 339         if (bh == lru_list[bh->b_list]) {
 340                 lru_list[bh->b_list] = bh->b_next_free;
 341                 return;
 342         }
 343         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 344         remove_from_lru_list(bh);
  345 /* add to back of the LRU list */
 346 
 347         if(!lru_list[bh->b_list]) {
 348                 lru_list[bh->b_list] = bh;
 349                 lru_list[bh->b_list]->b_prev_free = bh;
 350         };
 351 
 352         bh->b_next_free = lru_list[bh->b_list];
 353         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 354         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 355         lru_list[bh->b_list]->b_prev_free = bh;
 356 }
 357 
 358 static inline void put_last_free(struct buffer_head * bh)
 359 {
 360         int isize;
 361         if (!bh)
 362                 return;
 363 
 364         isize = BUFSIZE_INDEX(bh->b_size);      
 365         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 366 /* add to back of free list */
 367 
 368         if(!free_list[isize]) {
 369                 free_list[isize] = bh;
 370                 bh->b_prev_free = bh;
 371         };
 372 
 373         nr_free[isize]++;
 374         bh->b_next_free = free_list[isize];
 375         bh->b_prev_free = free_list[isize]->b_prev_free;
 376         free_list[isize]->b_prev_free->b_next_free = bh;
 377         free_list[isize]->b_prev_free = bh;
 378 }
 379 
 380 static inline void insert_into_queues(struct buffer_head * bh)
 381 {
 382 /* put at end of free list */
 383 
 384         if(bh->b_dev == 0xffff) {
 385                 put_last_free(bh);
 386                 return;
 387         };
 388         if(!lru_list[bh->b_list]) {
 389                 lru_list[bh->b_list] = bh;
 390                 bh->b_prev_free = bh;
 391         };
 392         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 393         bh->b_next_free = lru_list[bh->b_list];
 394         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 395         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 396         lru_list[bh->b_list]->b_prev_free = bh;
 397         nr_buffers_type[bh->b_list]++;
 398         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 399 /* put the buffer in new hash-queue if it has a device */
 400         bh->b_prev = NULL;
 401         bh->b_next = NULL;
 402         if (!bh->b_dev)
 403                 return;
 404         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 405         hash(bh->b_dev,bh->b_blocknr) = bh;
 406         if (bh->b_next)
 407                 bh->b_next->b_prev = bh;
 408 }
 409 
 410 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 411 {               
 412         struct buffer_head * tmp;
 413 
 414         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 415                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 416                         if (tmp->b_size == size)
 417                                 return tmp;
 418                         else {
 419                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 420                                                         MAJOR(dev), MINOR(dev));
 421                                 return NULL;
 422                         }
 423         return NULL;
 424 }
 425 
 426 /*
 427  * Why like this, I hear you say... The reason is race-conditions.
 428  * As we don't lock buffers (unless we are reading them, that is),
 429  * something might happen to it while we sleep (ie a read-error
 430  * will force it bad). This shouldn't really happen currently, but
 431  * the code is ready.
 432  */
 433 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 434 {
 435         struct buffer_head * bh;
 436 
 437         for (;;) {
 438                 if (!(bh=find_buffer(dev,block,size)))
 439                         return NULL;
 440                 bh->b_count++;
 441                 wait_on_buffer(bh);
 442                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 443                         return bh;
 444                 bh->b_count--;
 445         }
 446 }
 447 
 448 void set_blocksize(dev_t dev, int size)
 449 {
 450         int i, nlist;
 451         struct buffer_head * bh, *bhnext;
 452 
 453         if (!blksize_size[MAJOR(dev)])
 454                 return;
 455 
 456         switch(size) {
 457                 default: panic("Invalid blocksize passed to set_blocksize");
 458                 case 512: case 1024: case 2048: case 4096:;
 459         }
 460 
 461         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 462                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 463                 return;
 464         }
 465         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 466                 return;
 467         sync_buffers(dev, 2);
 468         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 469 
 470   /* We need to be quite careful how we do this - we are moving entries
 471      around on the free list, and we can get in a loop if we are not careful.*/
 472 
 473         for(nlist = 0; nlist < NR_LIST; nlist++) {
 474                 bh = lru_list[nlist];
 475                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 476                         if(!bh) break;
 477                         bhnext = bh->b_next_free; 
 478                         if (bh->b_dev != dev)
 479                                  continue;
 480                         if (bh->b_size == size)
 481                                  continue;
 482                         
 483                         wait_on_buffer(bh);
 484                         if (bh->b_dev == dev && bh->b_size != size) {
 485                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 486                                          bh->b_flushtime = 0;
 487                         };
 488                         remove_from_hash_queue(bh);
 489                 }
 490         }
 491 }
 492 
 493 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
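/*
 * BADNESS() folds the dirty and lock bits into a small cost: 0 for a clean,
 * unlocked buffer, 1 if only locked, 2 if only dirty, 3 if both.  The
 * candidate scans in refill_freelist() below skip any buffer whose BADNESS
 * is non-zero.
 */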
 494 
 495 void refill_freelist(int size)
 496 {
 497         struct buffer_head * bh, * tmp;
 498         struct buffer_head * candidate[NR_LIST];
 499         unsigned int best_time, winner;
 500         int isize = BUFSIZE_INDEX(size);
 501         int buffers[NR_LIST];
 502         int i;
 503         int needed;
 504 
 505         /* First see if we even need this.  Sometimes it is advantageous
  506          to request some blocks in a filesystem that we know we will
  507          be needing ahead of time. */
 508 
 509         if (nr_free[isize] > 100)
 510                 return;
 511 
 512         /* If there are too many dirty buffers, we wake up the update process
 513            now so as to ensure that there are still clean buffers available
 514            for user processes to use (and dirty) */
 515         
 516         /* We are going to try and locate this much memory */
 517         needed =bdf_prm.b_un.nrefill * size;  
 518 
 519         while (nr_free_pages > min_free_pages*2 && needed > 0 &&
 520                grow_buffers(GFP_BUFFER, size)) {
 521                 needed -= PAGE_SIZE;
 522         }
 523 
 524         if(needed <= 0) return;
 525 
 526         /* See if there are too many buffers of a different size.
 527            If so, victimize them */
 528 
 529         while(maybe_shrink_lav_buffers(size))
 530          {
 531                  if(!grow_buffers(GFP_BUFFER, size)) break;
 532                  needed -= PAGE_SIZE;
 533                  if(needed <= 0) return;
 534          };
 535 
 536         /* OK, we cannot grow the buffer cache, now try and get some
 537            from the lru list */
 538 
 539         /* First set the candidate pointers to usable buffers.  This
 540            should be quick nearly all of the time. */
 541 
 542 repeat0:
 543         for(i=0; i<NR_LIST; i++){
 544                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 545                    nr_buffers_type[i] == 0) {
 546                         candidate[i] = NULL;
 547                         buffers[i] = 0;
 548                         continue;
 549                 }
 550                 buffers[i] = nr_buffers_type[i];
 551                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 552                  {
 553                          if(buffers[i] < 0) panic("Here is the problem");
 554                          tmp = bh->b_next_free;
 555                          if (!bh) break;
 556                          
 557                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 558                              bh->b_dirt) {
 559                                  refile_buffer(bh);
 560                                  continue;
 561                          };
 562                          
 563                          if (bh->b_count || bh->b_size != size)
 564                                   continue;
 565                          
 566                          /* Buffers are written in the order they are placed 
 567                             on the locked list. If we encounter a locked
 568                             buffer here, this means that the rest of them
 569                             are also locked */
 570                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 571                                  buffers[i] = 0;
 572                                  break;
 573                          }
 574                          
 575                          if (BADNESS(bh)) continue;
 576                          break;
 577                  };
 578                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 579                 else candidate[i] = bh;
 580                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 581         }
 582         
 583  repeat:
 584         if(needed <= 0) return;
 585         
 586         /* Now see which candidate wins the election */
 587         
 588         winner = best_time = UINT_MAX;  
 589         for(i=0; i<NR_LIST; i++){
 590                 if(!candidate[i]) continue;
 591                 if(candidate[i]->b_lru_time < best_time){
 592                         best_time = candidate[i]->b_lru_time;
 593                         winner = i;
 594                 }
 595         }
 596         
 597         /* If we have a winner, use it, and then get a new candidate from that list */
 598         if(winner != UINT_MAX) {
 599                 i = winner;
 600                 bh = candidate[i];
 601                 candidate[i] = bh->b_next_free;
 602                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 603                 if (bh->b_count || bh->b_size != size)
 604                          panic("Busy buffer in candidate list\n");
 605                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 606                          panic("Shared buffer in candidate list\n");
 607                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 608                 
 609                 if(bh->b_dev == 0xffff) panic("Wrong list");
 610                 remove_from_queues(bh);
 611                 bh->b_dev = 0xffff;
 612                 put_last_free(bh);
 613                 needed -= bh->b_size;
 614                 buffers[i]--;
 615                 if(buffers[i] < 0) panic("Here is the problem");
 616                 
 617                 if(buffers[i] == 0) candidate[i] = NULL;
 618                 
 619                 /* Now all we need to do is advance the candidate pointer
 620                    from the winner list to the next usable buffer */
 621                 if(candidate[i] && buffers[i] > 0){
 622                         if(buffers[i] <= 0) panic("Here is another problem");
 623                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 624                                 if(buffers[i] < 0) panic("Here is the problem");
 625                                 tmp = bh->b_next_free;
 626                                 if (!bh) break;
 627                                 
 628                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 629                                     bh->b_dirt) {
 630                                         refile_buffer(bh);
 631                                         continue;
 632                                 };
 633                                 
 634                                 if (bh->b_count || bh->b_size != size)
 635                                          continue;
 636                                 
 637                                 /* Buffers are written in the order they are
 638                                    placed on the locked list.  If we encounter
 639                                    a locked buffer here, this means that the
 640                                    rest of them are also locked */
 641                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 642                                         buffers[i] = 0;
 643                                         break;
 644                                 }
 645               
 646                                 if (BADNESS(bh)) continue;
 647                                 break;
 648                         };
 649                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 650                         else candidate[i] = bh;
 651                         if(candidate[i] && candidate[i]->b_count) 
 652                                  panic("Here is the problem");
 653                 }
 654                 
 655                 goto repeat;
 656         }
 657         
 658         if(needed <= 0) return;
 659         
 660         /* Too bad, that was not enough. Try a little harder to grow some. */
 661         
 662         if (nr_free_pages > 5) {
 663                 if (grow_buffers(GFP_BUFFER, size)) {
 664                         needed -= PAGE_SIZE;
 665                         goto repeat0;
 666                 };
 667         }
 668         
 669         /* and repeat until we find something good */
 670         if (!grow_buffers(GFP_ATOMIC, size))
 671                 wakeup_bdflush(1);
 672         needed -= PAGE_SIZE;
 673         goto repeat0;
 674 }
 675 
 676 /*
 677  * Ok, this is getblk, and it isn't very clear, again to hinder
 678  * race-conditions. Most of the code is seldom used, (ie repeating),
 679  * so it should be much more efficient than it looks.
 680  *
 681  * The algorithm is changed: hopefully better, and an elusive bug removed.
 682  *
 683  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 684  * when the filesystem starts to get full of dirty blocks (I hope).
 685  */
 686 struct buffer_head * getblk(dev_t dev, int block, int size)
 687 {
 688         struct buffer_head * bh;
 689         int isize = BUFSIZE_INDEX(size);
 690 
 691         /* Update this for the buffer size lav. */
 692         buffer_usage[isize]++;
 693 
 694         /* If there are too many dirty buffers, we wake up the update process
 695            now so as to ensure that there are still clean buffers available
 696            for user processes to use (and dirty) */
 697 repeat:
 698         bh = get_hash_table(dev, block, size);
 699         if (bh) {
 700                 if (bh->b_uptodate && !bh->b_dirt)
 701                          put_last_lru(bh);
 702                 if(!bh->b_dirt) bh->b_flushtime = 0;
 703                 return bh;
 704         }
 705 
 706         while(!free_list[isize]) refill_freelist(size);
 707         
 708         if (find_buffer(dev,block,size))
 709                  goto repeat;
 710 
 711         bh = free_list[isize];
 712         remove_from_free_list(bh);
 713 
  714 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 715 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 716         bh->b_count=1;
 717         bh->b_dirt=0;
 718         bh->b_lock=0;
 719         bh->b_uptodate=0;
 720         bh->b_flushtime = 0;
 721         bh->b_req=0;
 722         bh->b_dev=dev;
 723         bh->b_blocknr=block;
 724         insert_into_queues(bh);
 725         return bh;
 726 }
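/*
 * Note on the "goto repeat" above: refill_freelist() may block, so by the
 * time a free buffer is available some other process may already have
 * created a buffer for the same (dev,block).  The find_buffer() re-check
 * catches that case and makes us use the hashed copy instead of creating
 * a duplicate.
 */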
 727 
 728 void set_writetime(struct buffer_head * buf, int flag)
 729 {
 730         int newtime;
 731 
 732         if (buf->b_dirt){
 733                 /* Move buffer to dirty list if jiffies is clear */
 734                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 735                                      bdf_prm.b_un.age_buffer);
 736                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 737                          buf->b_flushtime = newtime;
 738         } else {
 739                 buf->b_flushtime = 0;
 740         }
 741 }
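/*
 * With the default parameters and the usual 100 Hz tick (an assumption for
 * this illustration), a freshly dirtied data buffer gets a flushtime about
 * 30 seconds (3000 jiffies) in the future, while flag!=0 callers such as
 * superblock writers get roughly 5 seconds (500 jiffies).
 */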
 742 
 743 
 744 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 745                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
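/*
 * The table above is indexed by a three-bit state built in refile_buffer()
 * below: bit 0 = page shared, bit 1 = locked, bit 2 = dirty.  Any dirty
 * combination (indices 4-7) maps to BUF_DIRTY, locked-only (index 2) to
 * BUF_LOCKED, shared (indices 1 and 3) to BUF_SHARED, and only a clean,
 * unlocked, unshared buffer (index 0) stays BUF_CLEAN.
 */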
 746 
 747 void refile_buffer(struct buffer_head * buf){
 748         int i, dispose;
 749         i = 0;
 750         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 751         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 752         if(buf->b_lock) i |= 2;
 753         if(buf->b_dirt) i |= 4;
 754         dispose = buffer_disposition[i];
 755         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 756                  dispose = BUF_UNSHARED;
 757         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 758         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 759         if(dispose != buf->b_list)  {
 760                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 761                          buf->b_lru_time = jiffies;
 762                 if(dispose == BUF_LOCKED && 
 763                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 764                          dispose = BUF_LOCKED1;
 765                 remove_from_queues(buf);
 766                 buf->b_list = dispose;
 767                 insert_into_queues(buf);
 768                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 769                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 770                    bdf_prm.b_un.nfract/100)
 771                          wakeup_bdflush(0);
 772         }
 773 }
 774 
 775 void brelse(struct buffer_head * buf)
 776 {
 777         if (!buf)
 778                 return;
 779         wait_on_buffer(buf);
 780 
 781         /* If dirty, mark the time this buffer should be written back */
 782         set_writetime(buf, 0);
 783         refile_buffer(buf);
 784 
 785         if (buf->b_count) {
 786                 if (--buf->b_count)
 787                         return;
 788                 wake_up(&buffer_wait);
 789                 return;
 790         }
 791         printk("VFS: brelse: Trying to free free buffer\n");
 792 }
 793 
 794 /*
 795  * bread() reads a specified block and returns the buffer that contains
 796  * it. It returns NULL if the block was unreadable.
 797  */
 798 struct buffer_head * bread(dev_t dev, int block, int size)
 799 {
 800         struct buffer_head * bh;
 801 
 802         if (!(bh = getblk(dev, block, size))) {
 803                 printk("VFS: bread: READ error on device %d/%d\n",
 804                                                 MAJOR(dev), MINOR(dev));
 805                 return NULL;
 806         }
 807         if (bh->b_uptodate)
 808                 return bh;
 809         ll_rw_block(READ, 1, &bh);
 810         wait_on_buffer(bh);
 811         if (bh->b_uptodate)
 812                 return bh;
 813         brelse(bh);
 814         return NULL;
 815 }
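/*
 * Typical use (illustrative only, values made up): read block 1 of a
 * device that uses 1024-byte blocks, look at the data, then drop the
 * reference:
 *
 *	struct buffer_head * bh = bread(dev, 1, 1024);
 *	if (!bh)
 *		return -EIO;
 *	... examine bh->b_data ...
 *	brelse(bh);
 */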
 816 
 817 /*
 818  * Ok, breada can be used as bread, but additionally to mark other
 819  * blocks for reading as well. End the argument list with a negative
 820  * number.
 821  */
 822 
 823 #define NBUF 16
 824 
 825 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 826         unsigned int pos, unsigned int filesize)
 827 {
 828         struct buffer_head * bhlist[NBUF];
 829         unsigned int blocks;
 830         struct buffer_head * bh;
 831         int index;
 832         int i, j;
 833 
 834         if (pos >= filesize)
 835                 return NULL;
 836 
 837         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 838                 return NULL;
 839 
 840         index = BUFSIZE_INDEX(bh->b_size);
 841 
 842         if (bh->b_uptodate)
 843                 return bh;
 844 
 845         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 846 
 847         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 848                 blocks = read_ahead[MAJOR(dev)] >> index;
 849         if (blocks > NBUF)
 850                 blocks = NBUF;
 851         
 852         bhlist[0] = bh;
 853         j = 1;
 854         for(i=1; i<blocks; i++) {
 855                 bh = getblk(dev,block+i,bufsize);
 856                 if (bh->b_uptodate) {
 857                         brelse(bh);
 858                         break;
 859                 }
 860                 bhlist[j++] = bh;
 861         }
 862 
 863         /* Request the read for these buffers, and then release them */
 864         ll_rw_block(READ, j, bhlist);
 865 
 866         for(i=1; i<j; i++)
 867                 brelse(bhlist[i]);
 868 
 869         /* Wait for this buffer, and then continue on */
 870         bh = bhlist[0];
 871         wait_on_buffer(bh);
 872         if (bh->b_uptodate)
 873                 return bh;
 874         brelse(bh);
 875         return NULL;
 876 }
 877 
 878 /*
 879  * See fs/inode.c for the weird use of volatile..
 880  */
 881 static void put_unused_buffer_head(struct buffer_head * bh)
 882 {
 883         struct wait_queue * wait;
 884 
 885         wait = ((volatile struct buffer_head *) bh)->b_wait;
 886         memset(bh,0,sizeof(*bh));
 887         ((volatile struct buffer_head *) bh)->b_wait = wait;
 888         bh->b_next_free = unused_list;
 889         unused_list = bh;
 890 }
 891 
 892 static void get_more_buffer_heads(void)
 893 {
 894         int i;
 895         struct buffer_head * bh;
 896 
 897         if (unused_list)
 898                 return;
 899 
 900         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 901                 return;
 902 
 903         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 904                 bh->b_next_free = unused_list;  /* only make link */
 905                 unused_list = bh++;
 906         }
 907 }
 908 
 909 static struct buffer_head * get_unused_buffer_head(void)
 910 {
 911         struct buffer_head * bh;
 912 
 913         get_more_buffer_heads();
 914         if (!unused_list)
 915                 return NULL;
 916         bh = unused_list;
 917         unused_list = bh->b_next_free;
 918         bh->b_next_free = NULL;
 919         bh->b_data = NULL;
 920         bh->b_size = 0;
 921         bh->b_req = 0;
 922         return bh;
 923 }
 924 
 925 /*
 926  * Create the appropriate buffers when given a page for data area and
 927  * the size of each buffer.. Use the bh->b_this_page linked list to
 928  * follow the buffers created.  Return NULL if unable to create more
 929  * buffers.
 930  */
 931 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 932 {
 933         struct buffer_head *bh, *head;
 934         unsigned long offset;
 935 
 936         head = NULL;
 937         offset = PAGE_SIZE;
 938         while ((offset -= size) < PAGE_SIZE) {
 939                 bh = get_unused_buffer_head();
 940                 if (!bh)
 941                         goto no_grow;
 942                 bh->b_this_page = head;
 943                 head = bh;
 944                 bh->b_data = (char *) (page+offset);
 945                 bh->b_size = size;
 946                 bh->b_dev = 0xffff;  /* Flag as unused */
 947         }
 948         return head;
 949 /*
 950  * In case anything failed, we just free everything we got.
 951  */
 952 no_grow:
 953         bh = head;
 954         while (bh) {
 955                 head = bh;
 956                 bh = bh->b_this_page;
 957                 put_unused_buffer_head(head);
 958         }
 959         return NULL;
 960 }
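/*
 * For a 4096-byte page and size 1024 the loop above produces four buffer
 * heads chained through b_this_page: the returned head covers offset 0 and
 * the chain continues through offsets 1024, 2048 and 3072 before ending in
 * NULL.  grow_buffers() and try_to_load_aligned() later close this chain
 * into a ring by pointing the last head back at the first.
 */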
 961 
 962 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 963 {
 964         int i;
 965         int bhnum = 0;
 966         struct buffer_head * bhr[8];
 967 
 968         for (i = 0 ; i < nrbuf ; i++) {
 969                 if (bh[i] && !bh[i]->b_uptodate)
 970                         bhr[bhnum++] = bh[i];
 971         }
 972         if (bhnum)
 973                 ll_rw_block(READ, bhnum, bhr);
 974         for (i = 0 ; i < nrbuf ; i++) {
 975                 if (bh[i]) {
 976                         wait_on_buffer(bh[i]);
 977                 }
 978         }
 979 }
 980 
 981 /*
 982  * This actually gets enough info to try to align the stuff,
 983  * but we don't bother yet.. We'll have to check that nobody
 984  * else uses the buffers etc.
 985  *
 986  * "address" points to the new page we can use to move things
 987  * around..
 988  */
 989 static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
 990         unsigned long address)
 991 {
 992         while (nrbuf-- > 0)
 993                 brelse(bh[nrbuf]);
 994         return 0;
 995 }
 996 
 997 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 998         dev_t dev, int *b, int size)
 999 {
1000         struct buffer_head * bh[8];
1001         unsigned long page;
1002         unsigned long offset;
1003         int block;
1004         int nrbuf;
1005         int aligned = 1;
1006 
1007         bh[0] = first;
1008         nrbuf = 1;
1009         page = (unsigned long) first->b_data;
1010         if (page & ~PAGE_MASK)
1011                 aligned = 0;
1012         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1013                 block = *++b;
1014                 if (!block)
1015                         goto no_go;
1016                 first = get_hash_table(dev, block, size);
1017                 if (!first)
1018                         goto no_go;
1019                 bh[nrbuf++] = first;
1020                 if (page+offset != (unsigned long) first->b_data)
1021                         aligned = 0;
1022         }
1023         if (!aligned)
1024                 return try_to_align(bh, nrbuf, address);
1025         mem_map[MAP_NR(page)]++;
1026         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1027         while (nrbuf-- > 0)
1028                 brelse(bh[nrbuf]);
1029         free_page(address);
1030         ++current->mm->min_flt;
1031         return page;
1032 no_go:
1033         while (nrbuf-- > 0)
1034                 brelse(bh[nrbuf]);
1035         return 0;
1036 }
1037 
1038 static unsigned long try_to_load_aligned(unsigned long address,
1039         dev_t dev, int b[], int size)
1040 {
1041         struct buffer_head * bh, * tmp, * arr[8];
1042         unsigned long offset;
1043         int isize = BUFSIZE_INDEX(size);
1044         int * p;
1045         int block;
1046 
1047         bh = create_buffers(address, size);
1048         if (!bh)
1049                 return 0;
1050         /* do any of the buffers already exist? punt if so.. */
1051         p = b;
1052         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1053                 block = *(p++);
1054                 if (!block)
1055                         goto not_aligned;
1056                 if (find_buffer(dev, block, size))
1057                         goto not_aligned;
1058         }
1059         tmp = bh;
1060         p = b;
1061         block = 0;
1062         while (1) {
1063                 arr[block++] = bh;
1064                 bh->b_count = 1;
1065                 bh->b_dirt = 0;
1066                 bh->b_flushtime = 0;
1067                 bh->b_uptodate = 0;
1068                 bh->b_req = 0;
1069                 bh->b_dev = dev;
1070                 bh->b_blocknr = *(p++);
1071                 bh->b_list = BUF_CLEAN;
1072                 nr_buffers++;
1073                 nr_buffers_size[isize]++;
1074                 insert_into_queues(bh);
1075                 if (bh->b_this_page)
1076                         bh = bh->b_this_page;
1077                 else
1078                         break;
1079         }
1080         buffermem += PAGE_SIZE;
1081         bh->b_this_page = tmp;
1082         mem_map[MAP_NR(address)]++;
1083         buffer_pages[MAP_NR(address)] = bh;
1084         read_buffers(arr,block);
1085         while (block-- > 0)
1086                 brelse(arr[block]);
1087         ++current->mm->maj_flt;
1088         return address;
1089 not_aligned:
1090         while ((tmp = bh) != NULL) {
1091                 bh = bh->b_this_page;
1092                 put_unused_buffer_head(tmp);
1093         }
1094         return 0;
1095 }
1096 
1097 /*
1098  * Try-to-share-buffers tries to minimize memory use by trying to keep
1099  * both code pages and the buffer area in the same page. This is done by
1100  * (a) checking if the buffers are already aligned correctly in memory and
1101  * (b) if none of the buffer heads are in memory at all, trying to load
1102  * them into memory the way we want them.
1103  *
1104  * This doesn't guarantee that the memory is shared, but should under most
1105  * circumstances work very well indeed (ie >90% sharing of code pages on
1106  * demand-loadable executables).
1107  */
1108 static inline unsigned long try_to_share_buffers(unsigned long address,
1109         dev_t dev, int *b, int size)
1110 {
1111         struct buffer_head * bh;
1112         int block;
1113 
1114         block = b[0];
1115         if (!block)
1116                 return 0;
1117         bh = get_hash_table(dev, block, size);
1118         if (bh)
1119                 return check_aligned(bh, address, dev, b, size);
1120         return try_to_load_aligned(address, dev, b, size);
1121 }
1122 
1123 /*
1124  * bread_page reads four buffers into memory at the desired address. It's
1125  * a function of its own, as there is some speed to be got by reading them
1126  * all at the same time, not waiting for one to be read, and then another
1127  * etc. This also allows us to optimize memory usage by sharing code pages
1128  * and filesystem buffers..
1129  */
1130 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1131 {
1132         struct buffer_head * bh[8];
1133         unsigned long where;
1134         int i, j;
1135 
1136         if (!no_share) {
1137                 where = try_to_share_buffers(address, dev, b, size);
1138                 if (where)
1139                         return where;
1140         }
1141         ++current->mm->maj_flt;
1142         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1143                 bh[i] = NULL;
1144                 if (b[i])
1145                         bh[i] = getblk(dev, b[i], size);
1146         }
1147         read_buffers(bh,i);
1148         where = address;
1149         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1150                 if (bh[i]) {
1151                         if (bh[i]->b_uptodate)
1152                                 memcpy((void *) where, bh[i]->b_data, size);
1153                         brelse(bh[i]);
1154                 }
1155         }
1156         return address;
1157 }
1158 
1159 /*
1160  * Try to increase the number of buffers available: the size argument
1161  * is used to determine what kind of buffers we want.
1162  */
1163 static int grow_buffers(int pri, int size)
1164 {
1165         unsigned long page;
1166         struct buffer_head *bh, *tmp;
1167         struct buffer_head * insert_point;
1168         int isize;
1169 
1170         if ((size & 511) || (size > PAGE_SIZE)) {
1171                 printk("VFS: grow_buffers: size = %d\n",size);
1172                 return 0;
1173         }
1174 
1175         isize = BUFSIZE_INDEX(size);
1176 
1177         if (!(page = __get_free_page(pri)))
1178                 return 0;
1179         bh = create_buffers(page, size);
1180         if (!bh) {
1181                 free_page(page);
1182                 return 0;
1183         }
1184 
1185         insert_point = free_list[isize];
1186 
1187         tmp = bh;
1188         while (1) {
1189                 nr_free[isize]++;
1190                 if (insert_point) {
1191                         tmp->b_next_free = insert_point->b_next_free;
1192                         tmp->b_prev_free = insert_point;
1193                         insert_point->b_next_free->b_prev_free = tmp;
1194                         insert_point->b_next_free = tmp;
1195                 } else {
1196                         tmp->b_prev_free = tmp;
1197                         tmp->b_next_free = tmp;
1198                 }
1199                 insert_point = tmp;
1200                 ++nr_buffers;
1201                 if (tmp->b_this_page)
1202                         tmp = tmp->b_this_page;
1203                 else
1204                         break;
1205         }
1206         free_list[isize] = bh;
1207         buffer_pages[MAP_NR(page)] = bh;
1208         tmp->b_this_page = bh;
1209         wake_up(&buffer_wait);
1210         buffermem += PAGE_SIZE;
1211         return 1;
1212 }
1213 
1214 /*
1215  * try_to_free() checks if all the buffers on this particular page
 1216  * are unused, and frees the page if so.
1217  */
1218 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1219 {
1220         unsigned long page;
1221         struct buffer_head * tmp, * p;
1222         int isize = BUFSIZE_INDEX(bh->b_size);
1223 
1224         *bhp = bh;
1225         page = (unsigned long) bh->b_data;
1226         page &= PAGE_MASK;
1227         tmp = bh;
1228         do {
1229                 if (!tmp)
1230                         return 0;
1231                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1232                         return 0;
1233                 tmp = tmp->b_this_page;
1234         } while (tmp != bh);
1235         tmp = bh;
1236         do {
1237                 p = tmp;
1238                 tmp = tmp->b_this_page;
1239                 nr_buffers--;
1240                 nr_buffers_size[isize]--;
1241                 if (p == *bhp)
1242                   {
1243                     *bhp = p->b_prev_free;
1244                     if (p == *bhp) /* Was this the last in the list? */
1245                       *bhp = NULL;
1246                   }
1247                 remove_from_queues(p);
1248                 put_unused_buffer_head(p);
1249         } while (tmp != bh);
1250         buffermem -= PAGE_SIZE;
1251         buffer_pages[MAP_NR(page)] = NULL;
1252         free_page(page);
1253         return !mem_map[MAP_NR(page)];
1254 }
1255 
1256 
1257 /*
1258  * Consult the load average for buffers and decide whether or not
1259  * we should shrink the buffers of one size or not.  If we decide yes,
 1260  * do it and return 1.  Else return 0.  Do not attempt to shrink the
 1261  * size that is specified.
1262  *
1263  * I would prefer not to use a load average, but the way things are now it
1264  * seems unavoidable.  The way to get rid of it would be to force clustering
1265  * universally, so that when we reclaim buffers we always reclaim an entire
1266  * page.  Doing this would mean that we all need to move towards QMAGIC.
1267  */
1268 
1269 static int maybe_shrink_lav_buffers(int size)
1270 {          
1271         int nlist;
1272         int isize;
1273         int total_lav, total_n_buffers, n_sizes;
1274         
1275         /* Do not consider the shared buffers since they would not tend
1276            to have getblk called very often, and this would throw off
1277            the lav.  They are not easily reclaimable anyway (let the swapper
1278            make the first move). */
1279   
1280         total_lav = total_n_buffers = n_sizes = 0;
1281         for(nlist = 0; nlist < NR_SIZES; nlist++)
1282          {
1283                  total_lav += buffers_lav[nlist];
1284                  if(nr_buffers_size[nlist]) n_sizes++;
1285                  total_n_buffers += nr_buffers_size[nlist];
1286                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1287          }
1288         
1289         /* See if we have an excessive number of buffers of a particular
1290            size - if so, victimize that bunch. */
1291   
1292         isize = (size ? BUFSIZE_INDEX(size) : -1);
1293         
1294         if (n_sizes > 1)
1295                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1296                   {
1297                           if(nlist == isize) continue;
1298                           if(nr_buffers_size[nlist] &&
1299                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1300                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1301                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1302                                             return 1;
1303                   }
1304         return 0;
1305 }
1306 /*
1307  * Try to free up some pages by shrinking the buffer-cache
1308  *
1309  * Priority tells the routine how hard to try to shrink the
1310  * buffers: 3 means "don't bother too much", while a value
1311  * of 0 means "we'd better get some free pages now".
1312  */
1313 int shrink_buffers(unsigned int priority)
1314 {
1315         if (priority < 2) {
1316                 sync_buffers(0,0);
1317         }
1318 
1319         if(priority == 2) wakeup_bdflush(1);
1320 
1321         if(maybe_shrink_lav_buffers(0)) return 1;
1322 
1323         /* No good candidate size - take any size we can find */
1324         return shrink_specific_buffers(priority, 0);
1325 }
1326 
1327 static int shrink_specific_buffers(unsigned int priority, int size)
1328 {
1329         struct buffer_head *bh;
1330         int nlist;
1331         int i, isize, isize1;
1332 
1333 #ifdef DEBUG
1334         if(size) printk("Shrinking buffers of size %d\n", size);
1335 #endif
1336         /* First try the free lists, and see if we can get a complete page
1337            from here */
1338         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1339 
1340         for(isize = 0; isize<NR_SIZES; isize++){
1341                 if(isize1 != -1 && isize1 != isize) continue;
1342                 bh = free_list[isize];
1343                 if(!bh) continue;
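                     /* The free list is circular; the "!i ||" test lets the first
                        element be examined before the wrap-around check applies. */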
1344                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1345                         if (bh->b_count || !bh->b_this_page)
1346                                  continue;
1347                         if (try_to_free(bh, &bh))
1348                                  return 1;
1349                         if(!bh) break; /* Some interrupt must have used it after we
1350                                           freed the page.  No big deal - keep looking */
1351                 }
1352         }
1353         
1354         /* Not enough in the free lists, now try the lru list */
1355         
1356         for(nlist = 0; nlist < NR_LIST; nlist++) {
1357         repeat1:
1358                 if(priority > 3 && nlist == BUF_SHARED) continue;
1359                 bh = lru_list[nlist];
1360                 if(!bh) continue;
1361                 i = nr_buffers_type[nlist] >> priority;
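                     /* Scan only a fraction of the list: priority 0 scans every
                        buffer on it, priority p only about 1/2^p of them. */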
1362                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1363                         /* We may have stalled while waiting for I/O to complete. */
1364                         if(bh->b_list != nlist) goto repeat1;
1365                         if (bh->b_count || !bh->b_this_page)
1366                                  continue;
1367                         if(size && bh->b_size != size) continue;
1368                         if (bh->b_lock)
1369                                  if (priority)
1370                                           continue;
1371                                  else
1372                                           wait_on_buffer(bh);
1373                         if (bh->b_dirt) {
1374                                 bh->b_count++;
1375                                 bh->b_flushtime = 0;
1376                                 ll_rw_block(WRITEA, 1, &bh);
1377                                 bh->b_count--;
1378                                 continue;
1379                         }
1380                         if (try_to_free(bh, &bh))
1381                                  return 1;
1382                         if(!bh) break;
1383                 }
1384         }
1385         return 0;
1386 }
1387 
1388 
1389 void show_buffers(void)
1390 {
1391         struct buffer_head * bh;
1392         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1393         int shared;
1394         int nlist, isize;
1395 
1396         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1397         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1398         printk("Buffer blocks:   %6d\n",nr_buffers);
1399 
1400         for(nlist = 0; nlist < NR_LIST; nlist++) {
1401           shared = found = locked = dirty = used = lastused = 0;
1402           bh = lru_list[nlist];
1403           if(!bh) continue;
1404           do {
1405                 found++;
1406                 if (bh->b_lock)
1407                         locked++;
1408                 if (bh->b_dirt)
1409                         dirty++;
1410                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
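                     /* A page use count other than one means the page holding this
                        buffer's data is also mapped elsewhere, i.e. shared. */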
1411                 if (bh->b_count)
1412                         used++, lastused = found;
1413                 bh = bh->b_next_free;
1414               } while (bh != lru_list[nlist]);
1415         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1416                 nlist, found, used, lastused, locked, dirty, shared);
1417         };
1418         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1419         for(isize = 0; isize<NR_SIZES; isize++){
1420                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1421                        buffers_lav[isize], nr_free[isize]);
1422                 for(nlist = 0; nlist < NR_LIST; nlist++)
1423                          printk("%7d ", nr_buffers_st[isize][nlist]);
1424                 printk("\n");
1425         }
1426 }
1427 
1428 /*
1429  * try_to_reassign() checks whether all the buffers on this particular page
1430  * are unused, and if so reassigns them to a new cluster.
1431  */
1432 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1433                            dev_t dev, unsigned int starting_block)
1434 {
1435         unsigned long page;
1436         struct buffer_head * tmp, * p;
1437 
1438         *bhp = bh;
1439         page = (unsigned long) bh->b_data;
1440         page &= PAGE_MASK;
1441         if(mem_map[MAP_NR(page)] != 1) return 0;
1442         tmp = bh;
1443         do {
1444                 if (!tmp)
1445                          return 0;
1446                 
1447                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1448                          return 0;
1449                 tmp = tmp->b_this_page;
1450         } while (tmp != bh);
1451         tmp = bh;
1452         
1453         while((unsigned long) tmp->b_data & (PAGE_SIZE - 1)) 
1454                  tmp = tmp->b_this_page;
1455         
1456         /* This is the buffer at the head of the page */
1457         bh = tmp;
1458         do {
1459                 p = tmp;
1460                 tmp = tmp->b_this_page;
1461                 remove_from_queues(p);
1462                 p->b_dev=dev;
1463                 p->b_uptodate = 0;
1464                 p->b_req = 0;
1465                 p->b_blocknr=starting_block++;
1466                 insert_into_queues(p);
1467         } while (tmp != bh);
1468         return 1;
1469 }
1470 
1471 /*
1472  * Try to find a free cluster by locating a page where
1473  * all of the buffers are unused.  We would like this function
1474  * to be atomic, so we do not call anything that might cause
1475  * the process to sleep.  The priority is somewhat similar to
1476  * the priority used in shrink_buffers.
1477  * 
1478  * My thinking is that the kernel should end up using whole
1479  * pages for the buffer cache as much of the time as possible.
1480  * This way the other buffers on a particular page are likely
1481  * to be very near each other on the free list, and we will not
1482  * be expiring data prematurely.  For now we only cannibalize buffers
1483  * of the same size to keep the code simpler.
1484  */
1485 static int reassign_cluster(dev_t dev, 
1486                      unsigned int starting_block, int size)
1487 {
1488         struct buffer_head *bh;
1489         int isize = BUFSIZE_INDEX(size);
1490         int i;
1491 
1492         /* We want to give ourselves a really good shot at generating
1493            a cluster, and since we only take buffers from the free
1494            list, we "overfill" it a little. */
1495 
1496         while(nr_free[isize] < 32) refill_freelist(size);
1497 
1498         bh = free_list[isize];
1499         if(bh)
1500                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1501                          if (!bh->b_this_page)  continue;
1502                          if (try_to_reassign(bh, &bh, dev, starting_block))
1503                                  return 4;
1504                  }
1505         return 0;
1506 }
1507 
1508 /* This function tries to generate a new cluster of buffers
1509  * from a new page in memory.  We should only do this if we have
1510  * not expanded the buffer cache to the maximum size that we allow.
1511  */
1512 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1513 {
1514         struct buffer_head * bh, * tmp, * arr[8];
1515         int isize = BUFSIZE_INDEX(size);
1516         unsigned long offset;
1517         unsigned long page;
1518         int nblock;
1519 
1520         page = get_free_page(GFP_NOBUFFER);
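             /* GFP_NOBUFFER presumably tells the allocator not to reclaim memory by
                shrinking the buffer cache, which would recurse into this code. */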
1521         if(!page) return 0;
1522 
1523         bh = create_buffers(page, size);
1524         if (!bh) {
1525                 free_page(page);
1526                 return 0;
1527         };
1528         nblock = block;
1529         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1530                 if (find_buffer(dev, nblock++, size))
1531                          goto not_aligned;
1532         }
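             /* None of the target blocks is cached yet: initialize every buffer
                head on this page, insert each into the hash and LRU queues, and
                remember them in arr[] so they can be released below. */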
1533         tmp = bh;
1534         nblock = 0;
1535         while (1) {
1536                 arr[nblock++] = bh;
1537                 bh->b_count = 1;
1538                 bh->b_dirt = 0;
1539                 bh->b_flushtime = 0;
1540                 bh->b_lock = 0;
1541                 bh->b_uptodate = 0;
1542                 bh->b_req = 0;
1543                 bh->b_dev = dev;
1544                 bh->b_list = BUF_CLEAN;
1545                 bh->b_blocknr = block++;
1546                 nr_buffers++;
1547                 nr_buffers_size[isize]++;
1548                 insert_into_queues(bh);
1549                 if (bh->b_this_page)
1550                         bh = bh->b_this_page;
1551                 else
1552                         break;
1553         }
1554         buffermem += PAGE_SIZE;
1555         buffer_pages[MAP_NR(page)] = bh;
1556         bh->b_this_page = tmp;
1557         while (nblock-- > 0)
1558                 brelse(arr[nblock]);
1559         return 4;
1560 not_aligned:
1561         while ((tmp = bh) != NULL) {
1562                 bh = bh->b_this_page;
1563                 put_unused_buffer_head(tmp);
1564         }
1565         free_page(page);
1566         return 0;
1567 }
1568 
1569 unsigned long generate_cluster(dev_t dev, int b[], int size)
1570 {
1571         int i, offset;
1572         
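             /* Only build a cluster when the requested blocks are consecutive and
                none of them is already present in the buffer cache. */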
1573         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1574                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1575                 if(find_buffer(dev, b[i], size)) return 0;
1576         };
1577 
1578         /* OK, we have a candidate for a new cluster */
1579         
1580         /* See if one size of buffer is over-represented in the buffer cache,
1581            if so reduce the numbers of buffers */
1582         if(maybe_shrink_lav_buffers(size))
1583          {
1584                  int retval;
1585                  retval = try_to_generate_cluster(dev, b[0], size);
1586                  if(retval) return retval;
1587          };
1588         
1589         if (nr_free_pages > min_free_pages*2) 
1590                  return try_to_generate_cluster(dev, b[0], size);
1591         else
1592                  return reassign_cluster(dev, b[0], size);
1593 }
1594 
1595 /*
1596  * This sets up the initial buffer free list.  nr_buffers_type is set
1597  * to one less than the actual number of buffers, as a sop to backwards
1598  * compatibility --- the old code did this (I think unintentionally,
1599  * but I'm not sure), and programs in the ps package expect it.
1600  *                                      - TYT 8/30/92
1601  */
1602 void buffer_init(void)
1603 {
1604         int i;
1605         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1606 
1607         if (high_memory >= 4*1024*1024) {
1608                 if(high_memory >= 16*1024*1024)
1609                          nr_hash = 16381;
1610                 else
1611                          nr_hash = 4093;
1612         } else {
1613                 nr_hash = 997;
1614         };
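             /* The hash table sizes above (997, 4093 and 16381) are primes, picked
                according to how much physical memory the machine has. */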
1615         
1616         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1617                                                      sizeof(struct buffer_head *));
1618 
1619 
1620         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1621                                                      sizeof(struct buffer_head *));
1622         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1623                 buffer_pages[i] = NULL;
1624 
1625         for (i = 0 ; i < nr_hash ; i++)
1626                 hash_table[i] = NULL;
1627         lru_list[BUF_CLEAN] = 0;
1628         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1629         if (!free_list[isize])
1630                 panic("VFS: Unable to initialize buffer free list!");
1631         return;
1632 }
1633 
1634 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1635  * response to dirty buffers.  Once this process is activated, we write back
1636  * a limited number of buffers to the disks and then go back to sleep again.
1637  * In effect this is a process which never leaves kernel mode, and does not have
1638  * any user memory associated with it except for the stack.  There is also
1639  * a kernel stack page, which obviously must be separate from the user stack.
1640  */
1641 struct wait_queue * bdflush_wait = NULL;
1642 struct wait_queue * bdflush_done = NULL;
1643 
1644 static int bdflush_running = 0;
1645 
1646 static void wakeup_bdflush(int wait)
1647 {
1648         if(!bdflush_running){
1649                 printk("Warning - bdflush not running\n");
1650                 sync_buffers(0,0);
1651                 return;
1652         };
1653         wake_up(&bdflush_wait);
1654         if(wait) sleep_on(&bdflush_done);
1655 }
1656 
1657 
1658 
1659 /* 
1660  * Here we attempt to write back old buffers.  We also try to flush the
1661  * inodes and superblocks, since this function is essentially "update" and
1662  * otherwise there would be no way of ensuring that they ever get written
1663  * back.  Ideally, we would have a timestamp on the inodes and superblocks
1664  * so that we could write back only the old ones.
1665  */
1666 
1667 asmlinkage int sync_old_buffers(void)
1668 {
1669         int i, isize;
1670         int ndirty, nwritten;
1671         int nlist;
1672         int ncount;
1673         struct buffer_head * bh, *next;
1674 
1675         sync_supers(0);
1676         sync_inodes(0);
1677 
1678         ncount = 0;
1679 #ifdef DEBUG
1680         for(nlist = 0; nlist < NR_LIST; nlist++)
1681 #else
1682         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1683 #endif
1684         {
1685                 ndirty = 0;
1686                 nwritten = 0;
1687         repeat:
1688                 bh = lru_list[nlist];
1689                 if(bh) 
1690                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1691                                  /* We may have stalled while waiting for I/O to complete. */
1692                                  if(bh->b_list != nlist) goto repeat;
1693                                  next = bh->b_next_free;
1694                                  if(!lru_list[nlist]) {
1695                                          printk("Dirty list empty %d\n", i);
1696                                          break;
1697                                  }
1698                                  
1699                                  /* Clean buffer on dirty list?  Refile it */
1700                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1701                                   {
1702                                           refile_buffer(bh);
1703                                           continue;
1704                                   }
1705                                  
1706                                  if (bh->b_lock || !bh->b_dirt)
1707                                           continue;
1708                                  ndirty++;
1709                                  if(bh->b_flushtime > jiffies) continue;
1710                                  nwritten++;
1711                                  bh->b_count++;
1712                                  bh->b_flushtime = 0;
1713 #ifdef DEBUG
1714                                  if(nlist != BUF_DIRTY) ncount++;
1715 #endif
1716                                  ll_rw_block(WRITE, 1, &bh);
1717                                  bh->b_count--;
1718                          }
1719         }
1720 #ifdef DEBUG
1721         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1722         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1723 #endif
1724         
1725         /* We assume that we only come through here on a regular
1726            schedule, like every 5 seconds.  Now update load averages.  
1727            Shift usage counts to prevent overflow. */
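             /* CALC_LOAD presumably applies the same exponential-decay averaging
                used for the system load average, with lav_const as the decay
                constant; buffer_usage then starts the next interval from zero. */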
1728         for(isize = 0; isize<NR_SIZES; isize++){
1729                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1730                 buffer_usage[isize] = 0;
1731         };
1732         return 0;
1733 }
1734 
1735 
1736 /* This is the interface to bdflush.  As we get more sophisticated, we can
1737  * pass tuning parameters to this "process", to adjust how it behaves.
1738  * Invoking it again after the daemon has been started simply modifies the
1739  * tuning parameters.  We verify each parameter, however, to make sure
1740  * that it is reasonable. */
1741 
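     /*
      * A minimal user-space sketch (not part of this source) of how the system
      * call below might be driven, assuming a libc-style stub named bdflush()
      * that issues it:
      *
      *     if (fork() == 0)
      *             bdflush(0, 0);     -- the child becomes the flush daemon and never returns
      *     for (;;) {
      *             sleep(5);
      *             bdflush(1, 0);     -- periodic "update"-style flush of old buffers
      *     }
      */
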
1742 asmlinkage int sys_bdflush(int func, long data)
1743 {
1744         int i, error;
1745         int ndirty;
1746         int nlist;
1747         int ncount;
1748         struct buffer_head * bh, *next;
1749 
1750         if (!suser())
1751                 return -EPERM;
1752 
1753         if (func == 1)
1754                  return sync_old_buffers();
1755 
1756         /* Func 0 starts the daemon, func 1 flushes old buffers; for func >= 2, an even value reads parameter (func-2)/2 and an odd value writes it */
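             /* For example: func 2 reads bdf_prm.data[0], func 3 writes it,
                func 4 reads bdf_prm.data[1], func 5 writes it, and so on. */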
1757         if (func >= 2) {
1758                 i = (func-2) >> 1;
1759                 if (i < 0 || i >= N_PARAM)
1760                         return -EINVAL;
1761                 if((func & 1) == 0) {
1762                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1763                         if (error)
1764                                 return error;
1765                         put_fs_long(bdf_prm.data[i], data);
1766                         return 0;
1767                 };
1768                 if (data < bdflush_min[i] || data > bdflush_max[i])
1769                         return -EINVAL;
1770                 bdf_prm.data[i] = data;
1771                 return 0;
1772         };
1773         
1774         if (bdflush_running)
1775                 return -EBUSY; /* Only one copy of this running at one time */
1776         bdflush_running++;
1777         
1778         /* OK, from here on is the daemon */
1779         
1780         for (;;) {
1781 #ifdef DEBUG
1782                 printk("bdflush() activated...");
1783 #endif
1784                 
1785                 ncount = 0;
1786 #ifdef DEBUG
1787                 for(nlist = 0; nlist < NR_LIST; nlist++)
1788 #else
1789                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1790 #endif
1791                  {
1792                          ndirty = 0;
1793                  repeat:
1794                          bh = lru_list[nlist];
1795                          if(bh) 
1796                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1797                                        bh = next) {
1798                                           /* We may have stalled while waiting for I/O to complete. */
1799                                           if(bh->b_list != nlist) goto repeat;
1800                                           next = bh->b_next_free;
1801                                           if(!lru_list[nlist]) {
1802                                                   printk("Dirty list empty %d\n", i);
1803                                                   break;
1804                                           }
1805                                           
1806                                           /* Clean buffer on dirty list?  Refile it */
1807                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1808                                            {
1809                                                    refile_buffer(bh);
1810                                                    continue;
1811                                            }
1812                                           
1813                                           if (bh->b_lock || !bh->b_dirt)
1814                                                    continue;
1815                                           /* Should we write back buffers that are shared or not??
1816                                              currently dirty buffers are not shared, so it does not matter */
1817                                           bh->b_count++;
1818                                           ndirty++;
1819                                           bh->b_flushtime = 0;
1820                                           ll_rw_block(WRITE, 1, &bh);
1821 #ifdef DEBUG
1822                                           if(nlist != BUF_DIRTY) ncount++;
1823 #endif
1824                                           bh->b_count--;
1825                                   }
1826                  }
1827 #ifdef DEBUG
1828                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1829                 printk("sleeping again.\n");
1830 #endif
1831                 wake_up(&bdflush_done);
1832                 
1833                 /* If there are still a lot of dirty buffers around, skip the sleep
1834                    and flush some more */
1835                 
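                     /* Concretely: sleep only once the dirty buffers have dropped
                        below nfract percent of the non-shared buffers; otherwise
                        go straight back and write out another batch. */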
1836                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1837                    bdf_prm.b_un.nfract/100) {
1838                         if (current->signal & (1 << (SIGKILL-1))) {
1839                                 bdflush_running--;
1840                                 return 0;
1841                         }
1842                         current->signal = 0;
1843                         interruptible_sleep_on(&bdflush_wait);
1844                 }
1845         }
1846 }
1847 
1848 
1849 /*
1850  * Overrides for Emacs so that we follow Linus's tabbing style.
1851  * Emacs will notice this stuff at the end of the file and automatically
1852  * adjust the settings for this buffer only.  This must remain at the end
1853  * of the file.
1854  * ---------------------------------------------------------------------------
1855  * Local variables:
1856  * c-indent-level: 8
1857  * c-brace-imaginary-offset: 0
1858  * c-brace-offset: -8
1859  * c-argdecl-indent: 8
1860  * c-label-offset: -8
1861  * c-continued-statement-offset: 8
1862  * c-continued-brace-offset: 0
1863  * End:
1864  */
