root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions:
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. remove_from_hash_queue
  10. remove_from_lru_list
  11. remove_from_free_list
  12. remove_from_queues
  13. put_last_lru
  14. put_last_free
  15. insert_into_queues
  16. find_buffer
  17. get_hash_table
  18. set_blocksize
  19. refill_freelist
  20. getblk
  21. set_writetime
  22. refile_buffer
  23. brelse
  24. bread
  25. breada
  26. put_unused_buffer_head
  27. get_more_buffer_heads
  28. get_unused_buffer_head
  29. create_buffers
  30. read_buffers
  31. try_to_align
  32. check_aligned
  33. try_to_load_aligned
  34. try_to_share_buffers
  35. bread_page
  36. grow_buffers
  37. try_to_free
  38. maybe_shrink_lav_buffers
  39. shrink_buffers
  40. shrink_specific_buffers
  41. show_buffers
  42. try_to_reassign
  43. reassign_cluster
  44. try_to_generate_cluster
  45. generate_cluster
  46. buffer_init
  47. wakeup_bdflush
  48. sync_old_buffers
  49. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #define NR_SIZES 4
  33 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  35 
  36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
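/*
 * For illustration, buffersize_index maps a block size (in units of 512
 * bytes) down to a small index into the per-size arrays used throughout
 * this file, and bufferindex_size[] is the inverse mapping:
 *
 *      BUFSIZE_INDEX(512)  == 0        bufferindex_size[0] == 512
 *      BUFSIZE_INDEX(1024) == 1        bufferindex_size[1] == 1024
 *      BUFSIZE_INDEX(2048) == 2        bufferindex_size[2] == 2048
 *      BUFSIZE_INDEX(4096) == 3        bufferindex_size[3] == 4096
 *
 * Unsupported sizes map to -1 in buffersize_index[].
 */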
  37 
  38 static int grow_buffers(int pri, int size);
  39 static int shrink_specific_buffers(unsigned int priority, int size);
  40 static int maybe_shrink_lav_buffers(int);
  41 
  42 static int nr_hash = 0;  /* Size of hash table */
  43 static struct buffer_head ** hash_table;
  44 struct buffer_head ** buffer_pages;
  45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  47 static struct buffer_head * unused_list = NULL;
  48 static struct wait_queue * buffer_wait = NULL;
  49 
  50 int nr_buffers = 0;
  51 int nr_buffers_type[NR_LIST] = {0,};
  52 int nr_buffers_size[NR_SIZES] = {0,};
  53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  54 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  55 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  56 int nr_free[NR_SIZES] = {0,};
  57 int buffermem = 0;
  58 int nr_buffer_heads = 0;
  59 extern int *blksize_size[];
  60 
  61 /* Here is the parameter block for the bdflush process. */
  62 static void wakeup_bdflush(int);
  63 
  64 #define N_PARAM 9
  65 #define LAV
  66 
  67 static union bdflush_param{
  68         struct {
  69                 int nfract;  /* Percentage of buffer cache dirty to 
  70                                 activate bdflush */
  71                 int ndirty;  /* Maximum number of dirty blocks to write out per
  72                                 wake-cycle */
  73                 int nrefill; /* Number of clean buffers to try and obtain
  74                                 each time we call refill */
  75                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  76                                   when trying to refill buffers. */
  77                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  78                                     search for free clusters */
  79                 int age_buffer;  /* Time for normal buffer to age before 
  80                                     we flush it */
  81                 int age_super;  /* Time for superblock to age before we 
  82                                    flush it */
  83                 int lav_const;  /* Constant used for load average (time
  84                                    constant) */
  85                 int lav_ratio;  /* Used to determine how low a lav for a
  86                                    particular size can go before we start to
  87                                    trim back the buffers */
  88         } b_un;
  89         unsigned int data[N_PARAM];
  90 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
  91 
  92 /* The lav constant is set for 1 minute, as long as the update process runs
  93    every 5 seconds.  If you change the frequency of update, the time
  94    constant will also change. */
  95 
  96 
  97 /* These are the min and max parameter values that we will allow to be assigned */
  98 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
  99 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
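/*
 * Because bdf_prm is a union, the named fields of b_un and the data[]
 * array overlay the same storage, in declaration order: data[0] is
 * nfract, data[1] is ndirty, and so on.  A tuning interface can thus
 * validate and set parameters generically; a minimal sketch (not the
 * actual sys_bdflush code, which appears later in this file):
 *
 *      static int set_bdflush_param(int i, int value)
 *      {
 *              if (i < 0 || i >= N_PARAM)
 *                      return -EINVAL;
 *              if (value < bdflush_min[i] || value > bdflush_max[i])
 *                      return -EINVAL;
 *              bdf_prm.data[i] = value;
 *              return 0;
 *      }
 */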
 100 
 101 /*
 102  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  103  * and got rid of the cli-sti pairs. The wait-queue routines still
 104  * need cli-sti, but now it's just a couple of 386 instructions or so.
 105  *
 106  * Note that the real wait_on_buffer() is an inline function that checks
 107  * if 'b_wait' is set before calling this, so that the queues aren't set
 108  * up unnecessarily.
 109  */
 110 void __wait_on_buffer(struct buffer_head * bh)
 111 {
 112         struct wait_queue wait = { current, NULL };
 113 
 114         bh->b_count++;
 115         add_wait_queue(&bh->b_wait, &wait);
 116 repeat:
 117         current->state = TASK_UNINTERRUPTIBLE;
 118         if (bh->b_lock) {
 119                 schedule();
 120                 goto repeat;
 121         }
 122         remove_wait_queue(&bh->b_wait, &wait);
 123         bh->b_count--;
 124         current->state = TASK_RUNNING;
 125 }
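/*
 * The inline wrapper mentioned above lives in <linux/locks.h>; in kernels
 * of this vintage it is essentially the following, testing the lock bit
 * so that no wait-queue work is done in the common, unlocked case:
 *
 *      extern inline void wait_on_buffer(struct buffer_head * bh)
 *      {
 *              if (bh->b_lock)
 *                      __wait_on_buffer(bh);
 *      }
 */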
 126 
 127 /* Call sync_buffers with wait!=0 to ensure that the call does not
 128    return until all buffer writes have completed.  Sync() may return
 129    before the writes have finished; fsync() may not. */
 130 
 131 
 132 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 133    spontaneously dirty themselves without ever brelse being called.
 134    We will ultimately want to put these in a separate list, but for
 135    now we search all of the lists for dirty buffers */
 136 
 137 static int sync_buffers(dev_t dev, int wait)
 138 {
 139         int i, retry, pass = 0, err = 0;
 140         int nlist, ncount;
 141         struct buffer_head * bh, *next;
 142 
 143         /* One pass for no-wait, three for wait:
 144            0) write out all dirty, unlocked buffers;
 145            1) write out all dirty buffers, waiting if locked;
 146            2) wait for completion by waiting for all buffers to unlock. */
 147  repeat:
 148         retry = 0;
 149  repeat2:
 150         ncount = 0;
 151         /* We search all lists as a failsafe mechanism, not because we expect
 152            there to be dirty buffers on any of the other lists. */
 153         for(nlist = 0; nlist < NR_LIST; nlist++)
 154          {
 155          repeat1:
 156                  bh = lru_list[nlist];
 157                  if(!bh) continue;
 158                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 159                          if(bh->b_list != nlist) goto repeat1;
 160                          next = bh->b_next_free;
 161                          if(!lru_list[nlist]) break;
 162                          if (dev && bh->b_dev != dev)
 163                                   continue;
 164                          if (bh->b_lock)
 165                           {
 166                                   /* Buffer is locked; skip it unless wait is
 167                                      requested AND pass > 0. */
 168                                   if (!wait || !pass) {
 169                                           retry = 1;
 170                                           continue;
 171                                   }
 172                                   wait_on_buffer (bh);
 173                                   goto repeat2;
 174                           }
 175                          /* If an unlocked buffer is not uptodate, there has
 176                              been an IO error. Skip it. */
 177                          if (wait && bh->b_req && !bh->b_lock &&
 178                              !bh->b_dirt && !bh->b_uptodate) {
 179                                   err = 1;
 180                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 181                                   continue;
 182                           }
 183                          /* Don't write clean buffers.  Don't write ANY buffers
 184                             on the third pass. */
 185                          if (!bh->b_dirt || pass>=2)
 186                                   continue;
 187                          /* don't bother about locked buffers */
 188                          if (bh->b_lock)
 189                                  continue;
 190                          bh->b_count++;
 191                          bh->b_flushtime = 0;
 192                          ll_rw_block(WRITE, 1, &bh);
 193 
 194                          if(nlist != BUF_DIRTY) { 
 195                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 196                                  ncount++;
 197                          };
 198                          bh->b_count--;
 199                          retry = 1;
 200                  }
 201          }
 202         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 203         
 204         /* If we are waiting for the sync to succeed, and if any dirty
 205            blocks were written, then repeat; on the second pass, only
 206            wait for buffers being written (do not pass to write any
 207            more buffers on the second pass). */
 208         if (wait && retry && ++pass<=2)
 209                  goto repeat;
 210         return err;
 211 }
 212 
 213 void sync_dev(dev_t dev)
 214 {
 215         sync_buffers(dev, 0);
 216         sync_supers(dev);
 217         sync_inodes(dev);
 218         sync_buffers(dev, 0);
 219 }
 220 
 221 int fsync_dev(dev_t dev)
 222 {
 223         sync_buffers(dev, 0);
 224         sync_supers(dev);
 225         sync_inodes(dev);
 226         return sync_buffers(dev, 1);
 227 }
 228 
 229 asmlinkage int sys_sync(void)
 230 {
 231         sync_dev(0);
 232         return 0;
 233 }
 234 
 235 int file_fsync (struct inode *inode, struct file *filp)
 236 {
 237         return fsync_dev(inode->i_dev);
 238 }
 239 
 240 asmlinkage int sys_fsync(unsigned int fd)
 241 {
 242         struct file * file;
 243         struct inode * inode;
 244 
 245         if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
 246                 return -EBADF;
 247         if (!file->f_op || !file->f_op->fsync)
 248                 return -EINVAL;
 249         if (file->f_op->fsync(inode,file))
 250                 return -EIO;
 251         return 0;
 252 }
 253 
 254 void invalidate_buffers(dev_t dev)
 255 {
 256         int i;
 257         int nlist;
 258         struct buffer_head * bh;
 259 
 260         for(nlist = 0; nlist < NR_LIST; nlist++) {
 261                 bh = lru_list[nlist];
 262                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 263                      bh = bh->b_next_free) {
 264                         if (bh->b_dev != dev)
 265                                  continue;
 266                         wait_on_buffer(bh);
 267                         if (bh->b_dev == dev)
 268                                  bh->b_flushtime = bh->b_uptodate = 
 269                                           bh->b_dirt = bh->b_req = 0;
 270                 }
 271         }
 272 }
 273 
 274 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 275 #define hash(dev,block) hash_table[_hashfn(dev,block)]
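/*
 * The hash is a simple XOR of device and block number, folded into one of
 * nr_hash buckets; each bucket is a list threaded through b_next/b_prev.
 * A lookup therefore walks one short chain, exactly as find_buffer()
 * below does:
 *
 *      struct buffer_head * tmp;
 *
 *      for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 *              if (tmp->b_dev == dev && tmp->b_blocknr == block)
 *                      break;
 */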
 276 
 277 static inline void remove_from_hash_queue(struct buffer_head * bh)
 278 {
 279         if (bh->b_next)
 280                 bh->b_next->b_prev = bh->b_prev;
 281         if (bh->b_prev)
 282                 bh->b_prev->b_next = bh->b_next;
 283         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 284                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 285         bh->b_next = bh->b_prev = NULL;
 286 }
 287 
 288 static inline void remove_from_lru_list(struct buffer_head * bh)
 289 {
 290         if (!(bh->b_prev_free) || !(bh->b_next_free))
 291                 panic("VFS: LRU block list corrupted");
 292         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 293         bh->b_prev_free->b_next_free = bh->b_next_free;
 294         bh->b_next_free->b_prev_free = bh->b_prev_free;
 295 
 296         if (lru_list[bh->b_list] == bh)
 297                  lru_list[bh->b_list] = bh->b_next_free;
 298         if(lru_list[bh->b_list] == bh)
 299                  lru_list[bh->b_list] = NULL;
 300         bh->b_next_free = bh->b_prev_free = NULL;
 301 }
 302 
 303 static inline void remove_from_free_list(struct buffer_head * bh)
 304 {
 305         int isize = BUFSIZE_INDEX(bh->b_size);
 306         if (!(bh->b_prev_free) || !(bh->b_next_free))
 307                 panic("VFS: Free block list corrupted");
 308         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 309         if(!free_list[isize])
 310                  panic("Free list empty");
 311         nr_free[isize]--;
 312         if(bh->b_next_free == bh)
 313                  free_list[isize] = NULL;
 314         else {
 315                 bh->b_prev_free->b_next_free = bh->b_next_free;
 316                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 317                 if (free_list[isize] == bh)
 318                          free_list[isize] = bh->b_next_free;
 319         };
 320         bh->b_next_free = bh->b_prev_free = NULL;
 321 }
 322 
 323 static inline void remove_from_queues(struct buffer_head * bh)
 324 {
 325         if(bh->b_dev == 0xffff) {
 326                 remove_from_free_list(bh); /* Free list entries should not be
 327                                               in the hash queue */
 328                 return;
 329         };
 330         nr_buffers_type[bh->b_list]--;
 331         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 332         remove_from_hash_queue(bh);
 333         remove_from_lru_list(bh);
 334 }
 335 
 336 static inline void put_last_lru(struct buffer_head * bh)
 337 {
 338         if (!bh)
 339                 return;
 340         if (bh == lru_list[bh->b_list]) {
 341                 lru_list[bh->b_list] = bh->b_next_free;
 342                 return;
 343         }
 344         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 345         remove_from_lru_list(bh);
  346 /* add to back of lru list */
 347 
 348         if(!lru_list[bh->b_list]) {
 349                 lru_list[bh->b_list] = bh;
 350                 lru_list[bh->b_list]->b_prev_free = bh;
 351         };
 352 
 353         bh->b_next_free = lru_list[bh->b_list];
 354         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 355         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 356         lru_list[bh->b_list]->b_prev_free = bh;
 357 }
 358 
 359 static inline void put_last_free(struct buffer_head * bh)
 360 {
 361         int isize;
 362         if (!bh)
 363                 return;
 364 
 365         isize = BUFSIZE_INDEX(bh->b_size);      
 366         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 367 /* add to back of free list */
 368 
 369         if(!free_list[isize]) {
 370                 free_list[isize] = bh;
 371                 bh->b_prev_free = bh;
 372         };
 373 
 374         nr_free[isize]++;
 375         bh->b_next_free = free_list[isize];
 376         bh->b_prev_free = free_list[isize]->b_prev_free;
 377         free_list[isize]->b_prev_free->b_next_free = bh;
 378         free_list[isize]->b_prev_free = bh;
 379 }
 380 
 381 static inline void insert_into_queues(struct buffer_head * bh)
 382 {
 383 /* put at end of free list */
 384 
 385         if(bh->b_dev == 0xffff) {
 386                 put_last_free(bh);
 387                 return;
 388         };
 389         if(!lru_list[bh->b_list]) {
 390                 lru_list[bh->b_list] = bh;
 391                 bh->b_prev_free = bh;
 392         };
 393         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 394         bh->b_next_free = lru_list[bh->b_list];
 395         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 396         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 397         lru_list[bh->b_list]->b_prev_free = bh;
 398         nr_buffers_type[bh->b_list]++;
 399         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 400 /* put the buffer in new hash-queue if it has a device */
 401         bh->b_prev = NULL;
 402         bh->b_next = NULL;
 403         if (!bh->b_dev)
 404                 return;
 405         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 406         hash(bh->b_dev,bh->b_blocknr) = bh;
 407         if (bh->b_next)
 408                 bh->b_next->b_prev = bh;
 409 }
 410 
 411 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 412 {               
 413         struct buffer_head * tmp;
 414 
 415         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 416                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 417                         if (tmp->b_size == size)
 418                                 return tmp;
 419                         else {
 420                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 421                                                         MAJOR(dev), MINOR(dev));
 422                                 return NULL;
 423                         }
 424         return NULL;
 425 }
 426 
 427 /*
 428  * Why like this, I hear you say... The reason is race-conditions.
 429  * As we don't lock buffers (unless we are reading them, that is),
 430  * something might happen to it while we sleep (ie a read-error
 431  * will force it bad). This shouldn't really happen currently, but
 432  * the code is ready.
 433  */
 434 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 435 {
 436         struct buffer_head * bh;
 437 
 438         for (;;) {
 439                 if (!(bh=find_buffer(dev,block,size)))
 440                         return NULL;
 441                 bh->b_count++;
 442                 wait_on_buffer(bh);
 443                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 444                         return bh;
 445                 bh->b_count--;
 446         }
 447 }
 448 
 449 void set_blocksize(dev_t dev, int size)
 450 {
 451         int i, nlist;
 452         struct buffer_head * bh, *bhnext;
 453 
 454         if (!blksize_size[MAJOR(dev)])
 455                 return;
 456 
 457         switch(size) {
 458                 default: panic("Invalid blocksize passed to set_blocksize");
 459                 case 512: case 1024: case 2048: case 4096:;
 460         }
 461 
 462         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 463                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 464                 return;
 465         }
 466         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 467                 return;
 468         sync_buffers(dev, 2);
 469         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 470 
 471   /* We need to be quite careful how we do this - we are moving entries
 472      around on the free list, and we can get in a loop if we are not careful.*/
 473 
 474         for(nlist = 0; nlist < NR_LIST; nlist++) {
 475                 bh = lru_list[nlist];
 476                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 477                         if(!bh) break;
 478                         bhnext = bh->b_next_free; 
 479                         if (bh->b_dev != dev)
 480                                  continue;
 481                         if (bh->b_size == size)
 482                                  continue;
 483                         
 484                         wait_on_buffer(bh);
 485                         if (bh->b_dev == dev && bh->b_size != size) {
 486                                 bh->b_uptodate = bh->b_dirt = bh->b_req =
 487                                          bh->b_flushtime = 0;
 488                         };
 489                         remove_from_hash_queue(bh);
 490                 }
 491         }
 492 }
 493 
 494 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
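/*
 * BADNESS() folds the dirty and lock bits into a small reclaim cost:
 * 0 = clean and unlocked, 1 = locked, 2 = dirty, 3 = dirty and locked.
 * refill_freelist() below only takes candidates with BADNESS() == 0.
 */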
 495 
 496 void refill_freelist(int size)
 497 {
 498         struct buffer_head * bh, * tmp;
 499         struct buffer_head * candidate[NR_LIST];
 500         unsigned int best_time, winner;
 501         int isize = BUFSIZE_INDEX(size);
 502         int buffers[NR_LIST];
 503         int i;
 504         int needed;
 505 
 506         /* First see if we even need this.  Sometimes it is advantageous
  507          to request some blocks in a filesystem that we know we will
 508          be needing ahead of time. */
 509 
 510         if (nr_free[isize] > 100)
 511                 return;
 512 
 513         /* If there are too many dirty buffers, we wake up the update process
 514            now so as to ensure that there are still clean buffers available
 515            for user processes to use (and dirty) */
 516         
 517         /* We are going to try and locate this much memory */
  518         needed = bdf_prm.b_un.nrefill * size;
 519 
 520         while (nr_free_pages > min_free_pages*2 && needed > 0 &&
 521                grow_buffers(GFP_BUFFER, size)) {
 522                 needed -= PAGE_SIZE;
 523         }
 524 
 525         if(needed <= 0) return;
 526 
 527         /* See if there are too many buffers of a different size.
 528            If so, victimize them */
 529 
 530         while(maybe_shrink_lav_buffers(size))
 531          {
 532                  if(!grow_buffers(GFP_BUFFER, size)) break;
 533                  needed -= PAGE_SIZE;
 534                  if(needed <= 0) return;
 535          };
 536 
 537         /* OK, we cannot grow the buffer cache, now try and get some
 538            from the lru list */
 539 
 540         /* First set the candidate pointers to usable buffers.  This
 541            should be quick nearly all of the time. */
 542 
 543 repeat0:
 544         for(i=0; i<NR_LIST; i++){
 545                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 546                    nr_buffers_type[i] == 0) {
 547                         candidate[i] = NULL;
 548                         buffers[i] = 0;
 549                         continue;
 550                 }
 551                 buffers[i] = nr_buffers_type[i];
 552                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 553                  {
 554                          if(buffers[i] < 0) panic("Here is the problem");
 555                          tmp = bh->b_next_free;
 556                          if (!bh) break;
 557                          
 558                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 559                              bh->b_dirt) {
 560                                  refile_buffer(bh);
 561                                  continue;
 562                          };
 563                          
 564                          if (bh->b_count || bh->b_size != size)
 565                                   continue;
 566                          
 567                          /* Buffers are written in the order they are placed 
 568                             on the locked list. If we encounter a locked
 569                             buffer here, this means that the rest of them
 570                             are also locked */
 571                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 572                                  buffers[i] = 0;
 573                                  break;
 574                          }
 575                          
 576                          if (BADNESS(bh)) continue;
 577                          break;
 578                  };
 579                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 580                 else candidate[i] = bh;
 581                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 582         }
 583         
 584  repeat:
 585         if(needed <= 0) return;
 586         
 587         /* Now see which candidate wins the election */
 588         
 589         winner = best_time = UINT_MAX;  
 590         for(i=0; i<NR_LIST; i++){
 591                 if(!candidate[i]) continue;
 592                 if(candidate[i]->b_lru_time < best_time){
 593                         best_time = candidate[i]->b_lru_time;
 594                         winner = i;
 595                 }
 596         }
 597         
 598         /* If we have a winner, use it, and then get a new candidate from that list */
 599         if(winner != UINT_MAX) {
 600                 i = winner;
 601                 bh = candidate[i];
 602                 candidate[i] = bh->b_next_free;
 603                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 604                 if (bh->b_count || bh->b_size != size)
 605                          panic("Busy buffer in candidate list\n");
 606                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 607                          panic("Shared buffer in candidate list\n");
 608                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 609                 
 610                 if(bh->b_dev == 0xffff) panic("Wrong list");
 611                 remove_from_queues(bh);
 612                 bh->b_dev = 0xffff;
 613                 put_last_free(bh);
 614                 needed -= bh->b_size;
 615                 buffers[i]--;
 616                 if(buffers[i] < 0) panic("Here is the problem");
 617                 
 618                 if(buffers[i] == 0) candidate[i] = NULL;
 619                 
 620                 /* Now all we need to do is advance the candidate pointer
 621                    from the winner list to the next usable buffer */
 622                 if(candidate[i] && buffers[i] > 0){
 623                         if(buffers[i] <= 0) panic("Here is another problem");
 624                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 625                                 if(buffers[i] < 0) panic("Here is the problem");
 626                                 tmp = bh->b_next_free;
 627                                 if (!bh) break;
 628                                 
 629                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 630                                     bh->b_dirt) {
 631                                         refile_buffer(bh);
 632                                         continue;
 633                                 };
 634                                 
 635                                 if (bh->b_count || bh->b_size != size)
 636                                          continue;
 637                                 
 638                                 /* Buffers are written in the order they are
 639                                    placed on the locked list.  If we encounter
 640                                    a locked buffer here, this means that the
 641                                    rest of them are also locked */
 642                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 643                                         buffers[i] = 0;
 644                                         break;
 645                                 }
 646               
 647                                 if (BADNESS(bh)) continue;
 648                                 break;
 649                         };
 650                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 651                         else candidate[i] = bh;
 652                         if(candidate[i] && candidate[i]->b_count) 
 653                                  panic("Here is the problem");
 654                 }
 655                 
 656                 goto repeat;
 657         }
 658         
 659         if(needed <= 0) return;
 660         
 661         /* Too bad, that was not enough. Try a little harder to grow some. */
 662         
 663         if (nr_free_pages > 5) {
 664                 if (grow_buffers(GFP_BUFFER, size)) {
 665                         needed -= PAGE_SIZE;
 666                         goto repeat0;
 667                 };
 668         }
 669         
 670         /* and repeat until we find something good */
 671         if (!grow_buffers(GFP_ATOMIC, size))
 672                 wakeup_bdflush(1);
 673         needed -= PAGE_SIZE;
 674         goto repeat0;
 675 }
 676 
 677 /*
 678  * Ok, this is getblk, and it isn't very clear, again to hinder
 679  * race-conditions. Most of the code is seldom used, (ie repeating),
 680  * so it should be much more efficient than it looks.
 681  *
 682  * The algorithm is changed: hopefully better, and an elusive bug removed.
 683  *
 684  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 685  * when the filesystem starts to get full of dirty blocks (I hope).
 686  */
 687 struct buffer_head * getblk(dev_t dev, int block, int size)
 688 {
 689         struct buffer_head * bh;
 690         int isize = BUFSIZE_INDEX(size);
 691 
 692         /* Update this for the buffer size lav. */
 693         buffer_usage[isize]++;
 694 
 695         /* If there are too many dirty buffers, we wake up the update process
 696            now so as to ensure that there are still clean buffers available
 697            for user processes to use (and dirty) */
 698 repeat:
 699         bh = get_hash_table(dev, block, size);
 700         if (bh) {
 701                 if (bh->b_uptodate && !bh->b_dirt)
 702                          put_last_lru(bh);
 703                 if(!bh->b_dirt) bh->b_flushtime = 0;
 704                 return bh;
 705         }
 706 
 707         while(!free_list[isize]) refill_freelist(size);
 708         
 709         if (find_buffer(dev,block,size))
 710                  goto repeat;
 711 
 712         bh = free_list[isize];
 713         remove_from_free_list(bh);
 714 
  715 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 716 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 717         bh->b_count=1;
 718         bh->b_dirt=0;
 719         bh->b_lock=0;
 720         bh->b_uptodate=0;
 721         bh->b_flushtime = 0;
 722         bh->b_req=0;
 723         bh->b_dev=dev;
 724         bh->b_blocknr=block;
 725         insert_into_queues(bh);
 726         return bh;
 727 }
 728 
 729 void set_writetime(struct buffer_head * buf, int flag)
 730 {
 731         int newtime;
 732 
 733         if (buf->b_dirt){
  734                 /* Set the flush time if not already set, or move it earlier */
 735                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 736                                      bdf_prm.b_un.age_buffer);
 737                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 738                          buf->b_flushtime = newtime;
 739         } else {
 740                 buf->b_flushtime = 0;
 741         }
 742 }
 743 
 744 
 745 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 746                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
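/*
 * The index into buffer_disposition[] is built bit by bit in
 * refile_buffer() below: bit 0 is set when the buffer's page is shared
 * (mem_map count != 1), bit 1 when the buffer is locked, bit 2 when it
 * is dirty.  For example:
 *
 *      index 0 (clean, unlocked, unshared)  -> BUF_CLEAN
 *      index 1 (shared only)                -> BUF_SHARED
 *      index 2 (locked only)                -> BUF_LOCKED
 *      index 4..7 (dirty, whatever else)    -> BUF_DIRTY
 */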
 747 
 748 void refile_buffer(struct buffer_head * buf){
 749         int i, dispose;
 750         i = 0;
 751         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 752         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 753         if(buf->b_lock) i |= 2;
 754         if(buf->b_dirt) i |= 4;
 755         dispose = buffer_disposition[i];
 756         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 757                  dispose = BUF_UNSHARED;
 758         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 759         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 760         if(dispose != buf->b_list)  {
 761                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 762                          buf->b_lru_time = jiffies;
 763                 if(dispose == BUF_LOCKED && 
 764                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 765                          dispose = BUF_LOCKED1;
 766                 remove_from_queues(buf);
 767                 buf->b_list = dispose;
 768                 insert_into_queues(buf);
 769                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 770                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 771                    bdf_prm.b_un.nfract/100)
 772                          wakeup_bdflush(0);
 773         }
 774 }
 775 
 776 void brelse(struct buffer_head * buf)
 777 {
 778         if (!buf)
 779                 return;
 780         wait_on_buffer(buf);
 781 
 782         /* If dirty, mark the time this buffer should be written back */
 783         set_writetime(buf, 0);
 784         refile_buffer(buf);
 785 
 786         if (buf->b_count) {
 787                 if (--buf->b_count)
 788                         return;
 789                 wake_up(&buffer_wait);
 790                 return;
 791         }
 792         printk("VFS: brelse: Trying to free free buffer\n");
 793 }
 794 
 795 /*
 796  * bread() reads a specified block and returns the buffer that contains
 797  * it. It returns NULL if the block was unreadable.
 798  */
 799 struct buffer_head * bread(dev_t dev, int block, int size)
 800 {
 801         struct buffer_head * bh;
 802 
 803         if (!(bh = getblk(dev, block, size))) {
 804                 printk("VFS: bread: READ error on device %d/%d\n",
 805                                                 MAJOR(dev), MINOR(dev));
 806                 return NULL;
 807         }
 808         if (bh->b_uptodate)
 809                 return bh;
 810         ll_rw_block(READ, 1, &bh);
 811         wait_on_buffer(bh);
 812         if (bh->b_uptodate)
 813                 return bh;
 814         brelse(bh);
 815         return NULL;
 816 }
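/*
 * Typical use by a filesystem, sketched here only for illustration (dev,
 * block and the destination buffer are whatever the caller already has):
 * read the block, use the data, then drop the reference with brelse().
 *
 *      struct buffer_head * bh;
 *
 *      bh = bread(dev, block, BLOCK_SIZE);
 *      if (!bh)
 *              return -EIO;
 *      memcpy(data, bh->b_data, BLOCK_SIZE);
 *      brelse(bh);
 */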
 817 
 818 /*
  819  * Ok, breada can be used like bread, but it additionally requests
  820  * read-ahead of the following blocks, limited by the file size and
  821  * the device's read_ahead setting.
 822  */
 823 
 824 #define NBUF 16
 825 
 826 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 827         unsigned int pos, unsigned int filesize)
 828 {
 829         struct buffer_head * bhlist[NBUF];
 830         unsigned int blocks;
 831         struct buffer_head * bh;
 832         int index;
 833         int i, j;
 834 
 835         if (pos >= filesize)
 836                 return NULL;
 837 
 838         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 839                 return NULL;
 840 
 841         index = BUFSIZE_INDEX(bh->b_size);
 842 
 843         if (bh->b_uptodate)
 844                 return bh;
 845 
 846         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 847 
 848         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 849                 blocks = read_ahead[MAJOR(dev)] >> index;
 850         if (blocks > NBUF)
 851                 blocks = NBUF;
 852         
 853         bhlist[0] = bh;
 854         j = 1;
 855         for(i=1; i<blocks; i++) {
 856                 bh = getblk(dev,block+i,bufsize);
 857                 if (bh->b_uptodate) {
 858                         brelse(bh);
 859                         break;
 860                 }
 861                 bhlist[j++] = bh;
 862         }
 863 
 864         /* Request the read for these buffers, and then release them */
 865         ll_rw_block(READ, j, bhlist);
 866 
 867         for(i=1; i<j; i++)
 868                 brelse(bhlist[i]);
 869 
 870         /* Wait for this buffer, and then continue on */
 871         bh = bhlist[0];
 872         wait_on_buffer(bh);
 873         if (bh->b_uptodate)
 874                 return bh;
 875         brelse(bh);
 876         return NULL;
 877 }
 878 
 879 /*
 880  * See fs/inode.c for the weird use of volatile..
 881  */
 882 static void put_unused_buffer_head(struct buffer_head * bh)
 883 {
 884         struct wait_queue * wait;
 885 
 886         wait = ((volatile struct buffer_head *) bh)->b_wait;
 887         memset(bh,0,sizeof(*bh));
 888         ((volatile struct buffer_head *) bh)->b_wait = wait;
 889         bh->b_next_free = unused_list;
 890         unused_list = bh;
 891 }
 892 
 893 static void get_more_buffer_heads(void)
 894 {
 895         int i;
 896         struct buffer_head * bh;
 897 
 898         if (unused_list)
 899                 return;
 900 
 901         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 902                 return;
 903 
 904         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 905                 bh->b_next_free = unused_list;  /* only make link */
 906                 unused_list = bh++;
 907         }
 908 }
 909 
 910 static struct buffer_head * get_unused_buffer_head(void)
 911 {
 912         struct buffer_head * bh;
 913 
 914         get_more_buffer_heads();
 915         if (!unused_list)
 916                 return NULL;
 917         bh = unused_list;
 918         unused_list = bh->b_next_free;
 919         bh->b_next_free = NULL;
 920         bh->b_data = NULL;
 921         bh->b_size = 0;
 922         bh->b_req = 0;
 923         return bh;
 924 }
 925 
 926 /*
 927  * Create the appropriate buffers when given a page for data area and
 928  * the size of each buffer.. Use the bh->b_this_page linked list to
 929  * follow the buffers created.  Return NULL if unable to create more
 930  * buffers.
 931  */
 932 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
 933 {
 934         struct buffer_head *bh, *head;
 935         unsigned long offset;
 936 
 937         head = NULL;
 938         offset = PAGE_SIZE;
 939         while ((offset -= size) < PAGE_SIZE) {
 940                 bh = get_unused_buffer_head();
 941                 if (!bh)
 942                         goto no_grow;
 943                 bh->b_this_page = head;
 944                 head = bh;
 945                 bh->b_data = (char *) (page+offset);
 946                 bh->b_size = size;
 947                 bh->b_dev = 0xffff;  /* Flag as unused */
 948         }
 949         return head;
 950 /*
 951  * In case anything failed, we just free everything we got.
 952  */
 953 no_grow:
 954         bh = head;
 955         while (bh) {
 956                 head = bh;
 957                 bh = bh->b_this_page;
 958                 put_unused_buffer_head(head);
 959         }
 960         return NULL;
 961 }
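/*
 * create_buffers() returns the buffer heads for one page chained through
 * b_this_page, ending in NULL; callers below (grow_buffers and
 * try_to_load_aligned) close the chain into a ring once the last head is
 * known.  Code that owns a completed page then visits every buffer on it
 * the way try_to_free() does:
 *
 *      struct buffer_head * tmp = bh;
 *      do {
 *              tmp = tmp->b_this_page;
 *      } while (tmp != bh);
 */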
 962 
 963 static void read_buffers(struct buffer_head * bh[], int nrbuf)
 964 {
 965         int i;
 966         int bhnum = 0;
 967         struct buffer_head * bhr[8];
 968 
 969         for (i = 0 ; i < nrbuf ; i++) {
 970                 if (bh[i] && !bh[i]->b_uptodate)
 971                         bhr[bhnum++] = bh[i];
 972         }
 973         if (bhnum)
 974                 ll_rw_block(READ, bhnum, bhr);
 975         for (i = 0 ; i < nrbuf ; i++) {
 976                 if (bh[i]) {
 977                         wait_on_buffer(bh[i]);
 978                 }
 979         }
 980 }
 981 
 982 /*
 983  * This actually gets enough info to try to align the stuff,
 984  * but we don't bother yet.. We'll have to check that nobody
 985  * else uses the buffers etc.
 986  *
 987  * "address" points to the new page we can use to move things
 988  * around..
 989  */
 990 static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
 991         unsigned long address)
 992 {
 993         while (nrbuf-- > 0)
 994                 brelse(bh[nrbuf]);
 995         return 0;
 996 }
 997 
 998 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
 999         dev_t dev, int *b, int size)
1000 {
1001         struct buffer_head * bh[8];
1002         unsigned long page;
1003         unsigned long offset;
1004         int block;
1005         int nrbuf;
1006         int aligned = 1;
1007 
1008         bh[0] = first;
1009         nrbuf = 1;
1010         page = (unsigned long) first->b_data;
1011         if (page & ~PAGE_MASK)
1012                 aligned = 0;
1013         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1014                 block = *++b;
1015                 if (!block)
1016                         goto no_go;
1017                 first = get_hash_table(dev, block, size);
1018                 if (!first)
1019                         goto no_go;
1020                 bh[nrbuf++] = first;
1021                 if (page+offset != (unsigned long) first->b_data)
1022                         aligned = 0;
1023         }
1024         if (!aligned)
1025                 return try_to_align(bh, nrbuf, address);
1026         mem_map[MAP_NR(page)]++;
1027         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1028         while (nrbuf-- > 0)
1029                 brelse(bh[nrbuf]);
1030         free_page(address);
1031         ++current->mm->min_flt;
1032         return page;
1033 no_go:
1034         while (nrbuf-- > 0)
1035                 brelse(bh[nrbuf]);
1036         return 0;
1037 }
1038 
1039 static unsigned long try_to_load_aligned(unsigned long address,
1040         dev_t dev, int b[], int size)
1041 {
1042         struct buffer_head * bh, * tmp, * arr[8];
1043         unsigned long offset;
1044         int isize = BUFSIZE_INDEX(size);
1045         int * p;
1046         int block;
1047 
1048         bh = create_buffers(address, size);
1049         if (!bh)
1050                 return 0;
1051         /* do any of the buffers already exist? punt if so.. */
1052         p = b;
1053         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1054                 block = *(p++);
1055                 if (!block)
1056                         goto not_aligned;
1057                 if (find_buffer(dev, block, size))
1058                         goto not_aligned;
1059         }
1060         tmp = bh;
1061         p = b;
1062         block = 0;
1063         while (1) {
1064                 arr[block++] = bh;
1065                 bh->b_count = 1;
1066                 bh->b_dirt = 0;
1067                 bh->b_flushtime = 0;
1068                 bh->b_uptodate = 0;
1069                 bh->b_req = 0;
1070                 bh->b_dev = dev;
1071                 bh->b_blocknr = *(p++);
1072                 bh->b_list = BUF_CLEAN;
1073                 nr_buffers++;
1074                 nr_buffers_size[isize]++;
1075                 insert_into_queues(bh);
1076                 if (bh->b_this_page)
1077                         bh = bh->b_this_page;
1078                 else
1079                         break;
1080         }
1081         buffermem += PAGE_SIZE;
1082         bh->b_this_page = tmp;
1083         mem_map[MAP_NR(address)]++;
1084         buffer_pages[MAP_NR(address)] = bh;
1085         read_buffers(arr,block);
1086         while (block-- > 0)
1087                 brelse(arr[block]);
1088         ++current->mm->maj_flt;
1089         return address;
1090 not_aligned:
1091         while ((tmp = bh) != NULL) {
1092                 bh = bh->b_this_page;
1093                 put_unused_buffer_head(tmp);
1094         }
1095         return 0;
1096 }
1097 
1098 /*
1099  * Try-to-share-buffers tries to minimize memory use by trying to keep
1100  * both code pages and the buffer area in the same page. This is done by
1101  * (a) checking if the buffers are already aligned correctly in memory and
1102  * (b) if none of the buffer heads are in memory at all, trying to load
1103  * them into memory the way we want them.
1104  *
1105  * This doesn't guarantee that the memory is shared, but should under most
1106  * circumstances work very well indeed (ie >90% sharing of code pages on
1107  * demand-loadable executables).
1108  */
1109 static inline unsigned long try_to_share_buffers(unsigned long address,
1110         dev_t dev, int *b, int size)
1111 {
1112         struct buffer_head * bh;
1113         int block;
1114 
1115         block = b[0];
1116         if (!block)
1117                 return 0;
1118         bh = get_hash_table(dev, block, size);
1119         if (bh)
1120                 return check_aligned(bh, address, dev, b, size);
1121         return try_to_load_aligned(address, dev, b, size);
1122 }
1123 
1124 /*
1125  * bread_page reads four buffers into memory at the desired address. It's
1126  * a function of its own, as there is some speed to be got by reading them
1127  * all at the same time, not waiting for one to be read, and then another
1128  * etc. This also allows us to optimize memory usage by sharing code pages
1129  * and filesystem buffers..
1130  */
1131 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
1132 {
1133         struct buffer_head * bh[8];
1134         unsigned long where;
1135         int i, j;
1136 
1137         if (!no_share) {
1138                 where = try_to_share_buffers(address, dev, b, size);
1139                 if (where)
1140                         return where;
1141         }
1142         ++current->mm->maj_flt;
1143         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1144                 bh[i] = NULL;
1145                 if (b[i])
1146                         bh[i] = getblk(dev, b[i], size);
1147         }
1148         read_buffers(bh,i);
1149         where = address;
1150         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
1151                 if (bh[i]) {
1152                         if (bh[i]->b_uptodate)
1153                                 memcpy((void *) where, bh[i]->b_data, size);
1154                         brelse(bh[i]);
1155                 }
1156         }
1157         return address;
1158 }
1159 
1160 /*
1161  * Try to increase the number of buffers available: the size argument
1162  * is used to determine what kind of buffers we want.
1163  */
1164 static int grow_buffers(int pri, int size)
1165 {
1166         unsigned long page;
1167         struct buffer_head *bh, *tmp;
1168         struct buffer_head * insert_point;
1169         int isize;
1170 
1171         if ((size & 511) || (size > PAGE_SIZE)) {
1172                 printk("VFS: grow_buffers: size = %d\n",size);
1173                 return 0;
1174         }
1175 
1176         isize = BUFSIZE_INDEX(size);
1177 
1178         if (!(page = __get_free_page(pri)))
1179                 return 0;
1180         bh = create_buffers(page, size);
1181         if (!bh) {
1182                 free_page(page);
1183                 return 0;
1184         }
1185 
1186         insert_point = free_list[isize];
1187 
1188         tmp = bh;
1189         while (1) {
1190                 nr_free[isize]++;
1191                 if (insert_point) {
1192                         tmp->b_next_free = insert_point->b_next_free;
1193                         tmp->b_prev_free = insert_point;
1194                         insert_point->b_next_free->b_prev_free = tmp;
1195                         insert_point->b_next_free = tmp;
1196                 } else {
1197                         tmp->b_prev_free = tmp;
1198                         tmp->b_next_free = tmp;
1199                 }
1200                 insert_point = tmp;
1201                 ++nr_buffers;
1202                 if (tmp->b_this_page)
1203                         tmp = tmp->b_this_page;
1204                 else
1205                         break;
1206         }
1207         free_list[isize] = bh;
1208         buffer_pages[MAP_NR(page)] = bh;
1209         tmp->b_this_page = bh;
1210         wake_up(&buffer_wait);
1211         buffermem += PAGE_SIZE;
1212         return 1;
1213 }
1214 
1215 /*
1216  * try_to_free() checks if all the buffers on this particular page
 1217  * are unused, and frees the page if so.
1218  */
1219 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1220 {
1221         unsigned long page;
1222         struct buffer_head * tmp, * p;
1223         int isize = BUFSIZE_INDEX(bh->b_size);
1224 
1225         *bhp = bh;
1226         page = (unsigned long) bh->b_data;
1227         page &= PAGE_MASK;
1228         tmp = bh;
1229         do {
1230                 if (!tmp)
1231                         return 0;
1232                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1233                         return 0;
1234                 tmp = tmp->b_this_page;
1235         } while (tmp != bh);
1236         tmp = bh;
1237         do {
1238                 p = tmp;
1239                 tmp = tmp->b_this_page;
1240                 nr_buffers--;
1241                 nr_buffers_size[isize]--;
1242                 if (p == *bhp)
1243                   {
1244                     *bhp = p->b_prev_free;
1245                     if (p == *bhp) /* Was this the last in the list? */
1246                       *bhp = NULL;
1247                   }
1248                 remove_from_queues(p);
1249                 put_unused_buffer_head(p);
1250         } while (tmp != bh);
1251         buffermem -= PAGE_SIZE;
1252         buffer_pages[MAP_NR(page)] = NULL;
1253         free_page(page);
1254         return !mem_map[MAP_NR(page)];
1255 }
1256 
1257 
1258 /*
1259  * Consult the load average for buffers and decide whether or not
1260  * we should shrink the buffers of one size or not.  If we decide yes,
 1261  * do it and return 1.  Else return 0.  Do not attempt to shrink the
 1262  * size that is specified.
1263  *
1264  * I would prefer not to use a load average, but the way things are now it
1265  * seems unavoidable.  The way to get rid of it would be to force clustering
1266  * universally, so that when we reclaim buffers we always reclaim an entire
1267  * page.  Doing this would mean that we all need to move towards QMAGIC.
1268  */
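/*
 * Rearranging the integer test used below (and ignoring truncation), a
 * size is victimized when its share of the total load average drops
 * below its share of the unshared buffers, scaled down by lav_const:
 *
 *      buffers_lav[i]        1        nr_buffers_size[i] - shared[i]
 *      --------------  <  ---------  * ------------------------------
 *        total_lav        lav_const          total_n_buffers
 *
 * where shared[i] is shorthand for nr_buffers_st[i][BUF_SHARED].
 */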
1269 
1270 static int maybe_shrink_lav_buffers(int size)
1271 {          
1272         int nlist;
1273         int isize;
1274         int total_lav, total_n_buffers, n_sizes;
1275         
1276         /* Do not consider the shared buffers since they would not tend
1277            to have getblk called very often, and this would throw off
1278            the lav.  They are not easily reclaimable anyway (let the swapper
1279            make the first move). */
1280   
1281         total_lav = total_n_buffers = n_sizes = 0;
1282         for(nlist = 0; nlist < NR_SIZES; nlist++)
1283          {
1284                  total_lav += buffers_lav[nlist];
1285                  if(nr_buffers_size[nlist]) n_sizes++;
1286                  total_n_buffers += nr_buffers_size[nlist];
1287                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1288          }
1289         
1290         /* See if we have an excessive number of buffers of a particular
1291            size - if so, victimize that bunch. */
1292   
1293         isize = (size ? BUFSIZE_INDEX(size) : -1);
1294         
1295         if (n_sizes > 1)
1296                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1297                   {
1298                           if(nlist == isize) continue;
1299                           if(nr_buffers_size[nlist] &&
1300                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1301                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1302                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1303                                             return 1;
1304                   }
1305         return 0;
1306 }
1307 /*
1308  * Try to free up some pages by shrinking the buffer-cache
1309  *
1310  * Priority tells the routine how hard to try to shrink the
1311  * buffers: 3 means "don't bother too much", while a value
1312  * of 0 means "we'd better get some free pages now".
1313  */
1314 int shrink_buffers(unsigned int priority)
1315 {
1316         if (priority < 2) {
1317                 sync_buffers(0,0);
1318         }
1319 
1320         if(priority == 2) wakeup_bdflush(1);
1321 
1322         if(maybe_shrink_lav_buffers(0)) return 1;
1323 
1324         /* No good candidate size - take any size we can find */
1325         return shrink_specific_buffers(priority, 0);
1326 }
1327 
1328 static int shrink_specific_buffers(unsigned int priority, int size)
1329 {
1330         struct buffer_head *bh;
1331         int nlist;
1332         int i, isize, isize1;
1333 
1334 #ifdef DEBUG
1335         if(size) printk("Shrinking buffers of size %d\n", size);
1336 #endif
1337         /* First try the free lists, and see if we can get a complete page
1338            from here */
1339         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1340 
1341         for(isize = 0; isize<NR_SIZES; isize++){
1342                 if(isize1 != -1 && isize1 != isize) continue;
1343                 bh = free_list[isize];
1344                 if(!bh) continue;
1345                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1346                         if (bh->b_count || !bh->b_this_page)
1347                                  continue;
1348                         if (try_to_free(bh, &bh))
1349                                  return 1;
1350                         if(!bh) break; /* Some interrupt must have used it after we
1351                                           freed the page.  No big deal - keep looking */
1352                 }
1353         }
1354         
1355         /* Not enough in the free lists, now try the lru list */
1356         
1357         for(nlist = 0; nlist < NR_LIST; nlist++) {
1358         repeat1:
1359                 if(priority > 3 && nlist == BUF_SHARED) continue;
1360                 bh = lru_list[nlist];
1361                 if(!bh) continue;
1362                 i = nr_buffers_type[nlist] >> priority;
1363                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1364                         /* We may have stalled while waiting for I/O to complete. */
1365                         if(bh->b_list != nlist) goto repeat1;
1366                         if (bh->b_count || !bh->b_this_page)
1367                                  continue;
1368                         if(size && bh->b_size != size) continue;
1369                         if (bh->b_lock)
1370                                  if (priority)
1371                                           continue;
1372                                  else
1373                                           wait_on_buffer(bh);
1374                         if (bh->b_dirt) {
1375                                 bh->b_count++;
1376                                 bh->b_flushtime = 0;
1377                                 ll_rw_block(WRITEA, 1, &bh);
1378                                 bh->b_count--;
1379                                 continue;
1380                         }
1381                         if (try_to_free(bh, &bh))
1382                                  return 1;
1383                         if(!bh) break;
1384                 }
1385         }
1386         return 0;
1387 }
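
/*
 * Note on the "nr_buffers_type[nlist] >> priority" bound above: a priority-3
 * call scans at most one eighth of each lru list, while a priority-0 call
 * examines the whole list.  For example (illustrative numbers), with 800
 * buffers on the clean list a priority-3 call inspects at most 100 of them.
 */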
1388 
1389 
1390 void show_buffers(void)
     /* [previous][next][first][last][top][bottom][index][help] */
1391 {
1392         struct buffer_head * bh;
1393         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1394         int shared;
1395         int nlist, isize;
1396 
1397         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1398         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1399         printk("Buffer blocks:   %6d\n",nr_buffers);
1400 
1401         for(nlist = 0; nlist < NR_LIST; nlist++) {
1402           shared = found = locked = dirty = used = lastused = 0;
1403           bh = lru_list[nlist];
1404           if(!bh) continue;
1405           do {
1406                 found++;
1407                 if (bh->b_lock)
1408                         locked++;
1409                 if (bh->b_dirt)
1410                         dirty++;
1411                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1412                 if (bh->b_count)
1413                         used++, lastused = found;
1414                 bh = bh->b_next_free;
1415               } while (bh != lru_list[nlist]);
1416         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1417                 nlist, found, used, lastused, locked, dirty, shared);
1418         };
1419         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1420         for(isize = 0; isize<NR_SIZES; isize++){
1421                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1422                        buffers_lav[isize], nr_free[isize]);
1423                 for(nlist = 0; nlist < NR_LIST; nlist++)
1424                          printk("%7d ", nr_buffers_st[isize][nlist]);
1425                 printk("\n");
1426         }
1427 }
1428 
1429 /*
1430  * try_to_reassign() checks if all the buffers on this particular page
1431  * are unused, and if so reassigns them to a new cluster.
1432  */
1433 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
     /* [previous][next][first][last][top][bottom][index][help] */
1434                            dev_t dev, unsigned int starting_block)
1435 {
1436         unsigned long page;
1437         struct buffer_head * tmp, * p;
1438 
1439         *bhp = bh;
1440         page = (unsigned long) bh->b_data;
1441         page &= PAGE_MASK;
1442         if(mem_map[MAP_NR(page)] != 1) return 0;
1443         tmp = bh;
1444         do {
1445                 if (!tmp)
1446                          return 0;
1447                 
1448                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1449                          return 0;
1450                 tmp = tmp->b_this_page;
1451         } while (tmp != bh);
1452         tmp = bh;
1453         
1454         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1455                  tmp = tmp->b_this_page;
1456         
1457         /* This is the buffer at the head of the page */
1458         bh = tmp;
1459         do {
1460                 p = tmp;
1461                 tmp = tmp->b_this_page;
1462                 remove_from_queues(p);
1463                 p->b_dev=dev;
1464                 p->b_uptodate = 0;
1465                 p->b_req = 0;
1466                 p->b_blocknr=starting_block++;
1467                 insert_into_queues(p);
1468         } while (tmp != bh);
1469         return 1;
1470 }
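
/*
 * In effect an idle page is relabelled in place: for example (illustrative
 * numbers only), a page holding four 1K buffers for blocks 40-43 of one
 * device can be rethreaded onto the queues as blocks starting_block through
 * starting_block+3 of "dev" without any I/O; b_uptodate is cleared, so the
 * new contents will presumably be read in before the buffers are used.
 */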
1471 
1472 /*
1473  * Try to find a free cluster by locating a page where
1474  * all of the buffers are unused.  We would like this function
1475  * to be atomic, so we do not call anything that might cause
1476  * the process to sleep.  The priority is somewhat similar to
1477  * the priority used in shrink_buffers.
1478  * 
1479  * My thinking is that the kernel should end up using whole
1480  * pages for the buffer cache as much of the time as possible.
1481  * This way the other buffers on a particular page are likely
1482  * to be very near each other on the free list, and we will not
1483  * be expiring data prematurely.  For now we only cannibalize buffers
1484  * of the same size to keep the code simpler.
1485  */
1486 static int reassign_cluster(dev_t dev, 
     /* [previous][next][first][last][top][bottom][index][help] */
1487                      unsigned int starting_block, int size)
1488 {
1489         struct buffer_head *bh;
1490         int isize = BUFSIZE_INDEX(size);
1491         int i;
1492 
1493         /* We want to give ourselves a really good shot at generating
1494            a cluster, and since we only take buffers from the free
1495            list, we "overfill" it a little. */
1496 
1497         while(nr_free[isize] < 32) refill_freelist(size);
1498 
1499         bh = free_list[isize];
1500         if(bh)
1501                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1502                          if (!bh->b_this_page)  continue;
1503                          if (try_to_reassign(bh, &bh, dev, starting_block))
1504                                  return 4;
1505                  }
1506         return 0;
1507 }
1508 
1509 /* This function tries to generate a new cluster of buffers
1510  * from a new page in memory.  We should only do this if we have
1511  * not expanded the buffer cache to the maximum size that we allow.
1512  */
1513 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
1514 {
1515         struct buffer_head * bh, * tmp, * arr[8];
1516         int isize = BUFSIZE_INDEX(size);
1517         unsigned long offset;
1518         unsigned long page;
1519         int nblock;
1520 
1521         page = get_free_page(GFP_NOBUFFER);
1522         if(!page) return 0;
1523 
1524         bh = create_buffers(page, size);
1525         if (!bh) {
1526                 free_page(page);
1527                 return 0;
1528         };
1529         nblock = block;
1530         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1531                 if (find_buffer(dev, nblock++, size))
1532                          goto not_aligned;
1533         }
1534         tmp = bh;
1535         nblock = 0;
1536         while (1) {
1537                 arr[nblock++] = bh;
1538                 bh->b_count = 1;
1539                 bh->b_dirt = 0;
1540                 bh->b_flushtime = 0;
1541                 bh->b_lock = 0;
1542                 bh->b_uptodate = 0;
1543                 bh->b_req = 0;
1544                 bh->b_dev = dev;
1545                 bh->b_list = BUF_CLEAN;
1546                 bh->b_blocknr = block++;
1547                 nr_buffers++;
1548                 nr_buffers_size[isize]++;
1549                 insert_into_queues(bh);
1550                 if (bh->b_this_page)
1551                         bh = bh->b_this_page;
1552                 else
1553                         break;
1554         }
1555         buffermem += PAGE_SIZE;
1556         buffer_pages[MAP_NR(page)] = bh;
1557         bh->b_this_page = tmp;
1558         while (nblock-- > 0)
1559                 brelse(arr[nblock]);
1560         return 4;
1561 not_aligned:
1562         while ((tmp = bh) != NULL) {
1563                 bh = bh->b_this_page;
1564                 put_unused_buffer_head(tmp);
1565         }
1566         free_page(page);
1567         return 0;
1568 }
1569 
1570 unsigned long generate_cluster(dev_t dev, int b[], int size)
     /* [previous][next][first][last][top][bottom][index][help] */
1571 {
1572         int i, offset;
1573         
1574         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1575                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1576                 if(find_buffer(dev, b[i], size)) return 0;
1577         };
1578 
1579         /* OK, we have a candidate for a new cluster */
1580         
1581         /* See if one size of buffer is over-represented in the buffer cache;
1582            if so, reduce the number of buffers of that size */
1583         if(maybe_shrink_lav_buffers(size))
1584          {
1585                  int retval;
1586                  retval = try_to_generate_cluster(dev, b[0], size);
1587                  if(retval) return retval;
1588          };
1589         
1590         if (nr_free_pages > min_free_pages*2) 
1591                  return try_to_generate_cluster(dev, b[0], size);
1592         else
1593                  return reassign_cluster(dev, b[0], size);
1594 }
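
/*
 * Hypothetical caller, for illustration only: a filesystem path that wants a
 * page-aligned cluster of 1K buffers for four consecutive blocks might set up
 * the block array like this (block numbers are made up; 4K pages assumed).
 * On success the blocks can then be picked up with getblk()/bread(); on
 * failure the caller falls back to the ordinary one-block-at-a-time path.
 */
#if 0
static void example_cluster_setup(dev_t dev)
{
        int b[4] = { 40, 41, 42, 43 };  /* PAGE_SIZE/size consecutive blocks */

        if (generate_cluster(dev, b, 1024))
                return;         /* a page-aligned cluster now backs these blocks */
        /* otherwise use getblk()/bread() on each block individually */
}
#endif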
1595 
1596 /*
1597  * This initializes the initial buffer free list.  nr_buffers_type is set
1598  * to one less than the actual number of buffers, as a sop to backwards
1599  * compatibility --- the old code did this (I think unintentionally,
1600  * but I'm not sure), and programs in the ps package expect it.
1601  *                                      - TYT 8/30/92
1602  */
1603 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
1604 {
1605         int i;
1606         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1607 
1608         if (high_memory >= 4*1024*1024) {
1609                 if(high_memory >= 16*1024*1024)
1610                          nr_hash = 16381;
1611                 else
1612                          nr_hash = 4093;
1613         } else {
1614                 nr_hash = 997;
1615         };
1616         
1617         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1618                                                      sizeof(struct buffer_head *));
1619 
1620 
1621         buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) * 
1622                                                      sizeof(struct buffer_head *));
1623         for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1624                 buffer_pages[i] = NULL;
1625 
1626         for (i = 0 ; i < nr_hash ; i++)
1627                 hash_table[i] = NULL;
1628         lru_list[BUF_CLEAN] = 0;
1629         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1630         if (!free_list[isize])
1631                 panic("VFS: Unable to initialize buffer free list!");
1632         return;
1633 }
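
/*
 * Note: the hash-table sizes chosen above (997, 4093 and 16381) are primes
 * just below 1024, 4096 and 16384, presumably so that the buffer hash spreads
 * entries reasonably evenly across the buckets.
 */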
1634 
1635 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1636  * response to dirty buffers.  Once this process is activated, we write back
1637  * a limited number of buffers to the disks and then go back to sleep again.
1638  * In effect this is a process which never leaves kernel mode, and does not have
1639  * any user memory associated with it except for the stack.  There is also
1640  * a kernel stack page, which obviously must be separate from the user stack.
1641  */
1642 struct wait_queue * bdflush_wait = NULL;
1643 struct wait_queue * bdflush_done = NULL;
1644 
1645 static int bdflush_running = 0;
1646 
1647 static void wakeup_bdflush(int wait)
     /* [previous][next][first][last][top][bottom][index][help] */
1648 {
1649         if(!bdflush_running){
1650                 printk("Warning - bdflush not running\n");
1651                 sync_buffers(0,0);
1652                 return;
1653         };
1654         wake_up(&bdflush_wait);
1655         if(wait) sleep_on(&bdflush_done);
1656 }
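
/*
 * Note on the handshake above: with wait != 0 the caller sleeps on
 * bdflush_done, which the daemon wakes at the end of every flushing pass (see
 * the wake_up(&bdflush_done) in sys_bdflush() below), so the caller does not
 * return until at least one pass over the dirty list has completed.
 */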
1657 
1658 
1659 
1660 /* 
1661  * Here we attempt to write back old buffers.  We also try to flush inodes
1662  * and superblocks, since this function is essentially "update", and
1663  * otherwise there would be no way of ensuring that these quantities ever
1664  * get written back.  Ideally, we would have a timestamp on the inodes
1665  * and superblocks so that we could write back only the old ones as well.
1666  */
1667 
1668 asmlinkage int sync_old_buffers(void)
     /* [previous][next][first][last][top][bottom][index][help] */
1669 {
1670         int i, isize;
1671         int ndirty, nwritten;
1672         int nlist;
1673         int ncount;
1674         struct buffer_head * bh, *next;
1675 
1676         sync_supers(0);
1677         sync_inodes(0);
1678 
1679         ncount = 0;
1680 #ifdef DEBUG
1681         for(nlist = 0; nlist < NR_LIST; nlist++)
1682 #else
1683         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1684 #endif
1685         {
1686                 ndirty = 0;
1687                 nwritten = 0;
1688         repeat:
1689                 bh = lru_list[nlist];
1690                 if(bh) 
1691                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1692                                  /* We may have stalled while waiting for I/O to complete. */
1693                                  if(bh->b_list != nlist) goto repeat;
1694                                  next = bh->b_next_free;
1695                                  if(!lru_list[nlist]) {
1696                                          printk("Dirty list empty %d\n", i);
1697                                          break;
1698                                  }
1699                                  
1700                                  /* Clean buffer on dirty list?  Refile it */
1701                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1702                                   {
1703                                           refile_buffer(bh);
1704                                           continue;
1705                                   }
1706                                  
1707                                  if (bh->b_lock || !bh->b_dirt)
1708                                           continue;
1709                                  ndirty++;
1710                                  if(bh->b_flushtime > jiffies) continue;
1711                                  nwritten++;
1712                                  bh->b_count++;
1713                                  bh->b_flushtime = 0;
1714 #ifdef DEBUG
1715                                  if(nlist != BUF_DIRTY) ncount++;
1716 #endif
1717                                  ll_rw_block(WRITE, 1, &bh);
1718                                  bh->b_count--;
1719                          }
1720         }
1721 #ifdef DEBUG
1722         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1723         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1724 #endif
1725         
1726         /* We assume that we only come through here on a regular
1727            schedule, like every 5 seconds.  Now update the load averages
1728            and reset the usage counts so that they cannot overflow. */
1729         for(isize = 0; isize<NR_SIZES; isize++){
1730                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1731                 buffer_usage[isize] = 0;
1732         };
1733         return 0;
1734 }
1735 
1736 
1737 /* This is the interface to bdflush.  As we get more sophisticated, we can
1738  * pass tuning parameters to this "process", to adjust how it behaves.  Invoking
1739  * it again after the daemon has been started simply modifies the tuning
1740  * parameters.  We would want to verify each parameter, however,
1741  * to make sure that it is reasonable. */
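
/*
 * Summary of the func encoding handled below, written as a hypothetical
 * user-level usage sketch (assuming a bdflush() syscall wrapper, e.g. one
 * generated with the _syscall2 macro; parameter numbering follows
 * bdf_prm.data[]):
 *
 *      bdflush(0, 0);              turn the caller into the flush daemon
 *      bdflush(1, 0);              flush old buffers once, like "update"
 *      bdflush(2*i + 2, addr);     read tuning parameter i into *(long *) addr
 *      bdflush(2*i + 3, value);    set tuning parameter i to value
 */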
1742 
1743 asmlinkage int sys_bdflush(int func, int data)
     /* [previous][next][first][last][top][bottom][index][help] */
1744 {
1745         int i, error;
1746         int ndirty;
1747         int nlist;
1748         int ncount;
1749         struct buffer_head * bh, *next;
1750 
1751         if (!suser())
1752                 return -EPERM;
1753 
1754         if (func == 1)
1755                  return sync_old_buffers();
1756 
1757         /* Basically func 0 starts the daemon, func 1 flushes old buffers; for func >= 2, an even value reads parameter (func-2)>>1 and an odd value writes it */
1758         if (func >= 2) {
1759                 i = (func-2) >> 1;
1760                 if (i < 0 || i >= N_PARAM)
1761                         return -EINVAL;
1762                 if((func & 1) == 0) {
1763                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1764                         if (error)
1765                                 return error;
1766                         put_fs_long(bdf_prm.data[i], data);
1767                         return 0;
1768                 };
1769                 if (data < bdflush_min[i] || data > bdflush_max[i])
1770                         return -EINVAL;
1771                 bdf_prm.data[i] = data;
1772                 return 0;
1773         };
1774         
1775         if (bdflush_running)
1776                 return -EBUSY; /* Only one copy of this running at one time */
1777         bdflush_running++;
1778         
1779         /* OK, from here on is the daemon */
1780         
1781         for (;;) {
1782 #ifdef DEBUG
1783                 printk("bdflush() activated...");
1784 #endif
1785                 
1786                 ncount = 0;
1787 #ifdef DEBUG
1788                 for(nlist = 0; nlist < NR_LIST; nlist++)
1789 #else
1790                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1791 #endif
1792                  {
1793                          ndirty = 0;
1794                  repeat:
1795                          bh = lru_list[nlist];
1796                          if(bh) 
1797                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1798                                        bh = next) {
1799                                           /* We may have stalled while waiting for I/O to complete. */
1800                                           if(bh->b_list != nlist) goto repeat;
1801                                           next = bh->b_next_free;
1802                                           if(!lru_list[nlist]) {
1803                                                   printk("Dirty list empty %d\n", i);
1804                                                   break;
1805                                           }
1806                                           
1807                                           /* Clean buffer on dirty list?  Refile it */
1808                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1809                                            {
1810                                                    refile_buffer(bh);
1811                                                    continue;
1812                                            }
1813                                           
1814                                           if (bh->b_lock || !bh->b_dirt)
1815                                                    continue;
1816                                           /* Should we write back buffers that are shared or not??
1817                                              Currently dirty buffers are not shared, so it does not matter. */
1818                                           bh->b_count++;
1819                                           ndirty++;
1820                                           bh->b_flushtime = 0;
1821                                           ll_rw_block(WRITE, 1, &bh);
1822 #ifdef DEBUG
1823                                           if(nlist != BUF_DIRTY) ncount++;
1824 #endif
1825                                           bh->b_count--;
1826                                   }
1827                  }
1828 #ifdef DEBUG
1829                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1830                 printk("sleeping again.\n");
1831 #endif
1832                 wake_up(&bdflush_done);
1833                 
1834                 /* If there are still a lot of dirty buffers around, skip the sleep
1835                    and flush some more */
1836                 
1837                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1838                    bdf_prm.b_un.nfract/100) {
1839                         if (current->signal & (1 << (SIGKILL-1))) {
1840                                 bdflush_running--;
1841                                 return 0;
1842                         }
1843                         current->signal = 0;
1844                         interruptible_sleep_on(&bdflush_wait);
1845                 }
1846         }
1847 }
1848 
1849 
1850 /*
1851  * Overrides for Emacs so that we follow Linus's tabbing style.
1852  * Emacs will notice this stuff at the end of the file and automatically
1853  * adjust the settings for this buffer only.  This must remain at the end
1854  * of the file.
1855  * ---------------------------------------------------------------------------
1856  * Local variables:
1857  * c-indent-level: 8
1858  * c-brace-imaginary-offset: 0
1859  * c-brace-offset: -8
1860  * c-argdecl-indent: 8
1861  * c-label-offset: -8
1862  * c-continued-statement-offset: 8
1863  * c-continued-brace-offset: 0
1864  * End:
1865  */
