root/fs/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. fsync_dev
  5. sys_sync
  6. file_fsync
  7. sys_fsync
  8. invalidate_buffers
  9. check_disk_change
  10. remove_from_hash_queue
  11. remove_from_lru_list
  12. remove_from_free_list
  13. remove_from_queues
  14. put_last_lru
  15. put_last_free
  16. insert_into_queues
  17. find_buffer
  18. get_hash_table
  19. set_blocksize
  20. refill_freelist
  21. getblk
  22. set_writetime
  23. refile_buffer
  24. brelse
  25. bread
  26. breada
  27. put_unused_buffer_head
  28. get_more_buffer_heads
  29. get_unused_buffer_head
  30. create_buffers
  31. read_buffers
  32. check_aligned
  33. try_to_load_aligned
  34. try_to_share_buffers
  35. bread_page
  36. grow_buffers
  37. try_to_free
  38. maybe_shrink_lav_buffers
  39. shrink_buffers
  40. shrink_specific_buffers
  41. show_buffers
  42. try_to_reassign
  43. reassign_cluster
  44. try_to_generate_cluster
  45. generate_cluster
  46. buffer_init
  47. wakeup_bdflush
  48. sync_old_buffers
  49. sys_bdflush

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18  
  19 #include <linux/config.h>
  20 #include <linux/sched.h>
  21 #include <linux/kernel.h>
  22 #include <linux/major.h>
  23 #include <linux/string.h>
  24 #include <linux/locks.h>
  25 #include <linux/errno.h>
  26 #include <linux/malloc.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/segment.h>
  30 #include <asm/io.h>
  31 
  32 #ifdef CONFIG_SCSI
  33 #ifdef CONFIG_BLK_DEV_SR
  34 extern int check_cdrom_media_change(int, int);
  35 #endif
  36 #ifdef CONFIG_BLK_DEV_SD
  37 extern int check_scsidisk_media_change(int, int);
  38 extern int revalidate_scsidisk(int, int);
  39 #endif
  40 #endif
  41 #ifdef CONFIG_CDU31A
  42 extern int check_cdu31a_media_change(int, int);
  43 #endif
  44 #ifdef CONFIG_MCD
  45 extern int check_mcd_media_change(int, int);
  46 #endif
  47 
  48 #define NR_SIZES 4
  49 static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
  50 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
  51 
  52 #define BUFSIZE_INDEX(X) (buffersize_index[(X)>>9])
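/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the two tables above give a constant-time mapping between a buffer size
 * and its index into the per-size lists.  For example, BUFSIZE_INDEX(2048)
 * expands to buffersize_index[4], which is 2, and bufferindex_size[2] maps
 * back to 2048.  A hypothetical self-check could look like this:
 */
#if 0
static void check_buffer_size_tables(void)
{
        int size;

        for (size = 512; size <= 4096; size <<= 1)
                if (bufferindex_size[BUFSIZE_INDEX(size)] != size)
                        printk("buffer size tables disagree for size %d\n", size);
}
#endif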
  53 
  54 static int grow_buffers(int pri, int size);
  55 static int shrink_specific_buffers(unsigned int priority, int size);
  56 static int maybe_shrink_lav_buffers(int);
  57 
  58 static int nr_hash = 0;  /* Size of hash table */
  59 static struct buffer_head ** hash_table;
  60 struct buffer_head ** buffer_pages;
  61 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
  62 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
  63 static struct buffer_head * unused_list = NULL;
  64 static struct wait_queue * buffer_wait = NULL;
  65 
  66 int nr_buffers = 0;
  67 int nr_buffers_type[NR_LIST] = {0,};
  68 int nr_buffers_size[NR_SIZES] = {0,};
  69 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
  70 int buffer_usage[NR_SIZES] = {0,};  /* Usage counts used to determine load average */
  71 int buffers_lav[NR_SIZES] = {0,};  /* Load average of buffer usage */
  72 int nr_free[NR_SIZES] = {0,};
  73 int buffermem = 0;
  74 int nr_buffer_heads = 0;
  75 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  76 extern int *blksize_size[];
  77 
  78 /* Here is the parameter block for the bdflush process. */
  79 static void wakeup_bdflush(int);
  80 
  81 #define N_PARAM 9
  82 #define LAV
  83 
  84 static union bdflush_param{
  85         struct {
  86                 int nfract;  /* Percentage of buffer cache dirty to 
  87                                 activate bdflush */
  88                 int ndirty;  /* Maximum number of dirty blocks to write out per
  89                                 wake-cycle */
  90                 int nrefill; /* Number of clean buffers to try and obtain
  91                                 each time we call refill */
  92                 int nref_dirt; /* Dirty buffer threshold for activating bdflush
  93                                   when trying to refill buffers. */
  94                 int clu_nfract;  /* Percentage of buffer cache to scan to 
  95                                     search for free clusters */
  96                 int age_buffer;  /* Time for normal buffer to age before 
  97                                     we flush it */
  98                 int age_super;  /* Time for superblock to age before we 
  99                                    flush it */
 100                 int lav_const;  /* Constant used for load average (time
  101                                    constant) */
 102                 int lav_ratio;  /* Used to determine how low a lav for a
 103                                    particular size can go before we start to
 104                                    trim back the buffers */
 105         } b_un;
 106         unsigned int data[N_PARAM];
 107 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
 108 
 109 /* The lav constant is set for 1 minute, as long as the update process runs
 110    every 5 seconds.  If you change the frequency of update, the time
 111    constant will also change. */
 112 
 113 
 114 /* These are the min and max parameter values that we will allow to be assigned */
 115 static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
 116 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
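/*
 * Editor's note -- illustrative reading of the defaults above, not part of
 * the original file: nfract = 25 means bdflush is woken once more than 25%
 * of the non-shared buffers are dirty (the test lives in refile_buffer()
 * below), and age_buffer = 3000 jiffies is roughly 30 seconds assuming the
 * traditional 100 Hz tick.  A sketch of the wake-up condition:
 */
#if 0
static int too_many_dirty_buffers(void)
{
        /* mirrors the threshold test used in refile_buffer() */
        return nr_buffers_type[BUF_DIRTY] >
               (nr_buffers - nr_buffers_type[BUF_SHARED]) *
               bdf_prm.b_un.nfract / 100;
}
#endif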
 117 
 118 /*
 119  * Rewrote the wait-routines to use the "new" wait-queue functionality,
 120  * and getting rid of the cli-sti pairs. The wait-queue routines still
 121  * need cli-sti, but now it's just a couple of 386 instructions or so.
 122  *
 123  * Note that the real wait_on_buffer() is an inline function that checks
 124  * if 'b_wait' is set before calling this, so that the queues aren't set
 125  * up unnecessarily.
 126  */
 127 void __wait_on_buffer(struct buffer_head * bh)
 128 {
 129         struct wait_queue wait = { current, NULL };
 130 
 131         bh->b_count++;
 132         add_wait_queue(&bh->b_wait, &wait);
 133 repeat:
 134         current->state = TASK_UNINTERRUPTIBLE;
 135         if (bh->b_lock) {
 136                 schedule();
 137                 goto repeat;
 138         }
 139         remove_wait_queue(&bh->b_wait, &wait);
 140         bh->b_count--;
 141         current->state = TASK_RUNNING;
 142 }
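/*
 * Editor's note -- sketch of the inline wrapper mentioned above, assuming
 * it lives in <linux/locks.h>; shown here only to illustrate why the queue
 * set-up in __wait_on_buffer() is usually skipped for unlocked buffers:
 */
#if 0
extern inline void wait_on_buffer(struct buffer_head * bh)
{
        if (bh->b_lock)
                __wait_on_buffer(bh);
}
#endif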
 143 
 144 /* Call sync_buffers with wait!=0 to ensure that the call does not
 145    return until all buffer writes have completed.  Sync() may return
 146    before the writes have finished; fsync() may not. */
 147 
 148 
 149 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
 150    spontaneously dirty themselves without ever brelse being called.
 151    We will ultimately want to put these in a separate list, but for
 152    now we search all of the lists for dirty buffers */
 153 
 154 static int sync_buffers(dev_t dev, int wait)
 155 {
 156         int i, retry, pass = 0, err = 0;
 157         int nlist, ncount;
 158         struct buffer_head * bh, *next;
 159 
 160         /* One pass for no-wait, three for wait:
 161            0) write out all dirty, unlocked buffers;
 162            1) write out all dirty buffers, waiting if locked;
 163            2) wait for completion by waiting for all buffers to unlock. */
 164  repeat:
 165         retry = 0;
 166         ncount = 0;
 167         /* We search all lists as a failsafe mechanism, not because we expect
 168            there to be dirty buffers on any of the other lists. */
 169         for(nlist = 0; nlist < NR_LIST; nlist++)
 170          {
 171          repeat1:
 172                  bh = lru_list[nlist];
 173                  if(!bh) continue;
 174                  for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
 175                          if(bh->b_list != nlist) goto repeat1;
 176                          next = bh->b_next_free;
 177                          if(!lru_list[nlist]) break;
 178                          if (dev && bh->b_dev != dev)
 179                                   continue;
 180                          if (bh->b_lock)
 181                           {
 182                                   /* Buffer is locked; skip it unless wait is
 183                                      requested AND pass > 0. */
 184                                   if (!wait || !pass) {
 185                                           retry = 1;
 186                                           continue;
 187                                   }
 188                                   wait_on_buffer (bh);
 189                           }
 190                          /* If an unlocked buffer is not uptodate, there has
 191                              been an IO error. Skip it. */
 192                          if (wait && bh->b_req && !bh->b_lock &&
 193                              !bh->b_dirt && !bh->b_uptodate) {
 194                                   err = 1;
 195                                   printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
 196                                   continue;
 197                           }
 198                          /* Don't write clean buffers.  Don't write ANY buffers
 199                             on the third pass. */
 200                          if (!bh->b_dirt || pass>=2)
 201                                   continue;
 202                          bh->b_count++;
 203                          bh->b_flushtime = 0;
 204                          ll_rw_block(WRITE, 1, &bh);
 205 
 206                          if(nlist != BUF_DIRTY) { 
 207                                  printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
 208                                  ncount++;
 209                          };
 210                          bh->b_count--;
 211                          retry = 1;
 212                  }
 213          }
 214         if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
 215         
 216         /* If we are waiting for the sync to succeed, and if any dirty
 217            blocks were written, then repeat; on the second pass, only
 218            wait for buffers being written (do not pass to write any
 219            more buffers on the second pass). */
 220         if (wait && retry && ++pass<=2)
 221                  goto repeat;
 222         return err;
 223 }
 224 
 225 void sync_dev(dev_t dev)
 226 {
 227         sync_buffers(dev, 0);
 228         sync_supers(dev);
 229         sync_inodes(dev);
 230         sync_buffers(dev, 0);
 231 }
 232 
 233 int fsync_dev(dev_t dev)
 234 {
 235         sync_buffers(dev, 0);
 236         sync_supers(dev);
 237         sync_inodes(dev);
 238         return sync_buffers(dev, 1);
 239 }
 240 
 241 asmlinkage int sys_sync(void)
 242 {
 243         sync_dev(0);
 244         return 0;
 245 }
 246 
 247 int file_fsync (struct inode *inode, struct file *filp)
 248 {
 249         return fsync_dev(inode->i_dev);
 250 }
 251 
 252 asmlinkage int sys_fsync(unsigned int fd)
 253 {
 254         struct file * file;
 255         struct inode * inode;
 256 
 257         if (fd>=NR_OPEN || !(file=current->filp[fd]) || !(inode=file->f_inode))
 258                 return -EBADF;
 259         if (!file->f_op || !file->f_op->fsync)
 260                 return -EINVAL;
 261         if (file->f_op->fsync(inode,file))
 262                 return -EIO;
 263         return 0;
 264 }
 265 
 266 void invalidate_buffers(dev_t dev)
 267 {
 268         int i;
 269         int nlist;
 270         struct buffer_head * bh;
 271 
 272         for(nlist = 0; nlist < NR_LIST; nlist++) {
 273                 bh = lru_list[nlist];
 274                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; 
 275                      bh = bh->b_next_free) {
 276                         if (bh->b_dev != dev)
 277                                  continue;
 278                         wait_on_buffer(bh);
 279                         if (bh->b_dev == dev)
 280                                  bh->b_flushtime = bh->b_uptodate = 
 281                                           bh->b_dirt = bh->b_req = 0;
 282                 }
 283         }
 284 }
 285 
 286 /*
 287  * This routine checks whether a floppy has been changed, and
 288  * invalidates all buffer-cache-entries in that case. This
 289  * is a relatively slow routine, so we have to try to minimize using
 290  * it. Thus it is called only upon a 'mount' or 'open'. This
 291  * is the best way of combining speed and utility, I think.
 292  * People changing diskettes in the middle of an operation deserve
  293  * to lose :-)
 294  *
 295  * NOTE! Although currently this is only for floppies, the idea is
 296  * that any additional removable block-device will use this routine,
 297  * and that mount/open needn't know that floppies/whatever are
 298  * special.
 299  */
 300 void check_disk_change(dev_t dev)
 301 {
 302         int i;
 303         struct buffer_head * bh;
 304 
 305         switch(MAJOR(dev)){
 306         case FLOPPY_MAJOR:
 307                 if (!(bh = getblk(dev,0,1024)))
 308                         return;
 309                 i = floppy_change(bh);
 310                 brelse(bh);
 311                 break;
 312 
 313 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
 314          case SCSI_DISK_MAJOR:
 315                 i = check_scsidisk_media_change(dev, 0);
 316                 break;
 317 #endif
 318 
 319 #if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
 320          case SCSI_CDROM_MAJOR:
 321                 i = check_cdrom_media_change(dev, 0);
 322                 break;
 323 #endif
 324 
 325 #if defined(CONFIG_CDU31A)
 326          case CDU31A_CDROM_MAJOR:
 327                 i = check_cdu31a_media_change(dev, 0);
 328                 break;
 329 #endif
 330 
 331 #if defined(CONFIG_MCD)
 332          case MITSUMI_CDROM_MAJOR:
 333                 i = check_mcd_media_change(dev, 0);
 334                 break;
 335 #endif
 336 
 337          default:
 338                 return;
 339         };
 340 
 341         if (!i) return;
 342 
 343         printk("VFS: Disk change detected on device %d/%d\n",
 344                                         MAJOR(dev), MINOR(dev));
 345         for (i=0 ; i<NR_SUPER ; i++)
 346                 if (super_blocks[i].s_dev == dev)
 347                         put_super(super_blocks[i].s_dev);
 348         invalidate_inodes(dev);
 349         invalidate_buffers(dev);
 350 
 351 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
  352 /* This is trickier for a removable hard disk, because we have to invalidate
 353    all of the partitions that lie on the disk. */
 354         if (MAJOR(dev) == SCSI_DISK_MAJOR)
 355                 revalidate_scsidisk(dev, 0);
 356 #endif
 357 }
 358 
 359 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
 360 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 361 
 362 static inline void remove_from_hash_queue(struct buffer_head * bh)
 363 {
 364         if (bh->b_next)
 365                 bh->b_next->b_prev = bh->b_prev;
 366         if (bh->b_prev)
 367                 bh->b_prev->b_next = bh->b_next;
 368         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 369                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 370         bh->b_next = bh->b_prev = NULL;
 371 }
 372 
 373 static inline void remove_from_lru_list(struct buffer_head * bh)
 374 {
 375         if (!(bh->b_prev_free) || !(bh->b_next_free))
 376                 panic("VFS: LRU block list corrupted");
 377         if (bh->b_dev == 0xffff) panic("LRU list corrupted");
 378         bh->b_prev_free->b_next_free = bh->b_next_free;
 379         bh->b_next_free->b_prev_free = bh->b_prev_free;
 380 
 381         if (lru_list[bh->b_list] == bh)
 382                  lru_list[bh->b_list] = bh->b_next_free;
 383         if(lru_list[bh->b_list] == bh)
 384                  lru_list[bh->b_list] = NULL;
 385         bh->b_next_free = bh->b_prev_free = NULL;
 386 }
 387 
 388 static inline void remove_from_free_list(struct buffer_head * bh)
 389 {
 390         int isize = BUFSIZE_INDEX(bh->b_size);
 391         if (!(bh->b_prev_free) || !(bh->b_next_free))
 392                 panic("VFS: Free block list corrupted");
 393         if(bh->b_dev != 0xffff) panic("Free list corrupted");
 394         if(!free_list[isize])
 395                  panic("Free list empty");
 396         nr_free[isize]--;
 397         if(bh->b_next_free == bh)
 398                  free_list[isize] = NULL;
 399         else {
 400                 bh->b_prev_free->b_next_free = bh->b_next_free;
 401                 bh->b_next_free->b_prev_free = bh->b_prev_free;
 402                 if (free_list[isize] == bh)
 403                          free_list[isize] = bh->b_next_free;
 404         };
 405         bh->b_next_free = bh->b_prev_free = NULL;
 406 }
 407 
 408 static inline void remove_from_queues(struct buffer_head * bh)
 409 {
 410         if(bh->b_dev == 0xffff) {
 411                 remove_from_free_list(bh); /* Free list entries should not be
 412                                               in the hash queue */
 413                 return;
 414         };
 415         nr_buffers_type[bh->b_list]--;
 416         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
 417         remove_from_hash_queue(bh);
 418         remove_from_lru_list(bh);
 419 }
 420 
 421 static inline void put_last_lru(struct buffer_head * bh)
 422 {
 423         if (!bh)
 424                 return;
 425         if (bh == lru_list[bh->b_list]) {
 426                 lru_list[bh->b_list] = bh->b_next_free;
 427                 return;
 428         }
 429         if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
 430         remove_from_lru_list(bh);
  431 /* add to back of lru list */
 432 
 433         if(!lru_list[bh->b_list]) {
 434                 lru_list[bh->b_list] = bh;
 435                 lru_list[bh->b_list]->b_prev_free = bh;
 436         };
 437 
 438         bh->b_next_free = lru_list[bh->b_list];
 439         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 440         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 441         lru_list[bh->b_list]->b_prev_free = bh;
 442 }
 443 
 444 static inline void put_last_free(struct buffer_head * bh)
 445 {
 446         int isize;
 447         if (!bh)
 448                 return;
 449 
 450         isize = BUFSIZE_INDEX(bh->b_size);      
 451         bh->b_dev = 0xffff;  /* So it is obvious we are on the free list */
 452 /* add to back of free list */
 453 
 454         if(!free_list[isize]) {
 455                 free_list[isize] = bh;
 456                 bh->b_prev_free = bh;
 457         };
 458 
 459         nr_free[isize]++;
 460         bh->b_next_free = free_list[isize];
 461         bh->b_prev_free = free_list[isize]->b_prev_free;
 462         free_list[isize]->b_prev_free->b_next_free = bh;
 463         free_list[isize]->b_prev_free = bh;
 464 }
 465 
 466 static inline void insert_into_queues(struct buffer_head * bh)
 467 {
 468 /* put at end of free list */
 469 
 470         if(bh->b_dev == 0xffff) {
 471                 put_last_free(bh);
 472                 return;
 473         };
 474         if(!lru_list[bh->b_list]) {
 475                 lru_list[bh->b_list] = bh;
 476                 bh->b_prev_free = bh;
 477         };
 478         if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
 479         bh->b_next_free = lru_list[bh->b_list];
 480         bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
 481         lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
 482         lru_list[bh->b_list]->b_prev_free = bh;
 483         nr_buffers_type[bh->b_list]++;
 484         nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
 485 /* put the buffer in new hash-queue if it has a device */
 486         bh->b_prev = NULL;
 487         bh->b_next = NULL;
 488         if (!bh->b_dev)
 489                 return;
 490         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 491         hash(bh->b_dev,bh->b_blocknr) = bh;
 492         if (bh->b_next)
 493                 bh->b_next->b_prev = bh;
 494 }
 495 
 496 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
 497 {               
 498         struct buffer_head * tmp;
 499 
 500         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 501                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 502                         if (tmp->b_size == size)
 503                                 return tmp;
 504                         else {
 505                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 506                                                         MAJOR(dev), MINOR(dev));
 507                                 return NULL;
 508                         }
 509         return NULL;
 510 }
 511 
 512 /*
 513  * Why like this, I hear you say... The reason is race-conditions.
  514  * As we don't lock buffers (unless we are reading them, that is),
 515  * something might happen to it while we sleep (ie a read-error
 516  * will force it bad). This shouldn't really happen currently, but
 517  * the code is ready.
 518  */
 519 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
 520 {
 521         struct buffer_head * bh;
 522 
 523         for (;;) {
 524                 if (!(bh=find_buffer(dev,block,size)))
 525                         return NULL;
 526                 bh->b_count++;
 527                 wait_on_buffer(bh);
 528                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 529                         return bh;
 530                 bh->b_count--;
 531         }
 532 }
 533 
 534 void set_blocksize(dev_t dev, int size)
 535 {
 536         int i, nlist;
 537         struct buffer_head * bh, *bhnext;
 538 
 539         if (!blksize_size[MAJOR(dev)])
 540                 return;
 541 
 542         switch(size) {
 543                 default: panic("Invalid blocksize passed to set_blocksize");
 544                 case 512: case 1024: case 2048: case 4096:;
 545         }
 546 
 547         if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
 548                 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 549                 return;
 550         }
 551         if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
 552                 return;
 553         sync_buffers(dev, 2);
 554         blksize_size[MAJOR(dev)][MINOR(dev)] = size;
 555 
 556   /* We need to be quite careful how we do this - we are moving entries
 557      around on the free list, and we can get in a loop if we are not careful.*/
 558 
 559         for(nlist = 0; nlist < NR_LIST; nlist++) {
 560                 bh = lru_list[nlist];
 561                 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
 562                         if(!bh) break;
 563                         bhnext = bh->b_next_free; 
 564                         if (bh->b_dev != dev)
 565                                  continue;
 566                         if (bh->b_size == size)
 567                                  continue;
 568                         
 569                         wait_on_buffer(bh);
 570                         if (bh->b_dev == dev && bh->b_size != size) {
 571                                 bh->b_uptodate = bh->b_dirt = 
 572                                          bh->b_flushtime = 0;
 573                         };
 574                         remove_from_hash_queue(bh);
 575                 }
 576         }
 577 }
 578 
 579 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
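/*
 * Editor's note -- worked values, not part of the original file:
 * BADNESS(bh) is 0 for a clean, unlocked buffer, 1 if only locked,
 * 2 if only dirty and 3 if both dirty and locked.  refill_freelist()
 * below treats any nonzero value as "leave this buffer alone".
 */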
 580 
 581 void refill_freelist(int size)
 582 {
 583         struct buffer_head * bh, * tmp;
 584         struct buffer_head * candidate[NR_LIST];
 585         unsigned int best_time, winner;
 586         int isize = BUFSIZE_INDEX(size);
 587         int buffers[NR_LIST];
 588         int i;
 589         int needed;
 590 
 591         /* First see if we even need this.  Sometimes it is advantageous
  592          to request some blocks in a filesystem that we know we will
 593          be needing ahead of time. */
 594 
 595         if (nr_free[isize] > 100)
 596                 return;
 597 
 598         /* If there are too many dirty buffers, we wake up the update process
 599            now so as to ensure that there are still clean buffers available
 600            for user processes to use (and dirty) */
 601         
 602         /* We are going to try and locate this much memory */
  603         needed = bdf_prm.b_un.nrefill * size;
 604 
 605         while (nr_free_pages > min_free_pages && needed > 0 &&
 606                grow_buffers(GFP_BUFFER, size)) {
 607                 needed -= PAGE_SIZE;
 608         }
 609 
 610         if(needed <= 0) return;
 611 
 612         /* See if there are too many buffers of a different size.
 613            If so, victimize them */
 614 
 615         while(maybe_shrink_lav_buffers(size))
 616          {
 617                  if(!grow_buffers(GFP_BUFFER, size)) break;
 618                  needed -= PAGE_SIZE;
 619                  if(needed <= 0) return;
 620          };
 621 
 622         /* OK, we cannot grow the buffer cache, now try and get some
 623            from the lru list */
 624 
 625         /* First set the candidate pointers to usable buffers.  This
 626            should be quick nearly all of the time. */
 627 
 628 repeat0:
 629         for(i=0; i<NR_LIST; i++){
 630                 if(i == BUF_DIRTY || i == BUF_SHARED || 
 631                    nr_buffers_type[i] == 0) {
 632                         candidate[i] = NULL;
 633                         buffers[i] = 0;
 634                         continue;
 635                 }
 636                 buffers[i] = nr_buffers_type[i];
 637                 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
 638                  {
 639                          if(buffers[i] < 0) panic("Here is the problem");
 640                          tmp = bh->b_next_free;
 641                          if (!bh) break;
 642                          
 643                          if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 644                              bh->b_dirt) {
 645                                  refile_buffer(bh);
 646                                  continue;
 647                          };
 648                          
 649                          if (bh->b_count || bh->b_size != size)
 650                                   continue;
 651                          
 652                          /* Buffers are written in the order they are placed 
 653                             on the locked list. If we encounter a locked
 654                             buffer here, this means that the rest of them
 655                             are also locked */
 656                          if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 657                                  buffers[i] = 0;
 658                                  break;
 659                          }
 660                          
 661                          if (BADNESS(bh)) continue;
 662                          break;
 663                  };
 664                 if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
 665                 else candidate[i] = bh;
 666                 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
 667         }
 668         
 669  repeat:
 670         if(needed <= 0) return;
 671         
 672         /* Now see which candidate wins the election */
 673         
 674         winner = best_time = UINT_MAX;  
 675         for(i=0; i<NR_LIST; i++){
 676                 if(!candidate[i]) continue;
 677                 if(candidate[i]->b_lru_time < best_time){
 678                         best_time = candidate[i]->b_lru_time;
 679                         winner = i;
 680                 }
 681         }
 682         
 683         /* If we have a winner, use it, and then get a new candidate from that list */
 684         if(winner != UINT_MAX) {
 685                 i = winner;
 686                 bh = candidate[i];
 687                 candidate[i] = bh->b_next_free;
 688                 if(candidate[i] == bh) candidate[i] = NULL;  /* Got last one */
 689                 if (bh->b_count || bh->b_size != size)
 690                          panic("Busy buffer in candidate list\n");
 691                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
 692                          panic("Shared buffer in candidate list\n");
 693                 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
 694                 
 695                 if(bh->b_dev == 0xffff) panic("Wrong list");
 696                 remove_from_queues(bh);
 697                 bh->b_dev = 0xffff;
 698                 put_last_free(bh);
 699                 needed -= bh->b_size;
 700                 buffers[i]--;
 701                 if(buffers[i] < 0) panic("Here is the problem");
 702                 
 703                 if(buffers[i] == 0) candidate[i] = NULL;
 704                 
 705                 /* Now all we need to do is advance the candidate pointer
 706                    from the winner list to the next usable buffer */
 707                 if(candidate[i] && buffers[i] > 0){
 708                         if(buffers[i] <= 0) panic("Here is another problem");
 709                         for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
 710                                 if(buffers[i] < 0) panic("Here is the problem");
 711                                 tmp = bh->b_next_free;
 712                                 if (!bh) break;
 713                                 
 714                                 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
 715                                     bh->b_dirt) {
 716                                         refile_buffer(bh);
 717                                         continue;
 718                                 };
 719                                 
 720                                 if (bh->b_count || bh->b_size != size)
 721                                          continue;
 722                                 
 723                                 /* Buffers are written in the order they are
 724                                    placed on the locked list.  If we encounter
 725                                    a locked buffer here, this means that the
 726                                    rest of them are also locked */
 727                                 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
 728                                         buffers[i] = 0;
 729                                         break;
 730                                 }
 731               
 732                                 if (BADNESS(bh)) continue;
 733                                 break;
 734                         };
 735                         if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
 736                         else candidate[i] = bh;
 737                         if(candidate[i] && candidate[i]->b_count) 
 738                                  panic("Here is the problem");
 739                 }
 740                 
 741                 goto repeat;
 742         }
 743         
 744         if(needed <= 0) return;
 745         
 746         /* Too bad, that was not enough. Try a little harder to grow some. */
 747         
 748         if (nr_free_pages > 5) {
 749                 if (grow_buffers(GFP_BUFFER, size)) {
 750                         needed -= PAGE_SIZE;
 751                         goto repeat0;
 752                 };
 753         }
 754         
 755         /* and repeat until we find something good */
 756         if (!grow_buffers(GFP_ATOMIC, size))
 757                 wakeup_bdflush(1);
 758         needed -= PAGE_SIZE;
 759         goto repeat0;
 760 }
 761 
 762 /*
 763  * Ok, this is getblk, and it isn't very clear, again to hinder
 764  * race-conditions. Most of the code is seldom used, (ie repeating),
 765  * so it should be much more efficient than it looks.
 766  *
  767  * The algorithm is changed: hopefully better, and an elusive bug removed.
 768  *
 769  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 770  * when the filesystem starts to get full of dirty blocks (I hope).
 771  */
 772 struct buffer_head * getblk(dev_t dev, int block, int size)
 773 {
 774         struct buffer_head * bh;
 775         int isize = BUFSIZE_INDEX(size);
 776 
 777         /* Update this for the buffer size lav. */
 778         buffer_usage[isize]++;
 779 
 780         /* If there are too many dirty buffers, we wake up the update process
 781            now so as to ensure that there are still clean buffers available
 782            for user processes to use (and dirty) */
 783 repeat:
 784         bh = get_hash_table(dev, block, size);
 785         if (bh) {
 786                 if (bh->b_uptodate && !bh->b_dirt)
 787                          put_last_lru(bh);
 788                 if(!bh->b_dirt) bh->b_flushtime = 0;
 789                 return bh;
 790         }
 791 
 792         while(!free_list[isize]) refill_freelist(size);
 793         
 794         if (find_buffer(dev,block,size))
 795                  goto repeat;
 796 
 797         bh = free_list[isize];
 798         remove_from_free_list(bh);
 799 
  800 /* OK, FINALLY we know that this buffer is the only one of its kind, */
 801 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 802         bh->b_count=1;
 803         bh->b_dirt=0;
 804         bh->b_lock=0;
 805         bh->b_uptodate=0;
 806         bh->b_flushtime = 0;
 807         bh->b_req=0;
 808         bh->b_dev=dev;
 809         bh->b_blocknr=block;
 810         insert_into_queues(bh);
 811         return bh;
 812 }
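/*
 * Editor's note -- illustrative write path, not part of the original file.
 * A filesystem that wants to write a block through the cache would,
 * roughly, do something like this (device, block number and data are
 * hypothetical):
 */
#if 0
static void write_block_example(dev_t dev, int block, char * data)
{
        struct buffer_head * bh = getblk(dev, block, 1024);

        memcpy(bh->b_data, data, 1024);         /* fill in the new contents */
        bh->b_uptodate = 1;                     /* buffer now holds valid data */
        bh->b_dirt = 1;                         /* must eventually reach disk */
        brelse(bh);                             /* write-back happens later */
}
#endif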
 813 
 814 void set_writetime(struct buffer_head * buf, int flag)
 815 {
 816         int newtime;
 817 
 818         if (buf->b_dirt){
 819                 /* Move buffer to dirty list if jiffies is clear */
 820                 newtime = jiffies + (flag ? bdf_prm.b_un.age_super : 
 821                                      bdf_prm.b_un.age_buffer);
 822                 if(!buf->b_flushtime || buf->b_flushtime > newtime)
 823                          buf->b_flushtime = newtime;
 824         } else {
 825                 buf->b_flushtime = 0;
 826         }
 827 }
 828 
 829 
 830 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED, 
 831                                       BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
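/*
 * Editor's note -- how the table above is indexed (worked example, not part
 * of the original file): refile_buffer() builds a 3-bit index where bit 0
 * means the page is shared, bit 1 means the buffer is locked and bit 2
 * means it is dirty.  So index 0 -> BUF_CLEAN, indices 1 and 3 -> BUF_SHARED,
 * index 2 -> BUF_LOCKED, and any dirty combination (4..7) -> BUF_DIRTY.
 */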
 832 
 833 void refile_buffer(struct buffer_head * buf){
 834         int i, dispose;
 835         i = 0;
 836         if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
 837         if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
 838         if(buf->b_lock) i |= 2;
 839         if(buf->b_dirt) i |= 4;
 840         dispose = buffer_disposition[i];
 841         if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
 842                  dispose = BUF_UNSHARED;
 843         if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
 844         if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
 845         if(dispose != buf->b_list)  {
 846                 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
 847                          buf->b_lru_time = jiffies;
 848                 if(dispose == BUF_LOCKED && 
 849                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
 850                          dispose = BUF_LOCKED1;
 851                 remove_from_queues(buf);
 852                 buf->b_list = dispose;
 853                 insert_into_queues(buf);
 854                 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] > 
 855                    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
 856                    bdf_prm.b_un.nfract/100)
 857                          wakeup_bdflush(0);
 858         }
 859 }
 860 
 861 void brelse(struct buffer_head * buf)
 862 {
 863         if (!buf)
 864                 return;
 865         wait_on_buffer(buf);
 866 
 867         /* If dirty, mark the time this buffer should be written back */
 868         set_writetime(buf, 0);
 869         refile_buffer(buf);
 870 
 871         if (buf->b_count) {
 872                 if (--buf->b_count)
 873                         return;
 874                 wake_up(&buffer_wait);
 875                 return;
 876         }
 877         printk("VFS: brelse: Trying to free free buffer\n");
 878 }
 879 
 880 /*
 881  * bread() reads a specified block and returns the buffer that contains
 882  * it. It returns NULL if the block was unreadable.
 883  */
 884 struct buffer_head * bread(dev_t dev, int block, int size)
 885 {
 886         struct buffer_head * bh;
 887 
 888         if (!(bh = getblk(dev, block, size))) {
 889                 printk("VFS: bread: READ error on device %d/%d\n",
 890                                                 MAJOR(dev), MINOR(dev));
 891                 return NULL;
 892         }
 893         if (bh->b_uptodate)
 894                 return bh;
 895         ll_rw_block(READ, 1, &bh);
 896         wait_on_buffer(bh);
 897         if (bh->b_uptodate)
 898                 return bh;
 899         brelse(bh);
 900         return NULL;
 901 }
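/*
 * Editor's note -- typical read path, not part of the original file.  The
 * caller owns one reference on the returned buffer and must drop it with
 * brelse() when done (the block number here is hypothetical):
 */
#if 0
static void read_block_example(dev_t dev)
{
        struct buffer_head * bh = bread(dev, 1, 1024);

        if (!bh)
                return;                 /* unreadable block */
        /* ... examine bh->b_data while the reference is held ... */
        brelse(bh);
}
#endif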
 902 
 903 /*
  904  * Ok, breada can be used as bread, but additionally marks nearby
  905  * blocks for read-ahead, bounded by the file size and the device's
  906  * read_ahead setting.
 907  */
 908 
 909 #define NBUF 16
 910 
 911 struct buffer_head * breada(dev_t dev, int block, int bufsize,
 912         unsigned int pos, unsigned int filesize)
 913 {
 914         struct buffer_head * bhlist[NBUF];
 915         unsigned int blocks;
 916         struct buffer_head * bh;
 917         int index;
 918         int i, j;
 919 
 920         if (pos >= filesize)
 921                 return NULL;
 922 
 923         if (block < 0 || !(bh = getblk(dev,block,bufsize)))
 924                 return NULL;
 925 
 926         index = BUFSIZE_INDEX(bh->b_size);
 927 
 928         if (bh->b_uptodate)
 929                 return bh;
 930 
 931         blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
 932 
 933         if (blocks > (read_ahead[MAJOR(dev)] >> index))
 934                 blocks = read_ahead[MAJOR(dev)] >> index;
 935         if (blocks > NBUF)
 936                 blocks = NBUF;
 937         
 938         bhlist[0] = bh;
 939         j = 1;
 940         for(i=1; i<blocks; i++) {
 941                 bh = getblk(dev,block+i,bufsize);
 942                 if (bh->b_uptodate) {
 943                         brelse(bh);
 944                         break;
 945                 }
 946                 bhlist[j++] = bh;
 947         }
 948 
 949         /* Request the read for these buffers, and then release them */
 950         ll_rw_block(READ, j, bhlist);
 951 
 952         for(i=1; i<j; i++)
 953                 brelse(bhlist[i]);
 954 
 955         /* Wait for this buffer, and then continue on */
 956         bh = bhlist[0];
 957         wait_on_buffer(bh);
 958         if (bh->b_uptodate)
 959                 return bh;
 960         brelse(bh);
 961         return NULL;
 962 }
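/*
 * Editor's note -- hypothetical caller, not part of the original file.  A
 * filesystem read routine would pass the current file position and the file
 * size so breada() can bound its read-ahead (the names below are made up):
 */
#if 0
        bh = breada(inode->i_dev, phys_block, BLOCK_SIZE,
                    filp->f_pos, inode->i_size);
        if (bh) {
                /* ... copy out of bh->b_data ... */
                brelse(bh);
        }
#endif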
 963 
 964 /*
 965  * See fs/inode.c for the weird use of volatile..
 966  */
 967 static void put_unused_buffer_head(struct buffer_head * bh)
 968 {
 969         struct wait_queue * wait;
 970 
 971         wait = ((volatile struct buffer_head *) bh)->b_wait;
 972         memset((void *) bh,0,sizeof(*bh));
 973         ((volatile struct buffer_head *) bh)->b_wait = wait;
 974         bh->b_next_free = unused_list;
 975         unused_list = bh;
 976 }
 977 
 978 static void get_more_buffer_heads(void)
 979 {
 980         int i;
 981         struct buffer_head * bh;
 982 
 983         if (unused_list)
 984                 return;
 985 
 986         if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
 987                 return;
 988 
 989         for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 990                 bh->b_next_free = unused_list;  /* only make link */
 991                 unused_list = bh++;
 992         }
 993 }
 994 
 995 static struct buffer_head * get_unused_buffer_head(void)
 996 {
 997         struct buffer_head * bh;
 998 
 999         get_more_buffer_heads();
1000         if (!unused_list)
1001                 return NULL;
1002         bh = unused_list;
1003         unused_list = bh->b_next_free;
1004         bh->b_next_free = NULL;
1005         bh->b_data = NULL;
1006         bh->b_size = 0;
1007         bh->b_req = 0;
1008         return bh;
1009 }
1010 
1011 /*
1012  * Create the appropriate buffers when given a page for data area and
1013  * the size of each buffer.. Use the bh->b_this_page linked list to
1014  * follow the buffers created.  Return NULL if unable to create more
1015  * buffers.
1016  */
1017 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
1018 {
1019         struct buffer_head *bh, *head;
1020         unsigned long offset;
1021 
1022         head = NULL;
1023         offset = PAGE_SIZE;
1024         while ((offset -= size) < PAGE_SIZE) {
1025                 bh = get_unused_buffer_head();
1026                 if (!bh)
1027                         goto no_grow;
1028                 bh->b_this_page = head;
1029                 head = bh;
1030                 bh->b_data = (char *) (page+offset);
1031                 bh->b_size = size;
1032                 bh->b_dev = 0xffff;  /* Flag as unused */
1033         }
1034         return head;
1035 /*
1036  * In case anything failed, we just free everything we got.
1037  */
1038 no_grow:
1039         bh = head;
1040         while (bh) {
1041                 head = bh;
1042                 bh = bh->b_this_page;
1043                 put_unused_buffer_head(head);
1044         }
1045         return NULL;
1046 }
1047 
1048 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1049 {
1050         int i;
1051         int bhnum = 0;
1052         struct buffer_head * bhr[8];
1053 
1054         for (i = 0 ; i < nrbuf ; i++) {
1055                 if (bh[i] && !bh[i]->b_uptodate)
1056                         bhr[bhnum++] = bh[i];
1057         }
1058         if (bhnum)
1059                 ll_rw_block(READ, bhnum, bhr);
1060         for (i = 0 ; i < nrbuf ; i++) {
1061                 if (bh[i]) {
1062                         wait_on_buffer(bh[i]);
1063                 }
1064         }
1065 }
1066 
1067 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
1068         dev_t dev, int *b, int size)
1069 {
1070         struct buffer_head * bh[8];
1071         unsigned long page;
1072         unsigned long offset;
1073         int block;
1074         int nrbuf;
1075 
1076         page = (unsigned long) first->b_data;
1077         if (page & ~PAGE_MASK) {
1078                 brelse(first);
1079                 return 0;
1080         }
1081         mem_map[MAP_NR(page)]++;
1082         bh[0] = first;
1083         nrbuf = 1;
1084         for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1085                 block = *++b;
1086                 if (!block)
1087                         goto no_go;
1088                 first = get_hash_table(dev, block, size);
1089                 if (!first)
1090                         goto no_go;
1091                 bh[nrbuf++] = first;
1092                 if (page+offset != (unsigned long) first->b_data)
1093                         goto no_go;
1094         }
1095         read_buffers(bh,nrbuf);         /* make sure they are actually read correctly */
1096         while (nrbuf-- > 0)
1097                 brelse(bh[nrbuf]);
1098         free_page(address);
1099         ++current->min_flt;
1100         return page;
1101 no_go:
1102         while (nrbuf-- > 0)
1103                 brelse(bh[nrbuf]);
1104         free_page(page);
1105         return 0;
1106 }
1107 
1108 static unsigned long try_to_load_aligned(unsigned long address,
1109         dev_t dev, int b[], int size)
1110 {
1111         struct buffer_head * bh, * tmp, * arr[8];
1112         unsigned long offset;
1113         int isize = BUFSIZE_INDEX(size);
1114         int * p;
1115         int block;
1116 
1117         bh = create_buffers(address, size);
1118         if (!bh)
1119                 return 0;
1120         /* do any of the buffers already exist? punt if so.. */
1121         p = b;
1122         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1123                 block = *(p++);
1124                 if (!block)
1125                         goto not_aligned;
1126                 if (find_buffer(dev, block, size))
1127                         goto not_aligned;
1128         }
1129         tmp = bh;
1130         p = b;
1131         block = 0;
1132         while (1) {
1133                 arr[block++] = bh;
1134                 bh->b_count = 1;
1135                 bh->b_dirt = 0;
1136                 bh->b_flushtime = 0;
1137                 bh->b_uptodate = 0;
1138                 bh->b_req = 0;
1139                 bh->b_dev = dev;
1140                 bh->b_blocknr = *(p++);
1141                 bh->b_list = BUF_CLEAN;
1142                 nr_buffers++;
1143                 nr_buffers_size[isize]++;
1144                 insert_into_queues(bh);
1145                 if (bh->b_this_page)
1146                         bh = bh->b_this_page;
1147                 else
1148                         break;
1149         }
1150         buffermem += PAGE_SIZE;
1151         bh->b_this_page = tmp;
1152         mem_map[MAP_NR(address)]++;
1153         buffer_pages[address >> PAGE_SHIFT] = bh;
1154         read_buffers(arr,block);
1155         while (block-- > 0)
1156                 brelse(arr[block]);
1157         ++current->maj_flt;
1158         return address;
1159 not_aligned:
1160         while ((tmp = bh) != NULL) {
1161                 bh = bh->b_this_page;
1162                 put_unused_buffer_head(tmp);
1163         }
1164         return 0;
1165 }
1166 
1167 /*
1168  * Try-to-share-buffers tries to minimize memory use by trying to keep
1169  * both code pages and the buffer area in the same page. This is done by
1170  * (a) checking if the buffers are already aligned correctly in memory and
1171  * (b) if none of the buffer heads are in memory at all, trying to load
1172  * them into memory the way we want them.
1173  *
1174  * This doesn't guarantee that the memory is shared, but should under most
1175  * circumstances work very well indeed (ie >90% sharing of code pages on
1176  * demand-loadable executables).
1177  */
1178 static inline unsigned long try_to_share_buffers(unsigned long address,
1179         dev_t dev, int *b, int size)
1180 {
1181         struct buffer_head * bh;
1182         int block;
1183 
1184         block = b[0];
1185         if (!block)
1186                 return 0;
1187         bh = get_hash_table(dev, block, size);
1188         if (bh)
1189                 return check_aligned(bh, address, dev, b, size);
1190         return try_to_load_aligned(address, dev, b, size);
1191 }
1192 
1193 #define COPYBLK(size,from,to) \
1194 __asm__ __volatile__("rep ; movsl": \
1195         :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1196         :"cx","di","si")
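/*
 * Editor's note -- what the inline assembly above does, not part of the
 * original file: it copies 'size' bytes as size/4 longwords with
 * "rep ; movsl".  A portable (i386-independent) equivalent would simply be:
 */
#if 0
#define COPYBLK(size,from,to) memcpy((void *) (to), (void *) (from), (size))
#endif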
1197 
1198 /*
1199  * bread_page reads four buffers into memory at the desired address. It's
1200  * a function of its own, as there is some speed to be got by reading them
1201  * all at the same time, not waiting for one to be read, and then another
1202  * etc. This also allows us to optimize memory usage by sharing code pages
1203  * and filesystem buffers..
1204  */
1205 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
1206 {
1207         struct buffer_head * bh[8];
1208         unsigned long where;
1209         int i, j;
1210 
1211         if (!(prot & PAGE_RW)) {
1212                 where = try_to_share_buffers(address,dev,b,size);
1213                 if (where)
1214                         return where;
1215         }
1216         ++current->maj_flt;
1217         for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1218                 bh[i] = NULL;
1219                 if (b[i])
1220                         bh[i] = getblk(dev, b[i], size);
1221         }
1222         read_buffers(bh,i);
1223         where = address;
1224         for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
1225                 if (bh[i]) {
1226                         if (bh[i]->b_uptodate)
1227                                 COPYBLK(size, (unsigned long) bh[i]->b_data,address);
1228                         brelse(bh[i]);
1229                 }
1230         }
1231         return where;
1232 }
1233 
1234 /*
1235  * Try to increase the number of buffers available: the size argument
1236  * is used to determine what kind of buffers we want.
1237  */
1238 static int grow_buffers(int pri, int size)
1239 {
1240         unsigned long page;
1241         struct buffer_head *bh, *tmp;
1242         struct buffer_head * insert_point;
1243         int isize;
1244 
1245         if ((size & 511) || (size > PAGE_SIZE)) {
1246                 printk("VFS: grow_buffers: size = %d\n",size);
1247                 return 0;
1248         }
1249 
1250         isize = BUFSIZE_INDEX(size);
1251 
1252         if (!(page = __get_free_page(pri)))
1253                 return 0;
1254         bh = create_buffers(page, size);
1255         if (!bh) {
1256                 free_page(page);
1257                 return 0;
1258         }
1259 
1260         insert_point = free_list[isize];
1261 
1262         tmp = bh;
1263         while (1) {
1264                 nr_free[isize]++;
1265                 if (insert_point) {
1266                         tmp->b_next_free = insert_point->b_next_free;
1267                         tmp->b_prev_free = insert_point;
1268                         insert_point->b_next_free->b_prev_free = tmp;
1269                         insert_point->b_next_free = tmp;
1270                 } else {
1271                         tmp->b_prev_free = tmp;
1272                         tmp->b_next_free = tmp;
1273                 }
1274                 insert_point = tmp;
1275                 ++nr_buffers;
1276                 if (tmp->b_this_page)
1277                         tmp = tmp->b_this_page;
1278                 else
1279                         break;
1280         }
1281         free_list[isize] = bh;
1282         buffer_pages[page >> PAGE_SHIFT] = bh;
1283         tmp->b_this_page = bh;
1284         wake_up(&buffer_wait);
1285         buffermem += PAGE_SIZE;
1286         return 1;
1287 }
1288 
1289 /*
1290  * try_to_free() checks if all the buffers on this particular page
 1291  * are unused, and frees the page if so.
1292  */
1293 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1294 {
1295         unsigned long page;
1296         struct buffer_head * tmp, * p;
1297         int isize = BUFSIZE_INDEX(bh->b_size);
1298 
1299         *bhp = bh;
1300         page = (unsigned long) bh->b_data;
1301         page &= PAGE_MASK;
1302         tmp = bh;
1303         do {
1304                 if (!tmp)
1305                         return 0;
1306                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1307                         return 0;
1308                 tmp = tmp->b_this_page;
1309         } while (tmp != bh);
1310         tmp = bh;
1311         do {
1312                 p = tmp;
1313                 tmp = tmp->b_this_page;
1314                 nr_buffers--;
1315                 nr_buffers_size[isize]--;
1316                 if (p == *bhp)
1317                   {
1318                     *bhp = p->b_prev_free;
1319                     if (p == *bhp) /* Was this the last in the list? */
1320                       *bhp = NULL;
1321                   }
1322                 remove_from_queues(p);
1323                 put_unused_buffer_head(p);
1324         } while (tmp != bh);
1325         buffermem -= PAGE_SIZE;
1326         buffer_pages[page >> PAGE_SHIFT] = NULL;
1327         free_page(page);
1328         return !mem_map[MAP_NR(page)];
1329 }
1330 
1331 
1332 /*
1333  * Consult the load average for buffers and decide whether or not
1334  * we should shrink the buffers of one size or not.  If we decide yes,
 1335  * do it and return 1.  Else return 0.  Do not attempt to shrink the
 1336  * size that is specified.
1337  *
1338  * I would prefer not to use a load average, but the way things are now it
1339  * seems unavoidable.  The way to get rid of it would be to force clustering
1340  * universally, so that when we reclaim buffers we always reclaim an entire
1341  * page.  Doing this would mean that we all need to move towards QMAGIC.
1342  */
1343 
1344 static int maybe_shrink_lav_buffers(int size)
1345 {          
1346         int nlist;
1347         int isize;
1348         int total_lav, total_n_buffers, n_sizes;
1349         
1350         /* Do not consider the shared buffers since they would not tend
1351            to have getblk called very often, and this would throw off
1352            the lav.  They are not easily reclaimable anyway (let the swapper
1353            make the first move). */
1354   
1355         total_lav = total_n_buffers = n_sizes = 0;
1356         for(nlist = 0; nlist < NR_SIZES; nlist++)
1357          {
1358                  total_lav += buffers_lav[nlist];
1359                  if(nr_buffers_size[nlist]) n_sizes++;
1360                  total_n_buffers += nr_buffers_size[nlist];
1361                  total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED]; 
1362          }
1363         
1364         /* See if we have an excessive number of buffers of a particular
1365            size - if so, victimize that bunch. */
1366   
1367         isize = (size ? BUFSIZE_INDEX(size) : -1);
1368         
1369         if (n_sizes > 1)
1370                  for(nlist = 0; nlist < NR_SIZES; nlist++)
1371                   {
1372                           if(nlist == isize) continue;
1373                           if(nr_buffers_size[nlist] &&
1374                              bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers < 
1375                              total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1376                                    if(shrink_specific_buffers(6, bufferindex_size[nlist])) 
1377                                             return 1;
1378                   }
1379         return 0;
1380 }
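
/*
 * Worked example of the criterion above (illustrative numbers, not from
 * the source; the real lav_const is a tunable field of bdf_prm): take
 * lav_const = 1, total_lav = 100 and total_n_buffers = 1000.  If the 1k
 * buffers account for 400 unshared blocks but only buffers_lav = 5 of
 * the load, the test compares 1*5*1000 = 5000 against 100*400 = 40000;
 * 5000 < 40000, so the 1k size is over-represented and is shrunk via
 * shrink_specific_buffers(6, 1024).
 */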
1381 /*
1382  * Try to free up some pages by shrinking the buffer-cache
1383  *
1384  * Priority tells the routine how hard to try to shrink the
1385  * buffers: 3 means "don't bother too much", while a value
1386  * of 0 means "we'd better get some free pages now".
1387  */
1388 int shrink_buffers(unsigned int priority)
1389 {
1390         if (priority < 2) {
1391                 sync_buffers(0,0);
1392         }
1393 
1394         if(priority == 2) wakeup_bdflush(1);
1395 
1396         if(maybe_shrink_lav_buffers(0)) return 1;
1397 
1398         /* No good candidate size - take any size we can find */
1399         return shrink_specific_buffers(priority, 0);
1400 }
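
/*
 * Illustrative sketch (hypothetical caller, not the kernel's actual
 * reclaim code): a memory shortage would typically be handled by calling
 * shrink_buffers() repeatedly with more and more aggressive priorities,
 * 3 meaning "don't bother too much" and 0 meaning "we'd better get some
 * free pages now".  A nonzero return means a whole page was given back.
 */
static inline int example_reclaim_buffer_page(void)
{
        int priority;

        for (priority = 3; priority >= 0; priority--)
                if (shrink_buffers(priority))
                        return 1;       /* a page of buffers was freed */
        return 0;                       /* nothing reclaimable right now */
}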
1401 
1402 static int shrink_specific_buffers(unsigned int priority, int size)
1403 {
1404         struct buffer_head *bh;
1405         int nlist;
1406         int i, isize, isize1;
1407 
1408 #ifdef DEBUG
1409         if(size) printk("Shrinking buffers of size %d\n", size);
1410 #endif
1411         /* First try the free lists, and see if we can get a complete page
1412            from here */
1413         isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1414 
1415         for(isize = 0; isize<NR_SIZES; isize++){
1416                 if(isize1 != -1 && isize1 != isize) continue;
1417                 bh = free_list[isize];
1418                 if(!bh) continue;
1419                 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1420                         if (bh->b_count || !bh->b_this_page)
1421                                  continue;
1422                         if (try_to_free(bh, &bh))
1423                                  return 1;
1424                         if(!bh) break; /* Some interrupt must have used it after we
1425                                           freed the page.  No big deal - keep looking */
1426                 }
1427         }
1428         
1429         /* Not enough in the free lists, now try the lru list */
1430         
1431         for(nlist = 0; nlist < NR_LIST; nlist++) {
1432         repeat1:
1433                 if(priority > 3 && nlist == BUF_SHARED) continue;
1434                 bh = lru_list[nlist];
1435                 if(!bh) continue;
1436                 i = nr_buffers_type[nlist] >> priority;
1437                 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1438                         /* We may have stalled while waiting for I/O to complete. */
1439                         if(bh->b_list != nlist) goto repeat1;
1440                         if (bh->b_count || !bh->b_this_page)
1441                                  continue;
1442                         if(size && bh->b_size != size) continue;
1443                         if (bh->b_lock)
1444                                  if (priority)
1445                                           continue;
1446                                  else
1447                                           wait_on_buffer(bh);
1448                         if (bh->b_dirt) {
1449                                 bh->b_count++;
1450                                 bh->b_flushtime = 0;
1451                                 ll_rw_block(WRITEA, 1, &bh);
1452                                 bh->b_count--;
1453                                 continue;
1454                         }
1455                         if (try_to_free(bh, &bh))
1456                                  return 1;
1457                         if(!bh) break;
1458                 }
1459         }
1460         return 0;
1461 }
1462 
1463 
1464 void show_buffers(void)
1465 {
1466         struct buffer_head * bh;
1467         int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1468         int shared;
1469         int nlist, isize;
1470 
1471         printk("Buffer memory:   %6dkB\n",buffermem>>10);
1472         printk("Buffer heads:    %6d\n",nr_buffer_heads);
1473         printk("Buffer blocks:   %6d\n",nr_buffers);
1474 
1475         for(nlist = 0; nlist < NR_LIST; nlist++) {
1476           shared = found = locked = dirty = used = lastused = 0;
1477           bh = lru_list[nlist];
1478           if(!bh) continue;
1479           do {
1480                 found++;
1481                 if (bh->b_lock)
1482                         locked++;
1483                 if (bh->b_dirt)
1484                         dirty++;
1485                 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1486                 if (bh->b_count)
1487                         used++, lastused = found;
1488                 bh = bh->b_next_free;
1489               } while (bh != lru_list[nlist]);
1490         printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1491                 nlist, found, used, lastused, locked, dirty, shared);
1492         };
1493         printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
1494         for(isize = 0; isize<NR_SIZES; isize++){
1495                 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1496                        buffers_lav[isize], nr_free[isize]);
1497                 for(nlist = 0; nlist < NR_LIST; nlist++)
1498                          printk("%7d ", nr_buffers_st[isize][nlist]);
1499                 printk("\n");
1500         }
1501 }
1502 
1503 /*
1504  * try_to_reassign() checks if all the buffers on this particular page
1505  * are unused, and if so reassigns them to a new cluster.
1506  */
1507 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1508                            dev_t dev, unsigned int starting_block)
1509 {
1510         unsigned long page;
1511         struct buffer_head * tmp, * p;
1512 
1513         *bhp = bh;
1514         page = (unsigned long) bh->b_data;
1515         page &= PAGE_MASK;
1516         if(mem_map[MAP_NR(page)] != 1) return 0;
1517         tmp = bh;
1518         do {
1519                 if (!tmp)
1520                          return 0;
1521                 
1522                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1523                          return 0;
1524                 tmp = tmp->b_this_page;
1525         } while (tmp != bh);
1526         tmp = bh;
1527         
1528         while((unsigned int) tmp->b_data & (PAGE_SIZE - 1)) 
1529                  tmp = tmp->b_this_page;
1530         
1531         /* This is the buffer at the head of the page */
1532         bh = tmp;
1533         do {
1534                 p = tmp;
1535                 tmp = tmp->b_this_page;
1536                 remove_from_queues(p);
1537                 p->b_dev=dev;
1538                 p->b_uptodate = 0;
1539                 p->b_req = 0;
1540                 p->b_blocknr=starting_block++;
1541                 insert_into_queues(p);
1542         } while (tmp != bh);
1543         return 1;
1544 }
1545 
1546 /*
1547  * Try to find a free cluster by locating a page where
1548  * all of the buffers are unused.  We would like this function
1549  * to be atomic, so we do not call anything that might cause
1550  * the process to sleep.  The priority is somewhat similar to
1551  * the priority used in shrink_buffers.
1552  * 
1553  * My thinking is that the kernel should end up using whole
1554  * pages for the buffer cache as much of the time as possible.
1555  * This way the other buffers on a particular page are likely
1556  * to be very near each other on the free list, and we will not
1557  * be expiring data prematurely.  For now we only cannibalize buffers
1558  * of the same size to keep the code simpler.
1559  */
1560 static int reassign_cluster(dev_t dev, 
1561                      unsigned int starting_block, int size)
1562 {
1563         struct buffer_head *bh;
1564         int isize = BUFSIZE_INDEX(size);
1565         int i;
1566 
1567         /* We want to give ourselves a really good shot at generating
1568            a cluster, and since we only take buffers from the free
1569            list, we "overfill" it a little. */
1570 
1571         while(nr_free[isize] < 32) refill_freelist(size);
1572 
1573         bh = free_list[isize];
1574         if(bh)
1575                  for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1576                          if (!bh->b_this_page)  continue;
1577                          if (try_to_reassign(bh, &bh, dev, starting_block))
1578                                  return 4;
1579                  }
1580         return 0;
1581 }
1582 
1583 /* This function tries to generate a new cluster of buffers
1584  * from a new page in memory.  We should only do this if we have
1585  * not expanded the buffer cache to the maximum size that we allow.
1586  */
1587 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1588 {
1589         struct buffer_head * bh, * tmp, * arr[8];
1590         int isize = BUFSIZE_INDEX(size);
1591         unsigned long offset;
1592         unsigned long page;
1593         int nblock;
1594 
1595         page = get_free_page(GFP_NOBUFFER);
1596         if(!page) return 0;
1597 
1598         bh = create_buffers(page, size);
1599         if (!bh) {
1600                 free_page(page);
1601                 return 0;
1602         };
1603         nblock = block;
1604         for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1605                 if (find_buffer(dev, nblock++, size))
1606                          goto not_aligned;
1607         }
1608         tmp = bh;
1609         nblock = 0;
1610         while (1) {
1611                 arr[nblock++] = bh;
1612                 bh->b_count = 1;
1613                 bh->b_dirt = 0;
1614                 bh->b_flushtime = 0;
1615                 bh->b_lock = 0;
1616                 bh->b_uptodate = 0;
1617                 bh->b_req = 0;
1618                 bh->b_dev = dev;
1619                 bh->b_list = BUF_CLEAN;
1620                 bh->b_blocknr = block++;
1621                 nr_buffers++;
1622                 nr_buffers_size[isize]++;
1623                 insert_into_queues(bh);
1624                 if (bh->b_this_page)
1625                         bh = bh->b_this_page;
1626                 else
1627                         break;
1628         }
1629         buffermem += PAGE_SIZE;
1630         buffer_pages[page >> PAGE_SHIFT] = bh;
1631         bh->b_this_page = tmp;
1632         while (nblock-- > 0)
1633                 brelse(arr[nblock]);
1634         return 4;
1635 not_aligned:
1636         while ((tmp = bh) != NULL) {
1637                 bh = bh->b_this_page;
1638                 put_unused_buffer_head(tmp);
1639         }
1640         free_page(page);
1641         return 0;
1642 }
1643 
1644 unsigned long generate_cluster(dev_t dev, int b[], int size)
1645 {
1646         int i, offset;
1647         
1648         for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1649                 if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
1650                 if(find_buffer(dev, b[i], size)) return 0;
1651         };
1652 
1653         /* OK, we have a candidate for a new cluster */
1654         
1655         /* See if one size of buffer is over-represented in the buffer cache,
1656            if so reduce the numbers of buffers */
1657         if(maybe_shrink_lav_buffers(size))
1658          {
1659                  int retval;
1660                  retval = try_to_generate_cluster(dev, b[0], size);
1661                  if(retval) return retval;
1662          };
1663         
1664         if (nr_free_pages > min_free_pages) 
1665                  return try_to_generate_cluster(dev, b[0], size);
1666         else
1667                  return reassign_cluster(dev, b[0], size);
1668 }
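
/*
 * Illustrative sketch (hypothetical caller, not from the source): a read
 * path that wants the blocks of one page to share a single physical page
 * can build the block-number array and ask for a cluster first.  The
 * array needs PAGE_SIZE/size entries holding consecutive block numbers;
 * with 1k blocks and 4k pages that is 4 entries.  A zero return simply
 * means no cluster was generated and ordinary getblk() allocation applies.
 */
static inline void example_request_cluster(dev_t dev, int first_block, int size)
{
        int b[PAGE_SIZE / 512];         /* big enough for the smallest block size */
        int i, nblocks = PAGE_SIZE / size;

        for (i = 0; i < nblocks; i++)
                b[i] = first_block + i; /* consecutive, as generate_cluster() requires */
        generate_cluster(dev, b, size); /* best effort only */
}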
1669 
1670 /*
1671  * This initializes the initial buffer free list.  nr_buffers_type is set
1672  * to one less than the actual number of buffers, as a sop to backwards
1673  * compatibility --- the old code did this (I think unintentionally,
1674  * but I'm not sure), and programs in the ps package expect it.
1675  *                                      - TYT 8/30/92
1676  */
1677 void buffer_init(void)
1678 {
1679         int i;
1680         int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1681 
1682         if (high_memory >= 4*1024*1024) {
1683                 min_free_pages = 200;
1684                 if(high_memory >= 16*1024*1024)
1685                          nr_hash = 16381;
1686                 else
1687                          nr_hash = 4093;
1688         } else {
1689                 min_free_pages = 20;
1690                 nr_hash = 997;
1691         };
1692         
1693         hash_table = (struct buffer_head **) vmalloc(nr_hash * 
1694                                                      sizeof(struct buffer_head *));
1695 
1696 
1697         buffer_pages = (struct buffer_head **) vmalloc((high_memory >>PAGE_SHIFT) * 
1698                                                      sizeof(struct buffer_head *));
1699         for (i = 0 ; i < high_memory >> PAGE_SHIFT ; i++)
1700                 buffer_pages[i] = NULL;
1701 
1702         for (i = 0 ; i < nr_hash ; i++)
1703                 hash_table[i] = NULL;
1704         lru_list[BUF_CLEAN] = 0;
1705         grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1706         if (!free_list[isize])
1707                 panic("VFS: Unable to initialize buffer free list!");
1708         return;
1709 }
1710 
1711 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1712  * response to dirty buffers.  Once this process is activated, we write back
1713  * a limited number of buffers to the disks and then go back to sleep again.
1714  * In effect this is a process which never leaves kernel mode, and does not have
1715  * any user memory associated with it except for the stack.  There is also
1716  * a kernel stack page, which obviously must be separate from the user stack.
1717  */
1718 struct wait_queue * bdflush_wait = NULL;
1719 struct wait_queue * bdflush_done = NULL;
1720 
1721 static int bdflush_running = 0;
1722 
1723 static void wakeup_bdflush(int wait)
1724 {
1725         if(!bdflush_running){
1726                 printk("Warning - bdflush not running\n");
1727                 sync_buffers(0,0);
1728                 return;
1729         };
1730         wake_up(&bdflush_wait);
1731         if(wait) sleep_on(&bdflush_done);
1732 }
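
/*
 * Note on the handshake above (added for clarity): callers only poke
 * bdflush_wait; the daemon loop in sys_bdflush() below does a
 * wake_up(&bdflush_done) after each pass over the dirty list, so a caller
 * that asked to wait resumes once at least one flushing pass has finished.
 */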
1733 
1734 
1735 
1736 /* 
1737  * Here we attempt to write back old buffers.  We also try to flush inodes
1738  * and supers, since this function is essentially "update", and
1739  * otherwise there would be no way of ensuring that these quantities ever 
1740  * get written back.  Ideally, we would have a timestamp on the inodes
1741  * and superblocks so that we could write back only the old ones as well.
1742  */
1743 
1744 asmlinkage int sync_old_buffers(void)
1745 {
1746         int i, isize;
1747         int ndirty, nwritten;
1748         int nlist;
1749         int ncount;
1750         struct buffer_head * bh, *next;
1751 
1752         sync_supers(0);
1753         sync_inodes(0);
1754 
1755         ncount = 0;
1756 #ifdef DEBUG
1757         for(nlist = 0; nlist < NR_LIST; nlist++)
1758 #else
1759         for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1760 #endif
1761         {
1762                 ndirty = 0;
1763                 nwritten = 0;
1764         repeat:
1765                 bh = lru_list[nlist];
1766                 if(bh) 
1767                          for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1768                                  /* We may have stalled while waiting for I/O to complete. */
1769                                  if(bh->b_list != nlist) goto repeat;
1770                                  next = bh->b_next_free;
1771                                  if(!lru_list[nlist]) {
1772                                          printk("Dirty list empty %d\n", i);
1773                                          break;
1774                                  }
1775                                  
1776                                  /* Clean buffer on dirty list?  Refile it */
1777                                  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1778                                   {
1779                                           refile_buffer(bh);
1780                                           continue;
1781                                   }
1782                                  
1783                                  if (bh->b_lock || !bh->b_dirt)
1784                                           continue;
1785                                  ndirty++;
1786                                  if(bh->b_flushtime > jiffies) continue;
1787                                  nwritten++;
1788                                  bh->b_count++;
1789                                  bh->b_flushtime = 0;
1790 #ifdef DEBUG
1791                                  if(nlist != BUF_DIRTY) ncount++;
1792 #endif
1793                                  ll_rw_block(WRITE, 1, &bh);
1794                                  bh->b_count--;
1795                          }
1796         }
1797 #ifdef DEBUG
1798         if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1799         printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1800 #endif
1801         
1802         /* We assume that we only come through here on a regular
1803            schedule, like every 5 seconds.  Now update load averages.  
1804            Shift usage counts to prevent overflow. */
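        /*
         * Explanatory note (added): CALC_LOAD, the fixed-point load-average
         * macro normally found in <linux/sched.h>, keeps an exponential
         * moving average of roughly the form lav = lav*e + usage*(1-e),
         * where e is lav_const expressed as a fixed-point fraction.
         * Clearing buffer_usage afterwards means each pass only folds in
         * the getblk() traffic seen since the previous run.
         */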
1805         for(isize = 0; isize<NR_SIZES; isize++){
1806                 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1807                 buffer_usage[isize] = 0;
1808         };
1809         return 0;
1810 }
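
/*
 * Illustrative sketch (user space, not part of this file): an "update"
 * style daemon can drive sync_old_buffers() through the bdflush system
 * call with func == 1, e.g. on the regular 5 second schedule the load
 * average code above assumes.  The bdflush() wrapper below is
 * hypothetical; the real call goes through the normal system call entry.
 */
#if 0   /* user-space example, never compiled as part of the kernel */
#include <unistd.h>

extern int bdflush(int func, long data);       /* hypothetical wrapper */

int main(void)
{
        for (;;) {
                bdflush(1, 0);  /* func 1: write back old buffers, inodes, supers */
                sleep(5);
        }
}
#endif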
1811 
1812 
1813 /* This is the interface to bdflush.  As we get more sophisticated, we can
1814  * pass tuning parameters to this "process", to adjust how it behaves.  If you
1815  * invoke this again after the daemon has been started, you simply modify
1816  * the tuning parameters.  Each parameter is range-checked (against
1817  * bdflush_min[] and bdflush_max[]) to make sure that it is reasonable. */
1818 
1819 asmlinkage int sys_bdflush(int func, int data)
1820 {
1821         int i, error;
1822         int ndirty;
1823         int nlist;
1824         int ncount;
1825         struct buffer_head * bh, *next;
1826 
1827         if(!suser()) return -EPERM;
1828 
1829         if(func == 1)
1830                  return sync_old_buffers();
1831 
1832         /* Func 0 starts the daemon, 1 flushes old buffers; for func >= 2, even funcs read param (func-2)/2, odd funcs set it */
1833         if(func >= 2){
1834                 i = (func-2) >> 1;
1835                 if (i < 0 || i >= N_PARAM) return -EINVAL;
1836                 if((func & 1) == 0) {
1837                         error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1838                         if(error) return error;
1839                         put_fs_long(bdf_prm.data[i], data);
1840                         return 0;
1841                 };
1842                 if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
1843                 bdf_prm.data[i] = data;
1844                 return 0;
1845         };
1846         
1847         if(bdflush_running++) return -EBUSY; /* Only one copy of this running at one time */
1848         
1849         /* OK, from here on is the daemon */
1850         
1851         while(1==1){
1852 #ifdef DEBUG
1853                 printk("bdflush() activated...");
1854 #endif
1855                 
1856                 ncount = 0;
1857 #ifdef DEBUG
1858                 for(nlist = 0; nlist < NR_LIST; nlist++)
1859 #else
1860                 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1861 #endif
1862                  {
1863                          ndirty = 0;
1864                  repeat:
1865                          bh = lru_list[nlist];
1866                          if(bh) 
1867                                   for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty; 
1868                                        bh = next) {
1869                                           /* We may have stalled while waiting for I/O to complete. */
1870                                           if(bh->b_list != nlist) goto repeat;
1871                                           next = bh->b_next_free;
1872                                           if(!lru_list[nlist]) {
1873                                                   printk("Dirty list empty %d\n", i);
1874                                                   break;
1875                                           }
1876                                           
1877                                           /* Clean buffer on dirty list?  Refile it */
1878                                           if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1879                                            {
1880                                                    refile_buffer(bh);
1881                                                    continue;
1882                                            }
1883                                           
1884                                           if (bh->b_lock || !bh->b_dirt)
1885                                                    continue;
1886                                           /* Should we write back buffers that are shared or not??
1887                                              currently dirty buffers are not shared, so it does not matter */
1888                                           bh->b_count++;
1889                                           ndirty++;
1890                                           bh->b_flushtime = 0;
1891                                           ll_rw_block(WRITE, 1, &bh);
1892 #ifdef DEBUG
1893                                           if(nlist != BUF_DIRTY) ncount++;
1894 #endif
1895                                           bh->b_count--;
1896                                   }
1897                  }
1898 #ifdef DEBUG
1899                 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1900                 printk("sleeping again.\n");
1901 #endif
1902                 wake_up(&bdflush_done);
1903                 
1904                 /* If there are still a lot of dirty buffers around, skip the sleep
1905                    and flush some more */
1906                 
1907                 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) * 
1908                    bdf_prm.b_un.nfract/100) {
1909                         if (current->signal & (1 << (SIGKILL-1))) {
1910                                 bdflush_running--;
1911                                 return 0;
1912                         }
1913                         current->signal = 0;
1914                         interruptible_sleep_on(&bdflush_wait);
1915                 }
1916         }
1917 }
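
/*
 * Illustrative sketch (user space, not part of this file): reading and
 * setting one of the bdflush tuning parameters through the interface
 * above.  For parameter i, func == 2*i + 2 copies bdf_prm.data[i] out to
 * the address passed in data, and func == 2*i + 3 range-checks and stores
 * a new value; func == 0 turns the calling process into the flushing
 * daemon and normally never returns (it only exits when killed).  The
 * bdflush() wrapper is hypothetical, as in the earlier sketch.
 */
#if 0   /* user-space example, never compiled as part of the kernel */
extern int bdflush(int func, long data);       /* hypothetical wrapper */

static long read_bdflush_param(int i)
{
        long value = 0;

        bdflush(2 * i + 2, (long) &value);      /* even func: read param i */
        return value;
}

static int write_bdflush_param(int i, long value)
{
        return bdflush(2 * i + 3, value);       /* odd func: set param i */
}
#endif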
1918 
1919 
1920 /*
1921  * Overrides for Emacs so that we follow Linus's tabbing style.
1922  * Emacs will notice this stuff at the end of the file and automatically
1923  * adjust the settings for this buffer only.  This must remain at the end
1924  * of the file.
1925  * ---------------------------------------------------------------------------
1926  * Local variables:
1927  * c-indent-level: 8
1928  * c-brace-imaginary-offset: 0
1929  * c-brace-offset: -8
1930  * c-argdecl-indent: 8
1931  * c-label-offset: -8
1932  * c-continued-statement-offset: 8
1933  * c-continued-brace-offset: 0
1934  * End:
1935  */
