root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. invalidate_buffers
  6. check_disk_change
  7. remove_from_hash_queue
  8. remove_from_free_list
  9. remove_from_queues
  10. put_first_free
  11. put_last_free
  12. insert_into_queues
  13. find_buffer
  14. get_hash_table
  15. getblk
  16. brelse
  17. bread
  18. breada
  19. put_unused_buffer_head
  20. get_more_buffer_heads
  21. get_unused_buffer_head
  22. try_to_share_buffers
  23. bread_page
  24. grow_buffers
  25. try_to_free
  26. shrink_buffers
  27. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/io.h>
  29 
  30 #ifdef CONFIG_SCSI
  31 #ifdef CONFIG_BLK_DEV_SR
  32 extern int check_cdrom_media_change(int, int);
  33 #endif
  34 #ifdef CONFIG_BLK_DEV_SD
  35 extern int check_scsidisk_media_change(int, int);
  36 extern int revalidate_scsidisk(int, int);
  37 #endif
  38 #endif
  39 
  40 static struct buffer_head * hash_table[NR_HASH];
  41 static struct buffer_head * free_list = NULL;
  42 static struct buffer_head * unused_list = NULL;
  43 static struct wait_queue * buffer_wait = NULL;
  44 
  45 int nr_buffers = 0;
  46 int buffermem = 0;
  47 int nr_buffer_heads = 0;
  48 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  49 
  50 /*
  51  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  52  * and getting rid of the cli-sti pairs. The wait-queue routines still
  53  * need cli-sti, but now it's just a couple of 386 instructions or so.
  54  *
  55  * Note that the real wait_on_buffer() is an inline function that checks
  56  * if 'b_wait' is set before calling this, so that the queues aren't set
  57  * up unnecessarily.
  58  */
/*
 * Sleep until 'bh' is unlocked.  Callers normally use the inline
 * wait_on_buffer(), which only calls this when b_lock is set.
 *
 * The task marks itself TASK_UNINTERRUPTIBLE *before* testing b_lock,
 * so an unlock + wake_up that races in between the test and schedule()
 * just makes schedule() return immediately — the wakeup is not lost.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&bh->b_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;    /* re-check: someone else may have re-locked it */
        }
        remove_wait_queue(&bh->b_wait, &wait);
        current->state = TASK_RUNNING;
}
  73 
/*
 * Queue writes for every dirty, unlocked buffer belonging to 'dev'
 * (or to any device when dev is 0).  Walks the circular free list for
 * nr_buffers*2 steps, i.e. twice around — presumably to catch buffers
 * repositioned on the list while the walk sleeps inside ll_rw_block;
 * TODO(review): confirm.  Does not wait for the writes to complete.
 */
static void sync_buffers(dev_t dev)
{
        int i;
        struct buffer_head * bh;

        bh = free_list;
        for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
                if (dev && bh->b_dev != dev)
                        continue;
                if (bh->b_lock)         /* already being written (or read) */
                        continue;
                if (!bh->b_dirt)        /* nothing to write back */
                        continue;
                ll_rw_block(WRITE, 1, &bh);
        }
}
  90 
  91 void sync_dev(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  92 {
  93         sync_buffers(dev);
  94         sync_supers(dev);
  95         sync_inodes(dev);
  96         sync_buffers(dev);
  97 }
  98 
  99 int sys_sync(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 100 {
 101         sync_dev(0);
 102         return 0;
 103 }
 104 
 105 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 106 {
 107         int i;
 108         struct buffer_head * bh;
 109 
 110         bh = free_list;
 111         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 112                 if (bh->b_dev != dev)
 113                         continue;
 114                 wait_on_buffer(bh);
 115                 if (bh->b_dev == dev)
 116                         bh->b_uptodate = bh->b_dirt = 0;
 117         }
 118 }
 119 
 120 /*
 121  * This routine checks whether a floppy has been changed, and
 122  * invalidates all buffer-cache-entries in that case. This
 123  * is a relatively slow routine, so we have to try to minimize using
 124  * it. Thus it is called only upon a 'mount' or 'open'. This
 125  * is the best way of combining speed and utility, I think.
 126  * People changing diskettes in the middle of an operation deserve
  127  * to lose :-)
 128  *
 129  * NOTE! Although currently this is only for floppies, the idea is
 130  * that any additional removable block-device will use this routine,
 131  * and that mount/open needn't know that floppies/whatever are
 132  * special.
 133  */
/*
 * Ask the driver whether the medium in 'dev' has changed; if it has,
 * discard the device's superblock, cached inodes and buffers.  Only
 * removable-media majors are handled: 2 = floppy, 8 = SCSI disk,
 * 11 = SCSI CD-ROM (the latter two only when configured in).
 */
void check_disk_change(dev_t dev)
{
        int i;
        struct buffer_head * bh;

        /* 'i' receives the driver's changed/unchanged verdict; every
           case sets it, and unknown majors return early. */
        switch(MAJOR(dev)){
        case 2: /* floppy disc */
                if (!(bh = getblk(dev,0,1024)))
                        return;
                i = floppy_change(bh);
                brelse(bh);
                break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
         case 8: /* Removable scsi disk */
                i = check_scsidisk_media_change(dev, 0);
                break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
         case 11: /* CDROM */
                i = check_cdrom_media_change(dev, 0);
                break;
#endif

         default:
                return;
        };

        if (!i) return;         /* no change detected */

        printk("VFS: Disk change detected on device %d/%d\n",
                                        MAJOR(dev), MINOR(dev));
        /* Invalidate everything remembered about the stale medium. */
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_block[i].s_dev == dev)
                        put_super(super_block[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hardisk, because we have to invalidate
   all of the partitions that lie on the disk. */
        if (MAJOR(dev) == 8)
                revalidate_scsidisk(dev, 0);
#endif
}
 180 
/* Map a (device, block) pair onto one of the NR_HASH buckets;
   hash() expands to an lvalue naming that bucket's chain head. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
 183 
/*
 * Unlink 'bh' from its hash chain (b_next/b_prev), updating the
 * bucket head if it pointed at this buffer.  Safe to call for a
 * buffer that is not on any chain (all links NULL).
 */
static inline void remove_from_hash_queue(struct buffer_head * bh)
{
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
        bh->b_next = bh->b_prev = NULL;
}
 194 
/*
 * Unlink 'bh' from the circular doubly-linked free list, advancing
 * the free_list head if it pointed here.  A buffer with NULL free
 * links is a corrupted list — panic rather than corrupt memory.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: Free block list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;
        if (free_list == bh)
                free_list = bh->b_next_free;
        bh->b_next_free = bh->b_prev_free = NULL;
}
 205 
 206 static inline void remove_from_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 207 {
 208         remove_from_hash_queue(bh);
 209         remove_from_free_list(bh);
 210 }
 211 
/*
 * Move 'bh' to the front of the circular free list, making it the
 * first reclaim candidate (getblk scans from free_list forward).
 * No-op for NULL or for a buffer already at the front.
 */
static inline void put_first_free(struct buffer_head * bh)
{
        if (!bh || (bh == free_list))
                return;
        remove_from_free_list(bh);
/* add to front of free list */
        bh->b_next_free = free_list;
        bh->b_prev_free = free_list->b_prev_free;
        free_list->b_prev_free->b_next_free = bh;
        free_list->b_prev_free = bh;
        free_list = bh;         /* new head: scanned first */
}
 224 
/*
 * Move 'bh' to the back of the circular free list, making it the
 * last reclaim candidate (getblk does this for cached buffers it
 * hands back out).  If bh is the current head, simply advancing
 * free_list leaves it at the tail of the ring.
 */
static inline void put_last_free(struct buffer_head * bh)
{
        if (!bh)
                return;
        if (bh == free_list) {
                free_list = bh->b_next_free;
                return;
        }
        remove_from_free_list(bh);
/* add to back of free list */
        bh->b_next_free = free_list;
        bh->b_prev_free = free_list->b_prev_free;
        free_list->b_prev_free->b_next_free = bh;
        free_list->b_prev_free = bh;
}
 240 
/*
 * Link a buffer into the global structures: onto the back of the
 * circular free list always, and onto the front of its hash chain
 * when it has a device (anonymous buffers stay unhashed).
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
        bh->b_next_free = free_list;
        bh->b_prev_free = free_list->b_prev_free;
        free_list->b_prev_free->b_next_free = bh;
        free_list->b_prev_free = bh;
/* put the buffer in new hash-queue if it has a device */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!bh->b_dev)
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)
                bh->b_next->b_prev = bh;
}
 258 
 259 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 260 {               
 261         struct buffer_head * tmp;
 262 
 263         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 264                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 265                         if (tmp->b_size == size)
 266                                 return tmp;
 267                         else {
 268                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 269                                                         MAJOR(dev), MINOR(dev));
 270                                 return NULL;
 271                         }
 272         return NULL;
 273 }
 274 
 275 /*
 276  * Why like this, I hear you say... The reason is race-conditions.
  277  * As we don't lock buffers (unless we are reading them, that is),
 278  * something might happen to it while we sleep (ie a read-error
 279  * will force it bad). This shouldn't really happen currently, but
 280  * the code is ready.
 281  */
/*
 * Look up (dev, block, size) in the buffer cache and return it with
 * its reference count raised, or NULL if not cached.  After waiting
 * for a locked buffer its identity is re-checked: it may have been
 * reused for another block while we slept, in which case we drop our
 * reference and retry the lookup.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block,size)))
                        return NULL;
                bh->b_count++;          /* pin before possibly sleeping */
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
                        return bh;
                bh->b_count--;          /* identity changed: unpin, retry */
        }
}
 296 
 297 /*
 298  * Ok, this is getblk, and it isn't very clear, again to hinder
 299  * race-conditions. Most of the code is seldom used, (ie repeating),
 300  * so it should be much more efficient than it looks.
 301  *
  302  * The algorithm is changed: hopefully better, and an elusive bug removed.
 303  *
 304  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 305  * when the filesystem starts to get full of dirty blocks (I hope).
 306  */
/* Rank a reuse candidate: 0 = clean and unlocked (best); a lock adds
   1, dirt adds 2 (worst — it must be written out before reuse). */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a buffer for (dev, block, size), either from the cache or by
 * claiming a free one, with b_count raised.  The buffer is NOT read
 * from disk: a freshly claimed one has b_uptodate == 0.  May sleep;
 * every sleep is followed by a 'goto repeat' re-validation because
 * the world may have changed meanwhile.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh, * tmp;
        int buffers;
        static int grow_size = 0;       /* bytes of misses until next growth try */

repeat:
        /* Fast path: already cached. */
        bh = get_hash_table(dev, block, size);
        if (bh) {
                /* Clean, valid buffers move to the back of the free
                   list so they are the last reclaim candidates. */
                if (bh->b_uptodate && !bh->b_dirt)
                        put_last_free(bh);
                return bh;
        }
        /* Grow the cache at most once per 4096 bytes of misses, and
           only while free memory is plentiful and the cache is under
           6MB. */
        grow_size -= size;
        if (nr_free_pages > min_free_pages &&
            buffermem < 6*1024*1024 &&
            grow_size <= 0) {
                grow_buffers(size);
                grow_size = 4096;
        }
        buffers = nr_buffers;
        bh = NULL;

        /* Scan the free list for the least-bad unused buffer of the
           right size whose page is mapped exactly once (not shared). */
        for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
                if (tmp->b_count || tmp->b_size != size)
                        continue;
                if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))      /* can't do better than 0 */
                                break;
                }
#if 0
                if (tmp->b_dirt)
                        ll_rw_block(WRITEA, 1, &tmp);
#endif
        }

        /* Nothing reusable: try to grow anyway while a few pages remain. */
        if (!bh && nr_free_pages > 5) {
                grow_buffers(size);
                goto repeat;
        }
        
/* and repeat until we find something good */
        if (!bh) {
                sleep_on(&buffer_wait);         /* woken by brelse() */
                goto repeat;
        }
        /* The chosen buffer may be locked; wait, then re-validate. */
        wait_on_buffer(bh);
        if (bh->b_count || bh->b_size != size)
                goto repeat;
        if (bh->b_dirt) {
                sync_buffers(bh->b_dev);
                goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block,size))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
 379 
 380 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 381 {
 382         if (!buf)
 383                 return;
 384         wait_on_buffer(buf);
 385         if (buf->b_count) {
 386                 if (--buf->b_count)
 387                         return;
 388                 wake_up(&buffer_wait);
 389                 return;
 390         }
 391         printk("VFS: brelse: Trying to free free buffer\n");
 392 }
 393 
 394 /*
 395  * bread() reads a specified block and returns the buffer that contains
 396  * it. It returns NULL if the block was unreadable.
 397  */
 398 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 399 {
 400         struct buffer_head * bh;
 401 
 402         if (!(bh = getblk(dev, block, size))) {
 403                 printk("VFS: bread: READ error on device %d/%d\n",
 404                                                 MAJOR(dev), MINOR(dev));
 405                 return NULL;
 406         }
 407         if (bh->b_uptodate)
 408                 return bh;
 409         ll_rw_block(READ, 1, &bh);
 410         wait_on_buffer(bh);
 411         if (bh->b_uptodate)
 412                 return bh;
 413         brelse(bh);
 414         return NULL;
 415 }
 416 
 417 /*
 418  * Ok, breada can be used as bread, but additionally to mark other
 419  * blocks for reading as well. End the argument list with a negative
 420  * number.
 421  */
 422 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 423 {
 424         va_list args;
 425         struct buffer_head * bh, *tmp;
 426 
 427         va_start(args,first);
 428         if (!(bh = getblk(dev, first, 1024))) {
 429                 printk("VFS: breada: READ error on device %d/%d\n",
 430                                                 MAJOR(dev), MINOR(dev));
 431                 return NULL;
 432         }
 433         if (!bh->b_uptodate)
 434                 ll_rw_block(READ, 1, &bh);
 435         while ((first=va_arg(args,int))>=0) {
 436                 tmp = getblk(dev, first, 1024);
 437                 if (tmp) {
 438                         if (!tmp->b_uptodate)
 439                                 ll_rw_block(READA, 1, &tmp);
 440                         tmp->b_count--;
 441                 }
 442         }
 443         va_end(args);
 444         wait_on_buffer(bh);
 445         if (bh->b_uptodate)
 446                 return bh;
 447         brelse(bh);
 448         return (NULL);
 449 }
 450 
/*
 * Return a buffer head to the unused pool.  The whole structure is
 * cleared except b_wait, which may still have sleepers queued on it;
 * the volatile casts presumably keep the compiler from moving the
 * b_wait save/restore across the memset (see fs/inode.c for the same
 * weird use of volatile — TODO confirm the exact rationale).
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset((void *) bh,0,sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        bh->b_next_free = unused_list;
        unused_list = bh;
}
 464 
 465 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 466 {
 467         unsigned long page;
 468         struct buffer_head * bh;
 469 
 470         if (unused_list)
 471                 return;
 472         page = get_free_page(GFP_KERNEL);
 473         if (!page)
 474                 return;
 475         bh = (struct buffer_head *) page;
 476         while ((unsigned long) (bh+1) <= page+4096) {
 477                 put_unused_buffer_head(bh);
 478                 bh++;
 479                 nr_buffer_heads++;
 480         }
 481 }
 482 
 483 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 484 {
 485         struct buffer_head * bh;
 486 
 487         get_more_buffer_heads();
 488         if (!unused_list)
 489                 return NULL;
 490         bh = unused_list;
 491         unused_list = bh->b_next_free;
 492         bh->b_next_free = NULL;
 493         bh->b_data = NULL;
 494         bh->b_size = 0;
 495         return bh;
 496 }
 497 
 498 static inline unsigned long try_to_share_buffers(unsigned long address, dev_t dev, int b[], int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 499 {
 500         return 0;
 501 }
 502 
/* Copy one BLOCK_SIZE block with 'rep movsl' (BLOCK_SIZE/4 longword
   moves); cx/si/di are clobbered, as declared to the compiler. */
#define COPYBLK(from,to) \
__asm__ __volatile__("rep ; movsl" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        :"cx","di","si")
 507 
 508 /*
 509  * bread_page reads four buffers into memory at the desired address. It's
 510  * a function of its own, as there is some speed to be got by reading them
 511  * all at the same time, not waiting for one to be read, and then another
 512  * etc. This also allows us to optimize memory usage by sharing code pages
 513  * and filesystem buffers.. This is not yet implemented.
 514  */
/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc. This also allows us to optimize memory usage by sharing code pages
 * and filesystem buffers.. This is not yet implemented.
 *
 * b[] holds four block numbers; an entry of 0 means "no block here"
 * and the corresponding BLOCK_SIZE region of the page is left
 * untouched.  Returns the destination address actually used.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
        struct buffer_head * bh[4];     /* one head per block slot */
        struct buffer_head * bhr[4];    /* the subset that needs real I/O */
        unsigned long where;
        int bhnum = 0;
        int i;

        /* Read-only mappings may be able to share the buffer page
           directly (currently try_to_share_buffers always fails). */
        if (!(prot & PAGE_RW)) {
                where = try_to_share_buffers(address,dev,b,size);
                if (where)
                        return where;
        }
        /* Claim all four buffers first, collecting the stale ones... */
        for (i=0 ; i<4 ; i++) {
                bh[i] = NULL;
                if (b[i]) {
                        bh[i] = getblk(dev, b[i], size);
                        if (bh[i] && !bh[i]->b_uptodate)
                                bhr[bhnum++] = bh[i];
                }
        }
        /* ...then issue all the reads in one batch. */
        if (bhnum)
                ll_rw_block(READ, bhnum, bhr);
        where = address;
        /* Copy each block into place as it completes, then release it. */
        for (i=0 ; i<4 ; i++,address += BLOCK_SIZE) {
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                        if (bh[i]->b_uptodate)
                                COPYBLK((unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
        }
        return where;
}
 549 
 550 /*
 551  * Try to increase the number of buffers available: the size argument
 552  * is used to determine what kind of buffers we want. Currently only
 553  * 1024-byte buffers are supported by the rest of the system, but I
 554  * think this will change eventually.
 555  */
 556 void grow_buffers(int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 557 {
 558         unsigned long page;
 559         int i;
 560         struct buffer_head *bh, *tmp;
 561 
 562         if ((size & 511) || (size > 4096)) {
 563                 printk("VFS: grow_buffers: size = %d\n",size);
 564                 return;
 565         }
 566         page = get_free_page(GFP_BUFFER);
 567         if (!page)
 568                 return;
 569         tmp = NULL;
 570         i = 0;
 571         for (i = 0 ; i+size <= 4096 ; i += size) {
 572                 bh = get_unused_buffer_head();
 573                 if (!bh)
 574                         goto no_grow;
 575                 bh->b_this_page = tmp;
 576                 tmp = bh;
 577                 bh->b_data = (char * ) (page+i);
 578                 bh->b_size = size;
 579         }
 580         tmp = bh;
 581         while (1) {
 582                 if (free_list) {
 583                         tmp->b_next_free = free_list;
 584                         tmp->b_prev_free = free_list->b_prev_free;
 585                         free_list->b_prev_free->b_next_free = tmp;
 586                         free_list->b_prev_free = tmp;
 587                 } else {
 588                         tmp->b_prev_free = tmp;
 589                         tmp->b_next_free = tmp;
 590                 }
 591                 free_list = tmp;
 592                 ++nr_buffers;
 593                 if (tmp->b_this_page)
 594                         tmp = tmp->b_this_page;
 595                 else
 596                         break;
 597         }
 598         tmp->b_this_page = bh;
 599         buffermem += 4096;
 600         return;
 601 /*
 602  * In case anything failed, we just free everything we got.
 603  */
 604 no_grow:
 605         bh = tmp;
 606         while (bh) {
 607                 tmp = bh;
 608                 bh = bh->b_this_page;
 609                 put_unused_buffer_head(tmp);
 610         }       
 611         free_page(page);
 612 }
 613 
 614 /*
 615  * try_to_free() checks if all the buffers on this particular page
 616  * are unused, and free's the page if so.
 617  */
/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.  Returns 1 when the page
 * was freed, 0 otherwise.
 */
static int try_to_free(struct buffer_head * bh)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        page = (unsigned long) bh->b_data;
        page &= 0xfffff000;     /* round down to the 4K page base */
        if (mem_map[MAP_NR(page)] != 1)
                return 0;       /* page mapped more than once: shared */
        /* First pass: every buffer on the page must be idle. */
        tmp = bh;
        do {
                if (!tmp)       /* broken b_this_page ring */
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* Second pass: unlink and recycle every head, then the page. */
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= 4096;
        free_page(page);
        return 1;
}
 647 
 648 /*
 649  * Try to free up some pages by shrinking the buffer-cache
 650  *
 651  * Priority tells the routine how hard to try to shrink the
 652  * buffers: 3 means "don't bother too much", while a value
 653  * of 0 means "we'd better get some free pages now".
 654  */
 655 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 656 {
 657         struct buffer_head *bh;
 658         int i;
 659 
 660         if (priority < 2)
 661                 sync_buffers(0);
 662         bh = free_list;
 663         i = nr_buffers >> priority;
 664         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 665                 if (bh->b_count || !bh->b_this_page)
 666                         continue;
 667                 if (bh->b_lock)
 668                         if (priority)
 669                                 continue;
 670                         else
 671                                 wait_on_buffer(bh);
 672                 if (bh->b_dirt) {
 673                         ll_rw_block(WRITEA, 1, &bh);
 674                         continue;
 675                 }
 676                 if (try_to_free(bh))
 677                         return 1;
 678         }
 679         return 0;
 680 }
 681 
 682 /*
 683  * This initializes the initial buffer free list.  nr_buffers is set
 684  * to one less the actual number of buffers, as a sop to backwards
 685  * compatibility --- the old code did this (I think unintentionally,
 686  * but I'm not sure), and programs in the ps package expect it.
 687  *                                      - TYT 8/30/92
 688  */
 689 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 690 {
 691         int i;
 692 
 693         if (high_memory >= 4*1024*1024)
 694                 min_free_pages = 200;
 695         else
 696                 min_free_pages = 20;
 697         for (i = 0 ; i < NR_HASH ; i++)
 698                 hash_table[i] = NULL;
 699         free_list = 0;
 700         grow_buffers(BLOCK_SIZE);
 701         if (!free_list)
 702                 panic("VFS: Unable to initialize buffer free list!");
 703         return;
 704 }

/* [previous][next][first][last][top][bottom][index][help] */