root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. sys_fsync
  6. invalidate_buffers
  7. check_disk_change
  8. remove_from_hash_queue
  9. remove_from_free_list
  10. remove_from_queues
  11. put_first_free
  12. put_last_free
  13. insert_into_queues
  14. find_buffer
  15. get_hash_table
  16. getblk
  17. brelse
  18. bread
  19. breada
  20. put_unused_buffer_head
  21. get_more_buffer_heads
  22. get_unused_buffer_head
  23. try_to_share_buffers
  24. bread_page
  25. grow_buffers
  26. try_to_free
  27. shrink_buffers
  28. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting a interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 #include <linux/errno.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/io.h>
  30 
  31 #ifdef CONFIG_SCSI
  32 #ifdef CONFIG_BLK_DEV_SR
  33 extern int check_cdrom_media_change(int, int);
  34 #endif
  35 #ifdef CONFIG_BLK_DEV_SD
  36 extern int check_scsidisk_media_change(int, int);
  37 extern int revalidate_scsidisk(int, int);
  38 #endif
  39 #endif
  40 
  41 static struct buffer_head * hash_table[NR_HASH];
  42 static struct buffer_head * free_list = NULL;
  43 static struct buffer_head * unused_list = NULL;
  44 static struct wait_queue * buffer_wait = NULL;
  45 
  46 int nr_buffers = 0;
  47 int buffermem = 0;
  48 int nr_buffer_heads = 0;
  49 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  50 
  51 /*
  52  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  53  * and getting rid of the cli-sti pairs. The wait-queue routines still
  54  * need cli-sti, but now it's just a couple of 386 instructions or so.
  55  *
  56  * Note that the real wait_on_buffer() is an inline function that checks
  57  * if 'b_wait' is set before calling this, so that the queues aren't set
  58  * up unnecessarily.
  59  */
/*
 * Sleep until bh is unlocked.  Only called through the inline
 * wait_on_buffer() wrapper, which tests b_lock first so the wait
 * queue isn't set up unnecessarily.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&bh->b_wait, &wait);
repeat:
        /* Mark ourselves sleeping BEFORE re-testing b_lock: if the
         * unlock and wake-up happen in between, schedule() returns
         * immediately instead of losing the wakeup. */
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait, &wait);
        current->state = TASK_RUNNING;
}
  74 
  75 static void sync_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  76 {
  77         int i;
  78         struct buffer_head * bh;
  79 
  80         bh = free_list;
  81         for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
  82                 if (dev && bh->b_dev != dev)
  83                         continue;
  84                 if (bh->b_lock)
  85                         continue;
  86                 if (!bh->b_dirt)
  87                         continue;
  88                 ll_rw_block(WRITE, 1, &bh);
  89         }
  90 }
  91 
/*
 * Flush everything cached for one device (0 == all).  Buffers are
 * synced both before and after the supers/inodes pass, because
 * writing the inodes may dirty new buffers; the second pass picks
 * those up.  The call order is deliberate.
 */
void sync_dev(dev_t dev)
{
        sync_buffers(dev);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev);
}
  99 
/* sync(2) system call: flush all caches for every device.  Always 0. */
int sys_sync(void)
{
        sync_dev(0);
        return 0;
}
 105 
/* fsync(2) system call: per-file syncing is not implemented yet. */
int sys_fsync(int fd)
{
        return -ENOSYS;
}
 110 
 111 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 112 {
 113         int i;
 114         struct buffer_head * bh;
 115 
 116         bh = free_list;
 117         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 118                 if (bh->b_dev != dev)
 119                         continue;
 120                 wait_on_buffer(bh);
 121                 if (bh->b_dev == dev)
 122                         bh->b_uptodate = bh->b_dirt = 0;
 123         }
 124 }
 125 
 126 /*
 127  * This routine checks whether a floppy has been changed, and
 128  * invalidates all buffer-cache-entries in that case. This
 129  * is a relatively slow routine, so we have to try to minimize using
 130  * it. Thus it is called only upon a 'mount' or 'open'. This
 131  * is the best way of combining speed and utility, I think.
 132  * People changing diskettes in the middle of an operation deserve
  133  * to lose :-)
 134  *
 135  * NOTE! Although currently this is only for floppies, the idea is
 136  * that any additional removable block-device will use this routine,
 137  * and that mount/open needn't know that floppies/whatever are
 138  * special.
 139  */
/*
 * Ask the driver whether the medium in a removable device was changed
 * and, if so, throw away all cached state for that device: mounted
 * superblocks, inodes and buffers.  Called from mount/open only.
 */
void check_disk_change(dev_t dev)
{
        int i;
        struct buffer_head * bh;

        switch(MAJOR(dev)){
        case 2: /* floppy disc */
                /* reading block 0 forces the drive to notice a change */
                if (!(bh = getblk(dev,0,1024)))
                        return;
                i = floppy_change(bh);
                brelse(bh);
                break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
         case 8: /* Removable scsi disk */
                i = check_scsidisk_media_change(dev, 0);
                break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
         case 11: /* CDROM */
                i = check_cdrom_media_change(dev, 0);
                break;
#endif

         default:
                /* not a removable device we know about */
                return;
        };

        if (!i) return;         /* medium unchanged: nothing to do */

        printk("VFS: Disk change detected on device %d/%d\n",
                                        MAJOR(dev), MINOR(dev));
        /* drop any superblock mounted from this device */
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_block[i].s_dev == dev)
                        put_super(super_block[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hardisk, because we have to invalidate
   all of the partitions that lie on the disk. */
        if (MAJOR(dev) == 8)
                revalidate_scsidisk(dev, 0);
#endif
}
 186 
 187 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
 188 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 189 
 190 static inline void remove_from_hash_queue(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 191 {
 192         if (bh->b_next)
 193                 bh->b_next->b_prev = bh->b_prev;
 194         if (bh->b_prev)
 195                 bh->b_prev->b_next = bh->b_next;
 196         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 197                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 198         bh->b_next = bh->b_prev = NULL;
 199 }
 200 
 201 static inline void remove_from_free_list(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 202 {
 203         if (!(bh->b_prev_free) || !(bh->b_next_free))
 204                 panic("VFS: Free block list corrupted");
 205         bh->b_prev_free->b_next_free = bh->b_next_free;
 206         bh->b_next_free->b_prev_free = bh->b_prev_free;
 207         if (free_list == bh)
 208                 free_list = bh->b_next_free;
 209         bh->b_next_free = bh->b_prev_free = NULL;
 210 }
 211 
/* Take bh out of both the hash chain and the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
        remove_from_hash_queue(bh);
        remove_from_free_list(bh);
}
 217 
 218 static inline void put_first_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 219 {
 220         if (!bh || (bh == free_list))
 221                 return;
 222         remove_from_free_list(bh);
 223 /* add to front of free list */
 224         bh->b_next_free = free_list;
 225         bh->b_prev_free = free_list->b_prev_free;
 226         free_list->b_prev_free->b_next_free = bh;
 227         free_list->b_prev_free = bh;
 228         free_list = bh;
 229 }
 230 
 231 static inline void put_last_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 232 {
 233         if (!bh)
 234                 return;
 235         if (bh == free_list) {
 236                 free_list = bh->b_next_free;
 237                 return;
 238         }
 239         remove_from_free_list(bh);
 240 /* add to back of free list */
 241         bh->b_next_free = free_list;
 242         bh->b_prev_free = free_list->b_prev_free;
 243         free_list->b_prev_free->b_next_free = bh;
 244         free_list->b_prev_free = bh;
 245 }
 246 
 247 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 248 {
 249 /* put at end of free list */
 250         bh->b_next_free = free_list;
 251         bh->b_prev_free = free_list->b_prev_free;
 252         free_list->b_prev_free->b_next_free = bh;
 253         free_list->b_prev_free = bh;
 254 /* put the buffer in new hash-queue if it has a device */
 255         bh->b_prev = NULL;
 256         bh->b_next = NULL;
 257         if (!bh->b_dev)
 258                 return;
 259         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 260         hash(bh->b_dev,bh->b_blocknr) = bh;
 261         if (bh->b_next)
 262                 bh->b_next->b_prev = bh;
 263 }
 264 
 265 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 266 {               
 267         struct buffer_head * tmp;
 268 
 269         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 270                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 271                         if (tmp->b_size == size)
 272                                 return tmp;
 273                         else {
 274                                 printk("VFS: Wrong blocksize on device %d/%d\n",
 275                                                         MAJOR(dev), MINOR(dev));
 276                                 return NULL;
 277                         }
 278         return NULL;
 279 }
 280 
 281 /*
 282  * Why like this, I hear you say... The reason is race-conditions.
  283  * As we don't lock buffers (unless we are reading them, that is),
 284  * something might happen to it while we sleep (ie a read-error
 285  * will force it bad). This shouldn't really happen currently, but
 286  * the code is ready.
 287  */
 288 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 289 {
 290         struct buffer_head * bh;
 291 
 292         for (;;) {
 293                 if (!(bh=find_buffer(dev,block,size)))
 294                         return NULL;
 295                 bh->b_count++;
 296                 wait_on_buffer(bh);
 297                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 298                         return bh;
 299                 bh->b_count--;
 300         }
 301 }
 302 
 303 /*
 304  * Ok, this is getblk, and it isn't very clear, again to hinder
 305  * race-conditions. Most of the code is seldom used, (ie repeating),
 306  * so it should be much more efficient than it looks.
 307  *
  308  * The algorithm is changed: hopefully better, and an elusive bug removed.
 309  *
 310  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 311  * when the filesystem starts to get full of dirty blocks (I hope).
 312  */
/* Replacement cost of a buffer: clean+unlocked (0) is cheapest to
 * reuse, then locked (1), dirty (2), dirty and locked (3). */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a referenced buffer for (dev,block,size): either the cached
 * one, or a freshly reassigned free buffer with b_uptodate == 0 that
 * the caller must fill.  May sleep; every re-check after a sleep
 * guards against another process having taken or cached the block
 * in the meantime.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh, * tmp;
        int buffers;
        static int grow_size = 0;       /* bytes until next routine grow */

repeat:
        bh = get_hash_table(dev, block, size);
        if (bh) {
                /* clean, valid hit: make it the last eviction choice */
                if (bh->b_uptodate && !bh->b_dirt)
                        put_last_free(bh);
                return bh;
        }
        /* grow the cache roughly one page per 4096 bytes requested,
         * while free memory is plentiful and the cache is under 6MB */
        grow_size -= size;
        if (nr_free_pages > min_free_pages &&
            buffermem < 6*1024*1024 &&
            grow_size <= 0) {
                grow_buffers(size);
                grow_size = 4096;
        }
        buffers = nr_buffers;
        bh = NULL;

        /* scan the free list for the cheapest idle buffer of this size */
        for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
                if (tmp->b_count || tmp->b_size != size)
                        continue;
                /* skip buffers whose page is shared (mem_map count != 1) */
                if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))
                                break;  /* clean and unlocked: can't do better */
                }
#if 0
                if (tmp->b_dirt)
                        ll_rw_block(WRITEA, 1, &tmp);
#endif
        }

        if (!bh && nr_free_pages > 5) {
                grow_buffers(size);
                goto repeat;
        }
        
/* and repeat until we find something good */
        if (!bh) {
                sleep_on(&buffer_wait);         /* woken from brelse() */
                goto repeat;
        }
        /* the chosen buffer may change while we wait - re-validate */
        wait_on_buffer(bh);
        if (bh->b_count || bh->b_size != size)
                goto repeat;
        if (bh->b_dirt) {
                sync_buffers(0);
                goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block,size))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        /* rehash under the new identity */
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
 385 
 386 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 387 {
 388         if (!buf)
 389                 return;
 390         wait_on_buffer(buf);
 391         if (buf->b_count) {
 392                 if (--buf->b_count)
 393                         return;
 394                 wake_up(&buffer_wait);
 395                 return;
 396         }
 397         printk("VFS: brelse: Trying to free free buffer\n");
 398 }
 399 
 400 /*
 401  * bread() reads a specified block and returns the buffer that contains
 402  * it. It returns NULL if the block was unreadable.
 403  */
 404 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 405 {
 406         struct buffer_head * bh;
 407 
 408         if (!(bh = getblk(dev, block, size))) {
 409                 printk("VFS: bread: READ error on device %d/%d\n",
 410                                                 MAJOR(dev), MINOR(dev));
 411                 return NULL;
 412         }
 413         if (bh->b_uptodate)
 414                 return bh;
 415         ll_rw_block(READ, 1, &bh);
 416         wait_on_buffer(bh);
 417         if (bh->b_uptodate)
 418                 return bh;
 419         brelse(bh);
 420         return NULL;
 421 }
 422 
 423 /*
 424  * Ok, breada can be used as bread, but additionally to mark other
 425  * blocks for reading as well. End the argument list with a negative
 426  * number.
 427  */
 428 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 429 {
 430         va_list args;
 431         struct buffer_head * bh, *tmp;
 432 
 433         va_start(args,first);
 434         if (!(bh = getblk(dev, first, 1024))) {
 435                 printk("VFS: breada: READ error on device %d/%d\n",
 436                                                 MAJOR(dev), MINOR(dev));
 437                 return NULL;
 438         }
 439         if (!bh->b_uptodate)
 440                 ll_rw_block(READ, 1, &bh);
 441         while ((first=va_arg(args,int))>=0) {
 442                 tmp = getblk(dev, first, 1024);
 443                 if (tmp) {
 444                         if (!tmp->b_uptodate)
 445                                 ll_rw_block(READA, 1, &tmp);
 446                         tmp->b_count--;
 447                 }
 448         }
 449         va_end(args);
 450         wait_on_buffer(bh);
 451         if (bh->b_uptodate)
 452                 return bh;
 453         brelse(bh);
 454         return (NULL);
 455 }
 456 
 457 /*
 458  * See fs/inode.c for the weird use of volatile..
 459  */
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        /* Preserve b_wait across the memset: sleepers may still be
         * queued on this head.  The volatile casts force the load and
         * store to really happen around memset (see fs/inode.c). */
        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset((void *) bh,0,sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        /* push onto the unused-heads free list */
        bh->b_next_free = unused_list;
        unused_list = bh;
}
 470 
 471 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 472 {
 473         unsigned long page;
 474         struct buffer_head * bh;
 475 
 476         if (unused_list)
 477                 return;
 478         page = get_free_page(GFP_KERNEL);
 479         if (!page)
 480                 return;
 481         bh = (struct buffer_head *) page;
 482         while ((unsigned long) (bh+1) <= page+4096) {
 483                 put_unused_buffer_head(bh);
 484                 bh++;
 485                 nr_buffer_heads++;
 486         }
 487 }
 488 
 489 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 490 {
 491         struct buffer_head * bh;
 492 
 493         get_more_buffer_heads();
 494         if (!unused_list)
 495                 return NULL;
 496         bh = unused_list;
 497         unused_list = bh->b_next_free;
 498         bh->b_next_free = NULL;
 499         bh->b_data = NULL;
 500         bh->b_size = 0;
 501         return bh;
 502 }
 503 
/*
 * Placeholder for sharing buffer pages with read-only mappings:
 * always fails (returns 0), so bread_page() falls back to copying.
 */
static inline unsigned long try_to_share_buffers(unsigned long address, dev_t dev, int b[], int size)
{
        return 0;
}
 508 
/* Copy one BLOCK_SIZE block with a 386 "rep movsl" string move;
 * cx/si/di are clobbered, as the asm clobber list declares. */
#define COPYBLK(from,to) \
__asm__ __volatile__("rep ; movsl" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        :"cx","di","si")
 514 /*
 515  * bread_page reads four buffers into memory at the desired address. It's
 516  * a function of its own, as there is some speed to be got by reading them
 517  * all at the same time, not waiting for one to be read, and then another
 518  * etc. This also allows us to optimize memory usage by sharing code pages
 519  * and filesystem buffers.. This is not yet implemented.
 520  */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
        struct buffer_head * bh[4];
        struct buffer_head * bhr[4];    /* subset that still needs reading */
        unsigned long where;
        int bhnum = 0;
        int i;

        /* read-only mappings may be able to share the buffer page
         * directly (try_to_share_buffers() currently always fails) */
        if (!(prot & PAGE_RW)) {
                where = try_to_share_buffers(address,dev,b,size);
                if (where)
                        return where;
        }
        /* grab all four buffers first so one request covers them all;
         * a zero block number means a hole - leave that slot NULL */
        for (i=0 ; i<4 ; i++) {
                bh[i] = NULL;
                if (b[i]) {
                        bh[i] = getblk(dev, b[i], size);
                        if (bh[i] && !bh[i]->b_uptodate)
                                bhr[bhnum++] = bh[i];
                }
        }
        if (bhnum)
                ll_rw_block(READ, bhnum, bhr);
        where = address;
        /* copy each block into place as it completes, then release it */
        for (i=0 ; i<4 ; i++,address += BLOCK_SIZE) {
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                        if (bh[i]->b_uptodate)
                                COPYBLK((unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
        }
        return where;
}
 555 
 556 /*
 557  * Try to increase the number of buffers available: the size argument
 558  * is used to determine what kind of buffers we want. Currently only
 559  * 1024-byte buffers are supported by the rest of the system, but I
 560  * think this will change eventually.
 561  */
 562 void grow_buffers(int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 563 {
 564         unsigned long page;
 565         int i;
 566         struct buffer_head *bh, *tmp;
 567 
 568         if ((size & 511) || (size > 4096)) {
 569                 printk("VFS: grow_buffers: size = %d\n",size);
 570                 return;
 571         }
 572         page = get_free_page(GFP_BUFFER);
 573         if (!page)
 574                 return;
 575         tmp = NULL;
 576         i = 0;
 577         for (i = 0 ; i+size <= 4096 ; i += size) {
 578                 bh = get_unused_buffer_head();
 579                 if (!bh)
 580                         goto no_grow;
 581                 bh->b_this_page = tmp;
 582                 tmp = bh;
 583                 bh->b_data = (char * ) (page+i);
 584                 bh->b_size = size;
 585         }
 586         tmp = bh;
 587         while (1) {
 588                 if (free_list) {
 589                         tmp->b_next_free = free_list;
 590                         tmp->b_prev_free = free_list->b_prev_free;
 591                         free_list->b_prev_free->b_next_free = tmp;
 592                         free_list->b_prev_free = tmp;
 593                 } else {
 594                         tmp->b_prev_free = tmp;
 595                         tmp->b_next_free = tmp;
 596                 }
 597                 free_list = tmp;
 598                 ++nr_buffers;
 599                 if (tmp->b_this_page)
 600                         tmp = tmp->b_this_page;
 601                 else
 602                         break;
 603         }
 604         tmp->b_this_page = bh;
 605         buffermem += 4096;
 606         return;
 607 /*
 608  * In case anything failed, we just free everything we got.
 609  */
 610 no_grow:
 611         bh = tmp;
 612         while (bh) {
 613                 tmp = bh;
 614                 bh = bh->b_this_page;
 615                 put_unused_buffer_head(tmp);
 616         }       
 617         free_page(page);
 618 }
 619 
 620 /*
 621  * try_to_free() checks if all the buffers on this particular page
 622  * are unused, and free's the page if so.
 623  */
static int try_to_free(struct buffer_head * bh)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        page = (unsigned long) bh->b_data;
        page &= 0xfffff000;     /* start of the 4k page holding the data */
        if (mem_map[MAP_NR(page)] != 1)
                return 0;       /* page shared with someone else */
        /* first pass: every buffer in the page ring must be idle */
        tmp = bh;
        do {
                if (!tmp)
                        return 0;       /* incomplete ring - bail out */
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* second pass: unlink and recycle every head, then the page */
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= 4096;
        free_page(page);
        return 1;
}
 653 
 654 /*
 655  * Try to free up some pages by shrinking the buffer-cache
 656  *
 657  * Priority tells the routine how hard to try to shrink the
 658  * buffers: 3 means "don't bother too much", while a value
 659  * of 0 means "we'd better get some free pages now".
 660  */
 661 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 662 {
 663         struct buffer_head *bh;
 664         int i;
 665 
 666         if (priority < 2)
 667                 sync_buffers(0);
 668         bh = free_list;
 669         i = nr_buffers >> priority;
 670         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 671                 if (bh->b_count || !bh->b_this_page)
 672                         continue;
 673                 if (bh->b_lock)
 674                         if (priority)
 675                                 continue;
 676                         else
 677                                 wait_on_buffer(bh);
 678                 if (bh->b_dirt) {
 679                         ll_rw_block(WRITEA, 1, &bh);
 680                         continue;
 681                 }
 682                 if (try_to_free(bh))
 683                         return 1;
 684         }
 685         return 0;
 686 }
 687 
 688 /*
 689  * This initializes the initial buffer free list.  nr_buffers is set
 690  * to one less the actual number of buffers, as a sop to backwards
 691  * compatibility --- the old code did this (I think unintentionally,
 692  * but I'm not sure), and programs in the ps package expect it.
 693  *                                      - TYT 8/30/92
 694  */
 695 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 696 {
 697         int i;
 698 
 699         if (high_memory >= 4*1024*1024)
 700                 min_free_pages = 200;
 701         else
 702                 min_free_pages = 20;
 703         for (i = 0 ; i < NR_HASH ; i++)
 704                 hash_table[i] = NULL;
 705         free_list = 0;
 706         grow_buffers(BLOCK_SIZE);
 707         if (!free_list)
 708                 panic("VFS: Unable to initialize buffer free list!");
 709         return;
 710 }

/* [previous][next][first][last][top][bottom][index][help] */