root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. invalidate_buffers
  6. check_disk_change
  7. remove_from_hash_queue
  8. remove_from_free_list
  9. remove_from_queues
  10. put_first_free
  11. put_last_free
  12. insert_into_queues
  13. find_buffer
  14. get_hash_table
  15. getblk
  16. brelse
  17. bread
  18. bread_page
  19. breada
  20. put_unused_buffer_head
  21. get_more_buffer_heads
  22. get_unused_buffer_head
  23. grow_buffers
  24. try_to_free
  25. shrink_buffers
  26. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/io.h>
  29 
  30 #ifdef CONFIG_SCSI
  31 #ifdef CONFIG_BLK_DEV_SR
  32 extern int check_cdrom_media_change(int, int);
  33 #endif
  34 #ifdef CONFIG_BLK_DEV_SD
  35 extern int check_scsidisk_media_change(int, int);
  36 extern int revalidate_scsidisk(int, int);
  37 #endif
  38 #endif
  39 
  40 static struct buffer_head * hash_table[NR_HASH];
  41 static struct buffer_head * free_list = NULL;
  42 static struct buffer_head * unused_list = NULL;
  43 static struct wait_queue * buffer_wait = NULL;
  44 
  45 int nr_buffers = 0;
  46 int buffermem = 0;
  47 int nr_buffer_heads = 0;
  48 static int min_free_pages = 20; /* nr free pages needed before buffer grows */
  49 
/*
 * Rewrote the wait-routines to use the "new" wait-queue functionality,
 * and getting rid of the cli-sti pairs. The wait-queue routines still
 * need cli-sti, but now it's just a couple of 386 instructions or so.
 *
 * Note that the real wait_on_buffer() is an inline function that checks
 * if 'b_wait' is set before calling this, so that the queues aren't set
 * up unnecessarily.
 */
/*
 * Sleep until 'bh' is unlocked.  The task is put on the buffer's wait
 * queue *before* b_lock is tested, and the state is set to
 * TASK_UNINTERRUPTIBLE before each test, so an unlock wake-up cannot be
 * lost between the test and schedule().
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };	/* on-stack wait entry */

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();	/* sleep; re-check on wake-up, which may be spurious */
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
  73 
  74 static void sync_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  75 {
  76         int i;
  77         struct buffer_head * bh;
  78 
  79         bh = free_list;
  80         for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
  81                 if (bh->b_lock)
  82                         continue;
  83                 if (!bh->b_dirt)
  84                         continue;
  85                 ll_rw_block(WRITE, 1, &bh);
  86         }
  87 }
  88 
/*
 * Flush everything for one device (0 = all devices).  sync_buffers()
 * is called both before and after the super-block/inode sync,
 * presumably because writing supers and inodes dirties more buffers
 * which the second pass then picks up -- TODO confirm against
 * sync_supers()/sync_inodes().
 */
void sync_dev(dev_t dev)
{
        sync_buffers(dev);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev);
}
  96 
/*
 * The sync(2) system call: flush buffers, supers and inodes for all
 * devices (dev 0 is passed down to mean "everything").  Always
 * reports success.
 */
int sys_sync(void)
{
        sync_dev(0);
        return 0;
}
 102 
 103 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 104 {
 105         int i;
 106         struct buffer_head * bh;
 107 
 108         bh = free_list;
 109         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 110                 if (bh->b_dev != dev)
 111                         continue;
 112                 wait_on_buffer(bh);
 113                 if (bh->b_dev == dev)
 114                         bh->b_uptodate = bh->b_dirt = 0;
 115         }
 116 }
 117 
/*
 * This routine checks whether a floppy has been changed, and
 * invalidates all buffer-cache-entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 *
 * NOTE! Although currently this is only for floppies, the idea is
 * that any additional removable block-device will use this routine,
 * and that mount/open needn't know that floppies/whatever are
 * special.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	/* 'i' ends up non-zero iff the driver reported a media change. */
	switch(MAJOR(dev)){
	case 2: /* floppy disc */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	 case 8: /* Removable scsi disk */
		i = check_scsidisk_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for SCSI disk\n");
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	 case 11: /* CDROM */
		i = check_cdrom_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for CDROM\n");
		break;
#endif

	 default:
		return;		/* not a removable device we handle */
	};

	if (!i) return;		/* media unchanged - cache still valid */

	/* The medium changed: drop every cached super block, inode and
	   buffer that belongs to this device. */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hardisk, because we have to invalidate
   all of the partitions that lie on the disk. */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
 178 
/* Map a (device, block) pair to its chain head in hash_table[]. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
 181 
 182 static inline void remove_from_hash_queue(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 183 {
 184         if (bh->b_next)
 185                 bh->b_next->b_prev = bh->b_prev;
 186         if (bh->b_prev)
 187                 bh->b_prev->b_next = bh->b_next;
 188         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 189                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 190         bh->b_next = bh->b_prev = NULL;
 191 }
 192 
 193 static inline void remove_from_free_list(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 194 {
 195         if (!(bh->b_prev_free) || !(bh->b_next_free))
 196                 panic("Free block list corrupted");
 197         bh->b_prev_free->b_next_free = bh->b_next_free;
 198         bh->b_next_free->b_prev_free = bh->b_prev_free;
 199         if (free_list == bh)
 200                 free_list = bh->b_next_free;
 201         bh->b_next_free = bh->b_prev_free = NULL;
 202 }
 203 
/* Take a buffer completely out of circulation: off its hash chain
   (so lookups miss it) and off the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
        remove_from_hash_queue(bh);
        remove_from_free_list(bh);
}
 209 
 210 static inline void put_first_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 211 {
 212         if (!bh || (bh == free_list))
 213                 return;
 214         remove_from_free_list(bh);
 215 /* add to front of free list */
 216         bh->b_next_free = free_list;
 217         bh->b_prev_free = free_list->b_prev_free;
 218         free_list->b_prev_free->b_next_free = bh;
 219         free_list->b_prev_free = bh;
 220         free_list = bh;
 221 }
 222 
 223 static inline void put_last_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 224 {
 225         if (!bh)
 226                 return;
 227         if (bh == free_list) {
 228                 free_list = bh->b_next_free;
 229                 return;
 230         }
 231         remove_from_free_list(bh);
 232 /* add to back of free list */
 233         bh->b_next_free = free_list;
 234         bh->b_prev_free = free_list->b_prev_free;
 235         free_list->b_prev_free->b_next_free = bh;
 236         free_list->b_prev_free = bh;
 237 }
 238 
 239 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 240 {
 241 /* put at end of free list */
 242         bh->b_next_free = free_list;
 243         bh->b_prev_free = free_list->b_prev_free;
 244         free_list->b_prev_free->b_next_free = bh;
 245         free_list->b_prev_free = bh;
 246 /* put the buffer in new hash-queue if it has a device */
 247         bh->b_prev = NULL;
 248         bh->b_next = NULL;
 249         if (!bh->b_dev)
 250                 return;
 251         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 252         hash(bh->b_dev,bh->b_blocknr) = bh;
 253         if (bh->b_next)
 254                 bh->b_next->b_prev = bh;
 255 }
 256 
 257 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 258 {               
 259         struct buffer_head * tmp;
 260 
 261         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 262                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 263                         if (tmp->b_size == size)
 264                                 return tmp;
 265                         else {
 266                                 printk("wrong block-size on device %04x\n",dev);
 267                                 return NULL;
 268                         }
 269         return NULL;
 270 }
 271 
 272 /*
 273  * Why like this, I hear you say... The reason is race-conditions.
 274  * As we don't lock buffers (unless we are readint them, that is),
 275  * something might happen to it while we sleep (ie a read-error
 276  * will force it bad). This shouldn't really happen currently, but
 277  * the code is ready.
 278  */
 279 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 280 {
 281         struct buffer_head * bh;
 282 
 283         for (;;) {
 284                 if (!(bh=find_buffer(dev,block,size)))
 285                         return NULL;
 286                 bh->b_count++;
 287                 wait_on_buffer(bh);
 288                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 289                         return bh;
 290                 bh->b_count--;
 291         }
 292 }
 293 
/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
/* BADNESS ranks how costly a buffer is to reuse: clean+unlocked (0)
   is best, locked (1) next, dirty (2) and dirty+locked (3) worst. */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;	/* bytes requested until next growth attempt */

repeat:
	/* Fast path: the block is already cached. */
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* Grow the cache every 4096 bytes requested, while free memory
	   and the 6MB buffer-memory cap allow it. */
	grow_size -= size;
	if (nr_free_pages > min_free_pages &&
	    buffermem < 6*1024*1024 &&
	    grow_size <= 0) {
		grow_buffers(size);
		grow_size = 4096;
	}
	buffers = nr_buffers;
	bh = NULL;

	/* Scan the free list for the least-bad reusable buffer of the
	   right size; a perfectly clean unlocked one ends the search. */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	/* Nothing reusable: try to grow, or wait for brelse() to free one. */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}
	
/* and repeat until we find something good */
	if (!bh) {
		sleep_on(&buffer_wait);	/* woken by brelse() */
		goto repeat;
	}
	/* The chosen buffer may change while we sleep - re-validate
	   everything after each potential sleep. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
	if (find_buffer(dev,block,size))
		goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
 374 
 375 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 376 {
 377         if (!buf)
 378                 return;
 379         wait_on_buffer(buf);
 380         if (buf->b_count) {
 381                 if (--buf->b_count)
 382                         return;
 383                 wake_up(&buffer_wait);
 384                 return;
 385         }
 386         printk("Trying to free free buffer\n");
 387 }
 388 
 389 /*
 390  * bread() reads a specified block and returns the buffer that contains
 391  * it. It returns NULL if the block was unreadable.
 392  */
 393 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 394 {
 395         struct buffer_head * bh;
 396 
 397         if (!(bh = getblk(dev, block, size))) {
 398                 printk("bread: getblk returned NULL\n");
 399                 return NULL;
 400         }
 401         if (bh->b_uptodate)
 402                 return bh;
 403         ll_rw_block(READ, 1, &bh);
 404         wait_on_buffer(bh);
 405         if (bh->b_uptodate)
 406                 return bh;
 407         brelse(bh);
 408         return NULL;
 409 }
 410 
/*
 * Copy one BLOCK_SIZE block from 'from' to 'to' using "rep movsl"
 * (BLOCK_SIZE/4 32-bit words).  The direction flag is cleared first,
 * and cx/di/si are declared clobbered.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
        "rep\n\t" \
        "movsl\n\t" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        :"cx","di","si")
 417 
 418 /*
 419  * bread_page reads four buffers into memory at the desired address. It's
 420  * a function of its own, as there is some speed to be got by reading them
 421  * all at the same time, not waiting for one to be read, and then another
 422  * etc.
 423  */
 424 void bread_page(unsigned long address, dev_t dev, int b[4])
     /* [previous][next][first][last][top][bottom][index][help] */
 425 {
 426         struct buffer_head * bh[4];
 427         struct buffer_head * bhr[4];
 428         int bhnum = 0;
 429         int i;
 430 
 431         for (i=0 ; i<4 ; i++)
 432                 if (b[i]) {
 433                         bh[i] = getblk(dev, b[i], 1024);
 434                         if (bh[i] && !bh[i]->b_uptodate)
 435                                 bhr[bhnum++] = bh[i];
 436                 } else
 437                         bh[i] = NULL;
 438 
 439         if (bhnum)
 440                 ll_rw_block(READ, bhnum, bhr);
 441 
 442         for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
 443                 if (bh[i]) {
 444                         wait_on_buffer(bh[i]);
 445                         if (bh[i]->b_uptodate)
 446                                 COPYBLK((unsigned long) bh[i]->b_data,address);
 447                         brelse(bh[i]);
 448                 }
 449 }
 450 
 451 /*
 452  * Ok, breada can be used as bread, but additionally to mark other
 453  * blocks for reading as well. End the argument list with a negative
 454  * number.
 455  */
 456 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 457 {
 458         va_list args;
 459         struct buffer_head * bh, *tmp;
 460 
 461         va_start(args,first);
 462         if (!(bh = getblk(dev, first, 1024))) {
 463                 printk("breada: getblk returned NULL\n");
 464                 return NULL;
 465         }
 466         if (!bh->b_uptodate)
 467                 ll_rw_block(READ, 1, &bh);
 468         while ((first=va_arg(args,int))>=0) {
 469                 tmp = getblk(dev, first, 1024);
 470                 if (tmp) {
 471                         if (!tmp->b_uptodate)
 472                                 ll_rw_block(READA, 1, &tmp);
 473                         tmp->b_count--;
 474                 }
 475         }
 476         va_end(args);
 477         wait_on_buffer(bh);
 478         if (bh->b_uptodate)
 479                 return bh;
 480         brelse(bh);
 481         return (NULL);
 482 }
 483 
/*
 * See fs/inode.c for the weird use of volatile..
 */
/* Wipe a buffer head and push it onto the unused list.  The wait
   queue is preserved across the memset because sleepers may still
   hold a pointer into this head; the volatile casts keep the
   compiler from optimizing the load/store around the memset. */
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset((void *) bh,0,sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        bh->b_next_free = unused_list;
        unused_list = bh;
}
 497 
 498 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 499 {
 500         unsigned long page;
 501         struct buffer_head * bh;
 502 
 503         if (unused_list)
 504                 return;
 505         page = get_free_page(GFP_KERNEL);
 506         if (!page)
 507                 return;
 508         bh = (struct buffer_head *) page;
 509         while ((unsigned long) (bh+1) <= page+4096) {
 510                 put_unused_buffer_head(bh);
 511                 bh++;
 512                 nr_buffer_heads++;
 513         }
 514 }
 515 
 516 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 517 {
 518         struct buffer_head * bh;
 519 
 520         get_more_buffer_heads();
 521         if (!unused_list)
 522                 return NULL;
 523         bh = unused_list;
 524         unused_list = bh->b_next_free;
 525         bh->b_next_free = NULL;
 526         bh->b_data = NULL;
 527         bh->b_size = 0;
 528         return bh;
 529 }
 530 
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want. Currently only
 * 1024-byte buffers are supported by the rest of the system, but I
 * think this will change eventually.
 *
 * One page is carved into buffers of 'size' bytes (size must be a
 * multiple of 512 and at most one page).  All heads on the page are
 * linked into a circular ring through b_this_page, and each is pushed
 * onto the front of the free list.
 */
void grow_buffers(int size)
{
        unsigned long page;
        int i;
        struct buffer_head *bh, *tmp;

        if ((size & 511) || (size > 4096)) {
                printk("grow_buffers: size = %d\n",size);
                return;
        }
        page = get_free_page(GFP_BUFFER);
        if (!page)
                return;         /* no memory - silently give up */
        tmp = NULL;
        i = 0;
        /* Carve the page into buffers, chaining them through
           b_this_page; 'tmp' ends up at the last head allocated. */
        for (i = 0 ; i+size <= 4096 ; i += size) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = tmp;
                tmp = bh;
                bh->b_data = (char * ) (page+i);
                bh->b_size = size;
        }
        tmp = bh;
        /* Push every head on the page onto the front of the free list. */
        while (1) {
                if (free_list) {
                        tmp->b_next_free = free_list;
                        tmp->b_prev_free = free_list->b_prev_free;
                        free_list->b_prev_free->b_next_free = tmp;
                        free_list->b_prev_free = tmp;
                } else {
                        /* First buffer ever: a one-element circular list. */
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                free_list = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        tmp->b_this_page = bh;  /* close the per-page ring */
        buffermem += 4096;
        return;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        bh = tmp;
        while (bh) {
                tmp = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }       
        free_page(page);
}
 594 
/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.
 *
 * Returns 1 if the page was freed, 0 otherwise.
 */
static int try_to_free(struct buffer_head * bh)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        /* First pass round the page's b_this_page ring: every buffer
           must be unreferenced, clean and unlocked, or we give up. */
        tmp = bh;
        do {
                if (!tmp)
                        return 0;       /* broken ring - play safe */
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* b_data points somewhere inside the page; mask down to the
           4096-byte page base. */
        page = (unsigned long) bh->b_data;
        page &= 0xfffff000;
        /* Second pass: unlink and recycle every head on the page. */
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= 4096;
        free_page(page);
        return 1;
}
 626 
 627 /*
 628  * Try to free up some pages by shrinking the buffer-cache
 629  *
 630  * Priority tells the routine how hard to try to shrink the
 631  * buffers: 3 means "don't bother too much", while a value
 632  * of 0 means "we'd better get some free pages now".
 633  */
 634 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 635 {
 636         struct buffer_head *bh;
 637         int i;
 638 
 639         if (priority < 2)
 640                 sync_buffers(0);
 641         bh = free_list;
 642         i = nr_buffers >> priority;
 643         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 644                 if (bh->b_count || !bh->b_this_page)
 645                         continue;
 646                 if (bh->b_lock)
 647                         if (priority)
 648                                 continue;
 649                         else
 650                                 wait_on_buffer(bh);
 651                 if (bh->b_dirt) {
 652                         ll_rw_block(WRITEA, 1, &bh);
 653                         continue;
 654                 }
 655                 if (try_to_free(bh))
 656                         return 1;
 657         }
 658         return 0;
 659 }
 660 
 661 /*
 662  * This initializes the initial buffer free list.  nr_buffers is set
 663  * to one less the actual number of buffers, as a sop to backwards
 664  * compatibility --- the old code did this (I think unintentionally,
 665  * but I'm not sure), and programs in the ps package expect it.
 666  *                                      - TYT 8/30/92
 667  */
 668 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 669 {
 670         int i;
 671 
 672         if (high_memory >= 4*1024*1024)
 673                 min_free_pages = 200;
 674         else
 675                 min_free_pages = 20;
 676         for (i = 0 ; i < NR_HASH ; i++)
 677                 hash_table[i] = NULL;
 678         free_list = 0;
 679         grow_buffers(BLOCK_SIZE);
 680         if (!free_list)
 681                 panic("Unable to initialize buffer free list!");
 682         return;
 683 }

/* [previous][next][first][last][top][bottom][index][help] */