root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. invalidate_buffers
  6. check_disk_change
  7. remove_from_hash_queue
  8. remove_from_free_list
  9. remove_from_queues
  10. put_first_free
  11. put_last_free
  12. insert_into_queues
  13. find_buffer
  14. get_hash_table
  15. getblk
  16. brelse
  17. bread
  18. bread_page
  19. breada
  20. put_unused_buffer_head
  21. get_more_buffer_heads
  22. get_unused_buffer_head
  23. grow_buffers
  24. try_to_free
  25. shrink_buffers
  26. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/io.h>
  29 
  30 #ifdef CONFIG_SCSI
  31 #ifdef CONFIG_BLK_DEV_SR
  32 extern int check_cdrom_media_change(int, int);
  33 #endif
  34 #ifdef CONFIG_BLK_DEV_SD
  35 extern int check_scsidisk_media_change(int, int);
  36 extern int revalidate_scsidisk(int, int);
  37 #endif
  38 #endif
  39 
  40 static struct buffer_head * hash_table[NR_HASH];
  41 static struct buffer_head * free_list = NULL;
  42 static struct buffer_head * unused_list = NULL;
  43 static struct wait_queue * buffer_wait = NULL;
  44 
  45 int nr_buffers = 0;
  46 int buffermem = 0;
  47 int nr_buffer_heads = 0;
  48 
  49 /*
  50  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  51  * and getting rid of the cli-sti pairs. The wait-queue routines still
  52  * need cli-sti, but now it's just a couple of 386 instructions or so.
  53  *
  54  * Note that the real wait_on_buffer() is an inline function that checks
  55  * if 'b_wait' is set before calling this, so that the queues aren't set
  56  * up unnecessarily.
  57  */
/*
 * Sleep until the buffer is unlocked.  Called only via the inline
 * wait_on_buffer() wrapper, which has already seen b_lock set.
 *
 * Note the ordering: we add ourselves to the buffer's wait-queue
 * BEFORE testing b_lock, so an unlock (and its wake_up) racing with
 * us cannot be missed - at worst we are woken and test again.
 */
  58 void __wait_on_buffer(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
  59 {
  60         struct wait_queue wait = { current, NULL };
  61 
  62         add_wait_queue(&bh->b_wait, &wait);
repeat:
  63 repeat:
  64         current->state = TASK_UNINTERRUPTIBLE;
  65         if (bh->b_lock) {
  66                 schedule();
  67                 goto repeat;
  68         }
  69         remove_wait_queue(&bh->b_wait, &wait);
  70         current->state = TASK_RUNNING;
  71 }
  72 
  73 static void sync_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  74 {
  75         int i;
  76         struct buffer_head * bh;
  77 
  78         bh = free_list;
  79         for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
  80                 if (bh->b_lock)
  81                         continue;
  82                 if (!bh->b_dirt)
  83                         continue;
  84                 ll_rw_block(WRITE, 1, &bh);
  85         }
  86 }
  87 
/*
 * Flush all of a device's state to disk.  The buffers are synced
 * twice: the first pass starts the writes, syncing the super-blocks
 * and inodes may dirty further buffers, and the second pass picks
 * those up.  The writes are started, not waited for.
 */
  88 void sync_dev(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  89 {
  90         sync_buffers(dev);
  91         sync_supers(dev);
  92         sync_inodes(dev);
  93         sync_buffers(dev);
  94 }
  95 
/*
 * The sync() system call.  Dev 0 appears to act as an "all devices"
 * wildcard for the sync routines - NOTE(review): confirm against
 * sync_supers()/sync_inodes().  Always returns 0.
 */
  96 int sys_sync(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  97 {
  98         sync_dev(0);
  99         return 0;
 100 }
 101 
 102 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 103 {
 104         int i;
 105         struct buffer_head * bh;
 106 
 107         bh = free_list;
 108         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 109                 if (bh->b_dev != dev)
 110                         continue;
 111                 wait_on_buffer(bh);
 112                 if (bh->b_dev == dev)
 113                         bh->b_uptodate = bh->b_dirt = 0;
 114         }
 115 }
 116 
 117 /*
 118  * This routine checks whether a floppy has been changed, and
 119  * invalidates all buffer-cache-entries in that case. This
 120  * is a relatively slow routine, so we have to try to minimize using
 121  * it. Thus it is called only upon a 'mount' or 'open'. This
 122  * is the best way of combining speed and utility, I think.
 123  * People changing diskettes in the middle of an operation deserve
  124  * to lose :-)
 125  *
 126  * NOTE! Although currently this is only for floppies, the idea is
 127  * that any additional removable block-device will use this routine,
 128  * and that mount/open needn't know that floppies/whatever are
 129  * special.
 130  */
 131 void check_disk_change(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 132 {
 133         int i;
 134         struct buffer_head * bh;
 135 
     /* each case sets i non-zero iff the media was changed */
 136         switch(MAJOR(dev)){
 137         case 2: /* floppy disc */
     /* read block 0 so the driver has a chance to notice the change */
 138                 if (!(bh = getblk(dev,0,1024)))
 139                         return;
 140                 i = floppy_change(bh);
 141                 brelse(bh);
 142                 break;
 143 
 144 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
 145          case 8: /* Removable scsi disk */
 146                 i = check_scsidisk_media_change(dev, 0);
 147                 if (i) printk("Flushing buffers and inodes for SCSI disk\n");
 148                 break;
 149 #endif
 150 
 151 #if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
 152          case 11: /* CDROM */
 153                 i = check_cdrom_media_change(dev, 0);
 154                 if (i) printk("Flushing buffers and inodes for CDROM\n");
 155                 break;
 156 #endif
 157 
 158          default: /* not a removable device we know about */
 159                 return;
 160         };
 161 
     /* media unchanged: nothing to invalidate */
 162         if (!i) return;
 163 
     /* drop any mounted super-block, then the cached inodes/buffers */
 164         for (i=0 ; i<NR_SUPER ; i++)
 165                 if (super_block[i].s_dev == dev)
 166                         put_super(super_block[i].s_dev);
 167         invalidate_inodes(dev);
 168         invalidate_buffers(dev);
 169 
 170 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hard disk, because we have to invalidate
   all of the partitions that lie on the disk. */
 173         if (MAJOR(dev) == 8)
 174                 revalidate_scsidisk(dev, 0);
 175 #endif
 176 }
 177 
 178 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
 179 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 180 
 181 static inline void remove_from_hash_queue(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 182 {
 183         if (bh->b_next)
 184                 bh->b_next->b_prev = bh->b_prev;
 185         if (bh->b_prev)
 186                 bh->b_prev->b_next = bh->b_next;
 187         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 188                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 189         bh->b_next = bh->b_prev = NULL;
 190 }
 191 
 192 static inline void remove_from_free_list(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 193 {
 194         if (!(bh->b_prev_free) || !(bh->b_next_free))
 195                 panic("Free block list corrupted");
 196         bh->b_prev_free->b_next_free = bh->b_next_free;
 197         bh->b_next_free->b_prev_free = bh->b_prev_free;
 198         if (free_list == bh)
 199                 free_list = bh->b_next_free;
 200         bh->b_next_free = bh->b_prev_free = NULL;
 201 }
 202 
/*
 * Take a buffer off both the hash chain and the free list, prior to
 * re-keying it in getblk() or freeing its page in try_to_free().
 */
 203 static inline void remove_from_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 204 {
 205         remove_from_hash_queue(bh);
 206         remove_from_free_list(bh);
 207 }
 208 
 209 static inline void put_first_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 210 {
 211         if (!bh || (bh == free_list))
 212                 return;
 213         remove_from_free_list(bh);
 214 /* add to front of free list */
 215         bh->b_next_free = free_list;
 216         bh->b_prev_free = free_list->b_prev_free;
 217         free_list->b_prev_free->b_next_free = bh;
 218         free_list->b_prev_free = bh;
 219         free_list = bh;
 220 }
 221 
 222 static inline void put_last_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 223 {
 224         if (!bh)
 225                 return;
 226         if (bh == free_list) {
 227                 free_list = bh->b_next_free;
 228                 return;
 229         }
 230         remove_from_free_list(bh);
 231 /* add to back of free list */
 232         bh->b_next_free = free_list;
 233         bh->b_prev_free = free_list->b_prev_free;
 234         free_list->b_prev_free->b_next_free = bh;
 235         free_list->b_prev_free = bh;
 236 }
 237 
 238 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 239 {
 240 /* put at end of free list */
 241         bh->b_next_free = free_list;
 242         bh->b_prev_free = free_list->b_prev_free;
 243         free_list->b_prev_free->b_next_free = bh;
 244         free_list->b_prev_free = bh;
 245 /* put the buffer in new hash-queue if it has a device */
 246         bh->b_prev = NULL;
 247         bh->b_next = NULL;
 248         if (!bh->b_dev)
 249                 return;
 250         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 251         hash(bh->b_dev,bh->b_blocknr) = bh;
 252         if (bh->b_next)
 253                 bh->b_next->b_prev = bh;
 254 }
 255 
 256 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 257 {               
 258         struct buffer_head * tmp;
 259 
 260         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 261                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 262                         if (tmp->b_size == size)
 263                                 return tmp;
 264                         else {
 265                                 printk("wrong block-size on device %04x\n",dev);
 266                                 return NULL;
 267                         }
 268         return NULL;
 269 }
 270 
 271 /*
 272  * Why like this, I hear you say... The reason is race-conditions.
  273  * As we don't lock buffers (unless we are reading them, that is),
 274  * something might happen to it while we sleep (ie a read-error
 275  * will force it bad). This shouldn't really happen currently, but
 276  * the code is ready.
 277  */
/*
 * Find a cached buffer and return it with its reference count
 * incremented, or NULL on a miss.  We grab the reference BEFORE
 * sleeping on the buffer lock so it cannot be reused under us; if
 * its identity changed anyway while we slept (e.g. a read error
 * invalidated it), drop the reference and retry.
 */
 278 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 279 {
 280         struct buffer_head * bh;
 281 
 282         for (;;) {
 283                 if (!(bh=find_buffer(dev,block,size)))
 284                         return NULL;
 285                 bh->b_count++;
 286                 wait_on_buffer(bh);
 287                 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
 288                         return bh;
 289                 bh->b_count--;
 290         }
 291 }
 292 
 293 /*
 294  * Ok, this is getblk, and it isn't very clear, again to hinder
 295  * race-conditions. Most of the code is seldom used, (ie repeating),
 296  * so it should be much more efficient than it looks.
 297  *
  298  * The algorithm is changed: hopefully better, and an elusive bug removed.
 299  *
 300  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 301  * when the filesystem starts to get full of dirty blocks (I hope).
 302  */
/*
 * BADNESS ranks a buffer as a reuse candidate: 0 = clean and
 * unlocked (best), 1 = locked, 2/3 = dirty (worst).
 */
 303 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
 304 struct buffer_head * getblk(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 305 {
 306         struct buffer_head * bh, * tmp;
 307         int buffers;
 308 
 309 repeat:
     /* cache hit: a clean, valid buffer goes to the back of the free
        list so it is the last candidate for reuse */
 310         bh = get_hash_table(dev, block, size);
 311         if (bh) {
 312                 if (bh->b_uptodate && !bh->b_dirt)
 313                         put_last_free(bh);
 314                 return bh;
 315         }
 316 
     /* heuristic thresholds: with plenty of free pages and less than
        6MB of buffer memory, grow the cache rather than recycle */
 317         if (nr_free_pages > 30 && buffermem < 6*1024*1024)
 318                 grow_buffers(size);
 319 
 320         buffers = nr_buffers;
 321         bh = NULL;
 322 
     /* scan the whole free list for the least-bad unused buffer of
        the right size; BADNESS 0 is ideal, so stop early on it */
 323         for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
 324                 if (tmp->b_count || tmp->b_size != size)
 325                         continue;
 326                 if (!bh || BADNESS(tmp)<BADNESS(bh)) {
 327                         bh = tmp;
 328                         if (!BADNESS(tmp))
 329                                 break;
 330                 }
 331 #if 0
 332                 if (tmp->b_dirt)
 333                         ll_rw_block(WRITEA, 1, &tmp);
 334 #endif
 335         }
 336 
 337         if (!bh && nr_free_pages > 5) {
 338                 grow_buffers(size);
 339                 goto repeat;
 340         }
 341         
 342 /* and repeat until we find something good */
 343         if (!bh) {
 344                 sleep_on(&buffer_wait);
 345                 goto repeat;
 346         }
     /* the chosen buffer may have been grabbed, resized or dirtied
        while we slept - re-validate after every potential sleep */
 347         wait_on_buffer(bh);
 348         if (bh->b_count || bh->b_size != size)
 349                 goto repeat;
 350         if (bh->b_dirt) {
 351                 sync_buffers(bh->b_dev);
 352                 goto repeat;
 353         }
 354 /* NOTE!! While we slept waiting for this block, somebody else might */
 355 /* already have added "this" block to the cache. check it */
 356         if (find_buffer(dev,block,size))
 357                 goto repeat;
 358 /* OK, FINALLY we know that this buffer is the only one of it's kind, */
 359 /* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
 360         bh->b_count=1;
 361         bh->b_dirt=0;
 362         bh->b_uptodate=0;
     /* re-key the buffer: out of the old hash chain, in under (dev,block) */
 363         remove_from_queues(bh);
 364         bh->b_dev=dev;
 365         bh->b_blocknr=block;
 366         insert_into_queues(bh);
 367         return bh;
 368 }
 369 
 370 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 371 {
 372         if (!buf)
 373                 return;
 374         wait_on_buffer(buf);
 375         if (buf->b_count) {
 376                 if (--buf->b_count)
 377                         return;
 378                 wake_up(&buffer_wait);
 379                 return;
 380         }
 381         printk("Trying to free free buffer\n");
 382 }
 383 
 384 /*
 385  * bread() reads a specified block and returns the buffer that contains
 386  * it. It returns NULL if the block was unreadable.
 387  */
 388 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 389 {
 390         struct buffer_head * bh;
 391 
 392         if (!(bh = getblk(dev, block, size))) {
 393                 printk("bread: getblk returned NULL\n");
 394                 return NULL;
 395         }
 396         if (bh->b_uptodate)
 397                 return bh;
 398         ll_rw_block(READ, 1, &bh);
 399         wait_on_buffer(bh);
 400         if (bh->b_uptodate)
 401                 return bh;
 402         brelse(bh);
 403         return NULL;
 404 }
 405 
/*
 * Copy one BLOCK_SIZE block with a single "rep movsl".  'from' and
 * 'to' must not overlap and BLOCK_SIZE must be a multiple of 4.
 * (The clobber list uses the 16-bit register names old GCC expects.)
 */
 406 #define COPYBLK(from,to) \
 407 __asm__("cld\n\t" \
 408         "rep\n\t" \
 409         "movsl\n\t" \
 410         ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
 411         :"cx","di","si")
 412 
 413 /*
 414  * bread_page reads four buffers into memory at the desired address. It's
 415  * a function of its own, as there is some speed to be got by reading them
 416  * all at the same time, not waiting for one to be read, and then another
 417  * etc.
 418  */
 419 void bread_page(unsigned long address, dev_t dev, int b[4])
     /* [previous][next][first][last][top][bottom][index][help] */
 420 {
 421         struct buffer_head * bh[4];
 422         struct buffer_head * bhr[4];
 423         int bhnum = 0;
 424         int i;
 425 
     /* grab all four buffers first; collect those that still need
        reading so the requests can be issued in one batch */
 426         for (i=0 ; i<4 ; i++)
 427                 if (b[i]) {
 428                         bh[i] = getblk(dev, b[i], 1024);
 429                         if (bh[i] && !bh[i]->b_uptodate)
 430                                 bhr[bhnum++] = bh[i];
 431                 } else
 432                         bh[i] = NULL;
 433 
 434         if(bhnum)
 435           ll_rw_block(READ, bhnum, bhr);
 436 
     /* now wait for each buffer in turn, copy its data into the page
        (only if the read succeeded) and release it */
 437         for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
 438                 if (bh[i]) {
 439                         wait_on_buffer(bh[i]);
 440                         if (bh[i]->b_uptodate)
 441                                 COPYBLK((unsigned long) bh[i]->b_data,address);
 442                         brelse(bh[i]);
 443                 }
 444 }
 445 
 446 /*
 447  * Ok, breada can be used as bread, but additionally to mark other
 448  * blocks for reading as well. End the argument list with a negative
 449  * number.
 450  */
 451 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 452 {
 453         va_list args;
 454         struct buffer_head * bh, *tmp;
 455 
 456         va_start(args,first);
 457         if (!(bh = getblk(dev, first, 1024))) {
 458                 printk("breada: getblk returned NULL\n");
 459                 return NULL;
 460         }
 461         if (!bh->b_uptodate)
 462                 ll_rw_block(READ, 1, &bh);
 463         while ((first=va_arg(args,int))>=0) {
 464                 tmp = getblk(dev, first, 1024);
 465                 if (tmp) {
 466                         if (!tmp->b_uptodate)
 467                                 ll_rw_block(READA, 1, &tmp);
 468                         tmp->b_count--;
 469                 }
 470         }
 471         va_end(args);
 472         wait_on_buffer(bh);
 473         if (bh->b_uptodate)
 474                 return bh;
 475         brelse(bh);
 476         return (NULL);
 477 }
 478 
 479 /*
 480  * See fs/inode.c for the weird use of volatile..
 481  */
/*
 * Return a buffer head to the unused list.  The head is wiped, but
 * b_wait is carried across the memset: a sleeper may still be queued
 * on this head.  The volatile casts force the b_wait load and store
 * to really happen around the memset (see fs/inode.c for the same
 * trick).
 */
 482 static void put_unused_buffer_head(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 483 {
 484         struct wait_queue * wait;
 485 
 486         wait = ((volatile struct buffer_head *) bh)->b_wait;
 487         memset((void *) bh,0,sizeof(*bh));
 488         ((volatile struct buffer_head *) bh)->b_wait = wait;
 489         bh->b_next_free = unused_list;
 490         unused_list = bh;
 491 }
 492 
 493 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 494 {
 495         unsigned long page;
 496         struct buffer_head * bh;
 497 
 498         if (unused_list)
 499                 return;
 500         page = get_free_page(GFP_KERNEL);
 501         if (!page)
 502                 return;
 503         bh = (struct buffer_head *) page;
 504         while ((unsigned long) (bh+1) <= page+4096) {
 505                 put_unused_buffer_head(bh);
 506                 bh++;
 507                 nr_buffer_heads++;
 508         }
 509 }
 510 
 511 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 512 {
 513         struct buffer_head * bh;
 514 
 515         get_more_buffer_heads();
 516         if (!unused_list)
 517                 return NULL;
 518         bh = unused_list;
 519         unused_list = bh->b_next_free;
 520         bh->b_next_free = NULL;
 521         bh->b_data = NULL;
 522         bh->b_size = 0;
 523         return bh;
 524 }
 525 
 526 /*
 527  * Try to increase the number of buffers available: the size argument
 528  * is used to determine what kind of buffers we want. Currently only
 529  * 1024-byte buffers are supported by the rest of the system, but I
 530  * think this will change eventually.
 531  */
 532 void grow_buffers(int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 533 {
 534         unsigned long page;
 535         int i;
 536         struct buffer_head *bh, *tmp;
 537 
 538         if ((size & 511) || (size > 4096)) {
 539                 printk("grow_buffers: size = %d\n",size);
 540                 return;
 541         }
 542         page = get_free_page(GFP_BUFFER);
 543         if (!page)
 544                 return;
 545         tmp = NULL;
 546         i = 0;
 547         for (i = 0 ; i+size <= 4096 ; i += size) {
 548                 bh = get_unused_buffer_head();
 549                 if (!bh)
 550                         goto no_grow;
 551                 bh->b_this_page = tmp;
 552                 tmp = bh;
 553                 bh->b_data = (char * ) (page+i);
 554                 bh->b_size = size;
 555         }
 556         tmp = bh;
 557         while (1) {
 558                 if (free_list) {
 559                         tmp->b_next_free = free_list;
 560                         tmp->b_prev_free = free_list->b_prev_free;
 561                         free_list->b_prev_free->b_next_free = tmp;
 562                         free_list->b_prev_free = tmp;
 563                 } else {
 564                         tmp->b_prev_free = tmp;
 565                         tmp->b_next_free = tmp;
 566                 }
 567                 free_list = tmp;
 568                 ++nr_buffers;
 569                 if (tmp->b_this_page)
 570                         tmp = tmp->b_this_page;
 571                 else
 572                         break;
 573         }
 574         tmp->b_this_page = bh;
 575         buffermem += 4096;
 576         return;
 577 /*
 578  * In case anything failed, we just free everything we got.
 579  */
 580 no_grow:
 581         bh = tmp;
 582         while (bh) {
 583                 tmp = bh;
 584                 bh = bh->b_this_page;
 585                 put_unused_buffer_head(tmp);
 586         }       
 587         free_page(page);
 588 }
 589 
 590 /*
 591  * try_to_free() checks if all the buffers on this particular page
 592  * are unused, and free's the page if so.
 593  */
/*
 * try_to_free() checks whether every buffer on the page containing
 * 'bh' is unused (not referenced, clean and unlocked), and if so
 * releases all the buffer heads and frees the page.  Returns 1 if
 * the page was freed, 0 otherwise.  The buffers of a page form a
 * ring linked through b_this_page (see grow_buffers()).
 */
 594 static int try_to_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 595 {
 596         unsigned long page;
 597         struct buffer_head * tmp, * p;
 598 
     /* first pass: bail out if any buffer on the page is busy */
 599         tmp = bh;
 600         do {
 601                 if (!tmp)
 602                         return 0;
 603                 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
 604                         return 0;
 605                 tmp = tmp->b_this_page;
 606         } while (tmp != bh);
     /* mask down to the 4k page holding the buffer data */
 607         page = (unsigned long) bh->b_data;
 608         page &= 0xfffff000;
     /* second pass: unlink every head and return it to the unused list */
 609         tmp = bh;
 610         do {
 611                 p = tmp;
 612                 tmp = tmp->b_this_page;
 613                 nr_buffers--;
 614                 remove_from_queues(p);
 615                 put_unused_buffer_head(p);
 616         } while (tmp != bh);
 617         buffermem -= 4096;
 618         free_page(page);
 619         return 1;
 620 }
 621 
 622 /*
 623  * Try to free up some pages by shrinking the buffer-cache
 624  *
 625  * Priority tells the routine how hard to try to shrink the
 626  * buffers: 3 means "don't bother too much", while a value
 627  * of 0 means "we'd better get some free pages now".
 628  */
 629 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 630 {
 631         struct buffer_head *bh;
 632         int i;
 633 
 634         if (priority < 2)
 635                 sync_buffers(0);
 636         bh = free_list;
 637         i = nr_buffers >> priority;
 638         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 639                 if (bh->b_count || !bh->b_this_page)
 640                         continue;
 641                 if (bh->b_lock)
 642                         if (priority)
 643                                 continue;
 644                         else
 645                                 wait_on_buffer(bh);
 646                 if (bh->b_dirt) {
 647                         ll_rw_block(WRITEA, 1, &bh);
 648                         continue;
 649                 }
 650                 if (try_to_free(bh))
 651                         return 1;
 652         }
 653         return 0;
 654 }
 655 
 656 /*
 657  * This initializes the initial buffer free list.  nr_buffers is set
 658  * to one less the actual number of buffers, as a sop to backwards
 659  * compatibility --- the old code did this (I think unintentionally,
 660  * but I'm not sure), and programs in the ps package expect it.
 661  *                                      - TYT 8/30/92
 662  */
 663 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 664 {
 665         int i;
 666 
 667         for (i = 0 ; i < NR_HASH ; i++)
 668                 hash_table[i] = NULL;
 669         free_list = 0;
 670         grow_buffers(BLOCK_SIZE);
 671         if (!free_list)
 672                 panic("Unable to initialize buffer free list!");
 673         return;
 674 }

/* [previous][next][first][last][top][bottom][index][help] */