root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. invalidate_buffers
  6. check_disk_change
  7. remove_from_hash_queue
  8. remove_from_free_list
  9. remove_from_queues
  10. put_first_free
  11. put_last_free
  12. insert_into_queues
  13. find_buffer
  14. get_hash_table
  15. getblk
  16. brelse
  17. bread
  18. bread_page
  19. breada
  20. put_unused_buffer_head
  21. get_more_buffer_heads
  22. get_unused_buffer_head
  23. grow_buffers
  24. try_to_free
  25. shrink_buffers
  26. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting a interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/io.h>
  29 
  30 #ifdef CONFIG_SCSI
  31 #ifdef CONFIG_BLK_DEV_SR
  32 extern int check_cdrom_media_change(int, int);
  33 #endif
  34 #ifdef CONFIG_BLK_DEV_SD
  35 extern int check_scsidisk_media_change(int, int);
  36 extern int revalidate_scsidisk(int, int);
  37 #endif
  38 #endif
  39 
  40 static struct buffer_head * hash_table[NR_HASH];
  41 static struct buffer_head * free_list = NULL;
  42 static struct buffer_head * unused_list = NULL;
  43 static struct wait_queue * buffer_wait = NULL;
  44 
  45 int nr_buffers = 0;
  46 int nr_buffer_heads = 0;
  47 
  48 /*
  49  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  50  * and getting rid of the cli-sti pairs. The wait-queue routines still
  51  * need cli-sti, but now it's just a couple of 386 instructions or so.
  52  *
  53  * Note that the real wait_on_buffer() is an inline function that checks
  54  * if 'b_wait' is set before calling this, so that the queues aren't set
  55  * up unnecessarily.
  56  */
/*
 * Sleep until 'bh' is unlocked.  Callers use the inline wait_on_buffer()
 * wrapper, which only calls this slow path when the buffer is locked.
 *
 * The ordering is deliberate: we go TASK_UNINTERRUPTIBLE *before*
 * re-testing b_lock, so a wake_up between the test and the schedule()
 * cannot be lost (it would just set us runnable again).
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&bh->b_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait, &wait);
        current->state = TASK_RUNNING;
}
  71 
  72 static void sync_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  73 {
  74         int i;
  75         struct buffer_head * bh;
  76 
  77         bh = free_list;
  78         for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
  79                 if (bh->b_lock)
  80                         continue;
  81                 if (!bh->b_dirt)
  82                         continue;
  83                 ll_rw_block(WRITE, 1, &bh);
  84         }
  85 }
  86 
/*
 * Flush everything for 'dev' (0 = all devices): buffers, then
 * superblocks, then inodes.  Buffers are synced a second time because
 * writing the supers/inodes back may have dirtied fresh buffers.
 */
void sync_dev(dev_t dev)
{
        sync_buffers(dev);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev);
}
  94 
/*
 * sync(2) system call: flush all cached data for every device
 * (dev 0 acts as a wildcard in sync_dev).  Always succeeds.
 */
int sys_sync(void)
{
        sync_dev(0);
        return 0;
}
 100 
 101 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 102 {
 103         int i;
 104         struct buffer_head * bh;
 105 
 106         bh = free_list;
 107         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 108                 if (bh->b_dev != dev)
 109                         continue;
 110                 wait_on_buffer(bh);
 111                 if (bh->b_dev == dev)
 112                         bh->b_uptodate = bh->b_dirt = 0;
 113         }
 114 }
 115 
 116 /*
 117  * This routine checks whether a floppy has been changed, and
 118  * invalidates all buffer-cache-entries in that case. This
 119  * is a relatively slow routine, so we have to try to minimize using
 120  * it. Thus it is called only upon a 'mount' or 'open'. This
 121  * is the best way of combining speed and utility, I think.
 122  * People changing diskettes in the middle of an operation deserve
  123  * to lose :-)
 124  *
 125  * NOTE! Although currently this is only for floppies, the idea is
 126  * that any additional removable block-device will use this routine,
 127  * and that mount/open needn't know that floppies/whatever are
 128  * special.
 129  */
/*
 * Ask the driver whether the medium in 'dev' has been changed; if so,
 * throw away every cached superblock, inode and buffer for it.
 * Called from mount/open only (it is slow) -- see the comment above.
 */
void check_disk_change(dev_t dev)
{
        int i;
        struct buffer_head * bh;

        /* 'i' is set to the driver's changed-flag by every case that
           falls through; unknown majors return before it is read. */
        switch(MAJOR(dev)){
        case 2: /* floppy disc */
                /* floppy_change() needs a buffer on the drive to probe */
                if (!(bh = getblk(dev,0,1024)))
                        return;
                i = floppy_change(bh);
                brelse(bh);
                break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
         case 8: /* Removable scsi disk */
                i = check_scsidisk_media_change(dev, 0);
                if (i) printk("Flushing buffers and inodes for SCSI disk\n");
                break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
         case 11: /* CDROM */
                i = check_cdrom_media_change(dev, 0);
                if (i) printk("Flushing buffers and inodes for CDROM\n");
                break;
#endif

         default: /* not a removable device we know about */
                return;
        };

        if (!i) return;        /* medium unchanged: nothing to do */

        /* drop every superblock mounted on this device */
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_block[i].s_dev == dev)
                        put_super(super_block[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hard disk, because we have to
   invalidate all of the partitions that lie on the disk. */
        if (MAJOR(dev) == 8)
                revalidate_scsidisk(dev, 0);
#endif
}
 176 
 177 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
 178 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 179 
 180 static inline void remove_from_hash_queue(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 181 {
 182         if (bh->b_next)
 183                 bh->b_next->b_prev = bh->b_prev;
 184         if (bh->b_prev)
 185                 bh->b_prev->b_next = bh->b_next;
 186         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 187                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 188         bh->b_next = bh->b_prev = NULL;
 189 }
 190 
 191 static inline void remove_from_free_list(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 192 {
 193         if (!(bh->b_prev_free) || !(bh->b_next_free))
 194                 panic("Free block list corrupted");
 195         bh->b_prev_free->b_next_free = bh->b_next_free;
 196         bh->b_next_free->b_prev_free = bh->b_prev_free;
 197         if (free_list == bh)
 198                 free_list = bh->b_next_free;
 199         bh->b_next_free = bh->b_prev_free = NULL;
 200 }
 201 
/*
 * Fully detach 'bh' from the cache: off its hash chain and off the
 * free list, prior to re-keying it in getblk() or freeing its page.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
        remove_from_hash_queue(bh);
        remove_from_free_list(bh);
}
 207 
 208 static inline void put_first_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 209 {
 210         if (!bh || (bh == free_list))
 211                 return;
 212         remove_from_free_list(bh);
 213 /* add to front of free list */
 214         bh->b_next_free = free_list;
 215         bh->b_prev_free = free_list->b_prev_free;
 216         free_list->b_prev_free->b_next_free = bh;
 217         free_list->b_prev_free = bh;
 218         free_list = bh;
 219 }
 220 
 221 static inline void put_last_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 222 {
 223         if (!bh)
 224                 return;
 225         if (bh == free_list) {
 226                 free_list = bh->b_next_free;
 227                 return;
 228         }
 229         remove_from_free_list(bh);
 230 /* add to back of free list */
 231         bh->b_next_free = free_list;
 232         bh->b_prev_free = free_list->b_prev_free;
 233         free_list->b_prev_free->b_next_free = bh;
 234         free_list->b_prev_free = bh;
 235 }
 236 
 237 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 238 {
 239 /* put at end of free list */
 240         bh->b_next_free = free_list;
 241         bh->b_prev_free = free_list->b_prev_free;
 242         free_list->b_prev_free->b_next_free = bh;
 243         free_list->b_prev_free = bh;
 244 /* put the buffer in new hash-queue if it has a device */
 245         bh->b_prev = NULL;
 246         bh->b_next = NULL;
 247         if (!bh->b_dev)
 248                 return;
 249         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 250         hash(bh->b_dev,bh->b_blocknr) = bh;
 251         if (bh->b_next)
 252                 bh->b_next->b_prev = bh;
 253 }
 254 
 255 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 256 {               
 257         struct buffer_head * tmp;
 258 
 259         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 260                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 261                         if (tmp->b_size == size)
 262                                 return tmp;
 263                         else {
 264                                 printk("wrong block-size on device %04x\n",dev);
 265                                 return NULL;
 266                         }
 267         return NULL;
 268 }
 269 
 270 /*
 271  * Why like this, I hear you say... The reason is race-conditions.
  272  * As we don't lock buffers (unless we are reading them, that is),
 273  * something might happen to it while we sleep (ie a read-error
 274  * will force it bad). This shouldn't really happen currently, but
 275  * the code is ready.
 276  */
/*
 * Look up (dev,block,size) in the hash table and return it with an
 * extra reference, or NULL.  The count is bumped BEFORE sleeping in
 * wait_on_buffer() so the buffer cannot be reclaimed under us; if the
 * buffer was re-keyed while we slept, drop the reference and retry.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block,size)))
                        return NULL;
                bh->b_count++;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size) {
                        /* still ours: demote it on the free list and return */
                        put_last_free(bh);
                        return bh;
                }
                /* identity changed while asleep: undo and retry */
                bh->b_count--;
        }
}
 293 
 294 /*
 295  * Ok, this is getblk, and it isn't very clear, again to hinder
 296  * race-conditions. Most of the code is seldom used, (ie repeating),
 297  * so it should be much more efficient than it looks.
 298  *
  299  * The algorithm is changed: hopefully better, and an elusive bug removed.
 300  *
 301  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 302  * when the filesystem starts to get full of dirty blocks (I hope).
 303  */
/*
 * BADNESS ranks how costly a buffer is to recycle:
 * 0 = clean+unlocked, 1 = locked, 2 = dirty, 3 = dirty+locked.
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a buffer for (dev,block,size): either the cached one, or a
 * recycled/new buffer (b_count=1, not up to date).  May sleep; every
 * sleep is followed by a full re-check via 'goto repeat'.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh, * tmp;
        int buffers;

repeat:
        /* fast path: block already cached */
        if (bh = get_hash_table(dev, block, size))
                return bh;

        /* memory is plentiful: grow the cache rather than recycle */
        if (nr_free_pages > 30)
                grow_buffers(size);

        buffers = nr_buffers;
        bh = NULL;

        /* choose the cheapest (lowest BADNESS) unused, right-sized buffer */
        for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
                if (tmp->b_count || tmp->b_size != size)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))
                                break;        /* clean+unlocked: can't do better */
                }
#if 0
                if (tmp->b_dirt)
                        ll_rw_block(WRITEA, 1, &tmp);
#endif
        }

        /* nothing reclaimable but some memory left: grow and retry */
        if (!bh && nr_free_pages > 5) {
                grow_buffers(size);
                goto repeat;
        }
        
/* and repeat until we find something good */
        if (!bh) {
                sleep_on(&buffer_wait);        /* woken by brelse() */
                goto repeat;
        }
        /* we may sleep here -- afterwards nothing can be trusted */
        wait_on_buffer(bh);
        if (bh->b_count || bh->b_size != size)
                goto repeat;
        if (bh->b_dirt) {
                sync_buffers(bh->b_dev);
                goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block,size))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        /* re-key the buffer: unhash, relabel, rehash */
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
 366 
 367 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 368 {
 369         if (!buf)
 370                 return;
 371         wait_on_buffer(buf);
 372         if (!(buf->b_count--))
 373                 panic("Trying to free free buffer");
 374         wake_up(&buffer_wait);
 375 }
 376 
 377 /*
 378  * bread() reads a specified block and returns the buffer that contains
 379  * it. It returns NULL if the block was unreadable.
 380  */
 381 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 382 {
 383         struct buffer_head * bh;
 384 
 385         if (!(bh = getblk(dev, block, size))) {
 386                 printk("bread: getblk returned NULL\n");
 387                 return NULL;
 388         }
 389         if (bh->b_uptodate)
 390                 return bh;
 391         ll_rw_block(READ, 1, &bh);
 392         wait_on_buffer(bh);
 393         if (bh->b_uptodate)
 394                 return bh;
 395         brelse(bh);
 396         return NULL;
 397 }
 398 
/*
 * Copy one BLOCK_SIZE block from 'from' to 'to', a longword at a time
 * (i386 rep movsl).  Clobbers cx/di/si, hence the clobber list.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
        "rep\n\t" \
        "movsl\n\t" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        :"cx","di","si")

/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc.  b[i] == 0 marks a hole: that page slot is left untouched.
 */
void bread_page(unsigned long address, dev_t dev, int b[4])
{
        struct buffer_head * bh[4];
        int i;

        /* first pass: kick off all four reads without waiting */
        for (i=0 ; i<4 ; i++)
                if (b[i]) {
                        if (bh[i] = getblk(dev, b[i], 1024))
                                if (!bh[i]->b_uptodate)
                                        ll_rw_block(READ, 1, &bh[i]);
                } else
                        bh[i] = NULL;
        /* second pass: wait, copy into the page, release */
        for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                        if (bh[i]->b_uptodate)
                                COPYBLK((unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
}
 432 
 433 /*
 434  * Ok, breada can be used as bread, but additionally to mark other
 435  * blocks for reading as well. End the argument list with a negative
 436  * number.
 437  */
 438 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 439 {
 440         va_list args;
 441         struct buffer_head * bh, *tmp;
 442 
 443         va_start(args,first);
 444         if (!(bh = getblk(dev, first, 1024))) {
 445                 printk("breada: getblk returned NULL\n");
 446                 return NULL;
 447         }
 448         if (!bh->b_uptodate)
 449                 ll_rw_block(READ, 1, &bh);
 450         while ((first=va_arg(args,int))>=0) {
 451                 tmp = getblk(dev, first, 1024);
 452                 if (tmp) {
 453                         if (!tmp->b_uptodate)
 454                                 ll_rw_block(READA, 1, &tmp);
 455                         tmp->b_count--;
 456                 }
 457         }
 458         va_end(args);
 459         wait_on_buffer(bh);
 460         if (bh->b_uptodate)
 461                 return bh;
 462         brelse(bh);
 463         return (NULL);
 464 }
 465 
/*
 * Return a buffer head to the unused pool.  The whole struct is wiped,
 * but b_wait is saved across the memset: sleepers may still be queued
 * on it.  See fs/inode.c for the weird use of volatile (it forces the
 * field to be re-read/re-written around the memset).
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset((void *) bh,0,sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        bh->b_next_free = unused_list;
        unused_list = bh;
}
 479 
 480 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 481 {
 482         unsigned long page;
 483         struct buffer_head * bh;
 484 
 485         if (unused_list)
 486                 return;
 487         page = get_free_page(GFP_KERNEL);
 488         if (!page)
 489                 return;
 490         bh = (struct buffer_head *) page;
 491         while ((unsigned long) (bh+1) <= page+4096) {
 492                 put_unused_buffer_head(bh);
 493                 bh++;
 494                 nr_buffer_heads++;
 495         }
 496 }
 497 
 498 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 499 {
 500         struct buffer_head * bh;
 501 
 502         get_more_buffer_heads();
 503         if (!unused_list)
 504                 return NULL;
 505         bh = unused_list;
 506         unused_list = bh->b_next_free;
 507         bh->b_next_free = NULL;
 508         bh->b_data = NULL;
 509         bh->b_size = 0;
 510         return bh;
 511 }
 512 
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want. Currently only
 * 1024-byte buffers are supported by the rest of the system, but I
 * think this will change eventually.
 *
 * One page is split into 4096/size buffers; the b_this_page pointers
 * of those buffers form their own ring so the whole page can later be
 * reclaimed in one go by try_to_free().
 */
void grow_buffers(int size)
{
        unsigned long page;
        int i;
        struct buffer_head *bh, *tmp;

        /* size must be a multiple of 512 and fit in one page */
        if ((size & 511) || (size > 4096)) {
                printk("grow_buffers: size = %d\n",size);
                return;
        }
        page = get_free_page(GFP_BUFFER);
        if (!page)
                return;
        tmp = NULL;
        i = 0;
        /* build a NULL-terminated b_this_page chain covering the page;
           'tmp' ends up as the head (highest offset first allocated last) */
        for (i = 0 ; i+size <= 4096 ; i += size) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = tmp;
                tmp = bh;
                bh->b_data = (char * ) (page+i);
                bh->b_size = size;
        }
        tmp = bh;
        /* push each head onto the (circular) free list */
        while (1) {
                if (free_list) {
                        tmp->b_next_free = free_list;
                        tmp->b_prev_free = free_list->b_prev_free;
                        free_list->b_prev_free->b_next_free = tmp;
                        free_list->b_prev_free = tmp;
                } else {
                        /* first buffer ever: a ring of one */
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                free_list = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        /* close the per-page chain into a ring */
        tmp->b_this_page = bh;
        return;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        bh = tmp;
        while (bh) {
                tmp = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }       
        free_page(page);
}
 575 
/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.  Returns 1 if the page was
 * freed, 0 otherwise.  Walks the b_this_page ring twice: once to
 * verify every buffer is idle, once to tear them all down.
 */
static int try_to_free(struct buffer_head * bh)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        tmp = bh;
        do {
                if (!tmp)        /* broken/unfinished ring: bail out */
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* page-align b_data to recover the underlying page address */
        page = (unsigned long) bh->b_data;
        page &= 0xfffff000;
        tmp = bh;
        do {
                /* advance before freeing: put_unused_buffer_head wipes 'p' */
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        free_page(page);
        return 1;
}
 606 
 607 /*
 608  * Try to free up some pages by shrinking the buffer-cache
 609  *
 610  * Priority tells the routine how hard to try to shrink the
 611  * buffers: 3 means "don't bother too much", while a value
 612  * of 0 means "we'd better get some free pages now".
 613  */
 614 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 615 {
 616         struct buffer_head *bh;
 617         int i;
 618 
 619         if (priority < 2)
 620                 sync_buffers(0);
 621         bh = free_list;
 622         i = nr_buffers >> priority;
 623         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 624                 if (bh->b_count || !bh->b_this_page)
 625                         continue;
 626                 if (bh->b_lock)
 627                         if (priority)
 628                                 continue;
 629                         else
 630                                 wait_on_buffer(bh);
 631                 if (bh->b_dirt) {
 632                         ll_rw_block(WRITEA, 1, &bh);
 633                         continue;
 634                 }
 635                 if (try_to_free(bh))
 636                         return 1;
 637         }
 638         return 0;
 639 }
 640 
 641 /*
 642  * This initializes the initial buffer free list.  nr_buffers is set
 643  * to one less the actual number of buffers, as a sop to backwards
 644  * compatibility --- the old code did this (I think unintentionally,
 645  * but I'm not sure), and programs in the ps package expect it.
 646  *                                      - TYT 8/30/92
 647  */
 648 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 649 {
 650         int i;
 651 
 652         for (i = 0 ; i < NR_HASH ; i++)
 653                 hash_table[i] = NULL;
 654         free_list = 0;
 655         grow_buffers(BLOCK_SIZE);
 656         if (!free_list)
 657                 panic("Unable to initialize buffer free list!");
 658         return;
 659 }

/* [previous][next][first][last][top][bottom][index][help] */