root/fs/buffer.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __wait_on_buffer
  2. sync_buffers
  3. sync_dev
  4. sys_sync
  5. invalidate_buffers
  6. check_disk_change
  7. remove_from_hash_queue
  8. remove_from_free_list
  9. remove_from_queues
  10. put_first_free
  11. put_last_free
  12. insert_into_queues
  13. find_buffer
  14. get_hash_table
  15. getblk
  16. brelse
  17. bread
  18. bread_page
  19. breada
  20. put_unused_buffer_head
  21. get_more_buffer_heads
  22. get_unused_buffer_head
  23. grow_buffers
  24. try_to_free
  25. shrink_buffers
  26. buffer_init

   1 /*
   2  *  linux/fs/buffer.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
   9  * been avoided by NEVER letting an interrupt change a buffer (except for the
  10  * data, of course), but instead letting the caller do it.
  11  */
  12 
  13 /*
  14  * NOTE! There is one discordant note here: checking floppies for
  15  * disk change. This is where it fits best, I think, as it should
  16  * invalidate changed floppy-disk-caches.
  17  */
  18 
  19 #include <stdarg.h>
  20  
  21 #include <linux/config.h>
  22 #include <linux/sched.h>
  23 #include <linux/kernel.h>
  24 #include <linux/string.h>
  25 #include <linux/locks.h>
  26 
  27 #include <asm/system.h>
  28 #include <asm/io.h>
  29 
  30 #ifdef CONFIG_SCSI
  31 #ifdef CONFIG_BLK_DEV_SR
  32 extern int check_cdrom_media_change(int, int);
  33 #endif
  34 #ifdef CONFIG_BLK_DEV_SD
  35 extern int check_scsidisk_media_change(int, int);
  36 extern int revalidate_scsidisk(int, int);
  37 #endif
  38 #endif
  39 
  40 static struct buffer_head * hash_table[NR_HASH];
  41 static struct buffer_head * free_list = NULL;
  42 static struct buffer_head * unused_list = NULL;
  43 static struct wait_queue * buffer_wait = NULL;
  44 
  45 int nr_buffers = 0;
  46 int nr_buffer_heads = 0;
  47 
  48 /*
  49  * Rewrote the wait-routines to use the "new" wait-queue functionality,
  50  * and getting rid of the cli-sti pairs. The wait-queue routines still
  51  * need cli-sti, but now it's just a couple of 386 instructions or so.
  52  *
  53  * Note that the real wait_on_buffer() is an inline function that checks
  54  * if 'b_wait' is set before calling this, so that the queues aren't set
  55  * up unnecessarily.
  56  */
void __wait_on_buffer(struct buffer_head * bh)
{
        /*
         * Sleep until the buffer is unlocked.  The task state is set to
         * TASK_UNINTERRUPTIBLE *before* b_lock is tested so that a wakeup
         * arriving between the test and schedule() is not lost: the waker
         * puts us back to TASK_RUNNING before we ever actually sleep.
         * Callers use the inline wait_on_buffer(), which only calls this
         * when the buffer is locked, avoiding needless queue setup.
         */
        add_wait_queue(&bh->b_wait,&current->wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait,&current->wait);
        current->state = TASK_RUNNING;
}
  69 
  70 static void sync_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
  71 {
  72         int i;
  73         struct buffer_head * bh;
  74 
  75         bh = free_list;
  76         for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
  77                 if (bh->b_lock)
  78                         continue;
  79                 if (!bh->b_dirt)
  80                         continue;
  81                 ll_rw_block(WRITE,bh);
  82         }
  83 }
  84 
/*
 * Flush all dirty state for 'dev' (0 = all devices) to disk: buffers
 * first, then the in-core super blocks and inodes, then a second
 * buffer pass to pick up blocks dirtied by the inode/super writes.
 */
void sync_dev(dev_t dev)
{
        sync_buffers(dev);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev);
}
  92 
  93 int sys_sync(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  94 {
  95         sync_dev(0);
  96         return 0;
  97 }
  98 
  99 void invalidate_buffers(dev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 100 {
 101         int i;
 102         struct buffer_head * bh;
 103 
 104         bh = free_list;
 105         for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
 106                 if (bh->b_dev != dev)
 107                         continue;
 108                 wait_on_buffer(bh);
 109                 if (bh->b_dev == dev)
 110                         bh->b_uptodate = bh->b_dirt = 0;
 111         }
 112 }
 113 
 114 /*
 115  * This routine checks whether a floppy has been changed, and
 116  * invalidates all buffer-cache-entries in that case. This
 117  * is a relatively slow routine, so we have to try to minimize using
 118  * it. Thus it is called only upon a 'mount' or 'open'. This
 119  * is the best way of combining speed and utility, I think.
 120  * People changing diskettes in the middle of an operation deserve
  121  * to lose :-)
 122  *
 123  * NOTE! Although currently this is only for floppies, the idea is
 124  * that any additional removable block-device will use this routine,
 125  * and that mount/open needn't know that floppies/whatever are
 126  * special.
 127  */
void check_disk_change(dev_t dev)
{
        int i;                          /* nonzero => media was changed */
        struct buffer_head * bh;

        switch(MAJOR(dev)){
        case 2: /* floppy disc */
                /* read block 0 so the driver can sample the change line */
                if (!(bh = getblk(dev,0,1024)))
                        return;
                i = floppy_change(bh);
                brelse(bh);
                break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
         case 8: /* Removable scsi disk */
                i = check_scsidisk_media_change(dev, 0);
                if (i) printk("Flushing buffers and inodes for SCSI disk\n");
                break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
         case 11: /* CDROM */
                i = check_cdrom_media_change(dev, 0);
                if (i) printk("Flushing buffers and inodes for CDROM\n");
                break;
#endif

         default:
                /* not a removable device we know about */
                return;
        };

        if (!i) return;         /* media unchanged: cache is still valid */

        /* drop every superblock, inode and buffer that refers to 'dev';
           'i' is reused as a plain index from here on */
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_block[i].s_dev == dev)
                        put_super(super_block[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
/* This is trickier for a removable hardisk, because we have to invalidate
   all of the partitions that lie on the disk. */
        if (MAJOR(dev) == 8)
                revalidate_scsidisk(dev, 0);
#endif
}
 174 
 175 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
 176 #define hash(dev,block) hash_table[_hashfn(dev,block)]
 177 
 178 static inline void remove_from_hash_queue(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 179 {
 180         if (bh->b_next)
 181                 bh->b_next->b_prev = bh->b_prev;
 182         if (bh->b_prev)
 183                 bh->b_prev->b_next = bh->b_next;
 184         if (hash(bh->b_dev,bh->b_blocknr) == bh)
 185                 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
 186         bh->b_next = bh->b_prev = NULL;
 187 }
 188 
 189 static inline void remove_from_free_list(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 190 {
 191         if (!(bh->b_prev_free) || !(bh->b_next_free))
 192                 panic("Free block list corrupted");
 193         bh->b_prev_free->b_next_free = bh->b_next_free;
 194         bh->b_next_free->b_prev_free = bh->b_prev_free;
 195         if (free_list == bh)
 196                 free_list = bh->b_next_free;
 197         bh->b_next_free = bh->b_prev_free = NULL;
 198 }
 199 
 200 static inline void remove_from_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 201 {
 202         remove_from_hash_queue(bh);
 203         remove_from_free_list(bh);
 204 }
 205 
 206 static inline void put_first_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 207 {
 208         if (!bh || (bh == free_list))
 209                 return;
 210         remove_from_free_list(bh);
 211 /* add to front of free list */
 212         bh->b_next_free = free_list;
 213         bh->b_prev_free = free_list->b_prev_free;
 214         free_list->b_prev_free->b_next_free = bh;
 215         free_list->b_prev_free = bh;
 216         free_list = bh;
 217 }
 218 
 219 static inline void put_last_free(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 220 {
 221         if (!bh)
 222                 return;
 223         if (bh == free_list) {
 224                 free_list = bh->b_next_free;
 225                 return;
 226         }
 227         remove_from_free_list(bh);
 228 /* add to back of free list */
 229         bh->b_next_free = free_list;
 230         bh->b_prev_free = free_list->b_prev_free;
 231         free_list->b_prev_free->b_next_free = bh;
 232         free_list->b_prev_free = bh;
 233 }
 234 
 235 static inline void insert_into_queues(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 236 {
 237 /* put at end of free list */
 238         bh->b_next_free = free_list;
 239         bh->b_prev_free = free_list->b_prev_free;
 240         free_list->b_prev_free->b_next_free = bh;
 241         free_list->b_prev_free = bh;
 242 /* put the buffer in new hash-queue if it has a device */
 243         bh->b_prev = NULL;
 244         bh->b_next = NULL;
 245         if (!bh->b_dev)
 246                 return;
 247         bh->b_next = hash(bh->b_dev,bh->b_blocknr);
 248         hash(bh->b_dev,bh->b_blocknr) = bh;
 249         if (bh->b_next)
 250                 bh->b_next->b_prev = bh;
 251 }
 252 
 253 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 254 {               
 255         struct buffer_head * tmp;
 256 
 257         for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
 258                 if (tmp->b_dev==dev && tmp->b_blocknr==block)
 259                         if (tmp->b_size == size)
 260                                 return tmp;
 261                         else {
 262                                 printk("wrong block-size on device %04x\n",dev);
 263                                 return NULL;
 264                         }
 265         return NULL;
 266 }
 267 
 268 /*
 269  * Why like this, I hear you say... The reason is race-conditions.
  270  * As we don't lock buffers (unless we are reading them, that is),
 271  * something might happen to it while we sleep (ie a read-error
 272  * will force it bad). This shouldn't really happen currently, but
 273  * the code is ready.
 274  */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block,size)))
                        return NULL;
                /*
                 * Take a reference *before* sleeping so the buffer
                 * cannot be reused while we wait for pending I/O.
                 */
                bh->b_count++;
                wait_on_buffer(bh);
                /* re-check after the sleep: the buffer may have been
                   repurposed for another (dev,block) meanwhile */
                if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size) {
                        put_last_free(bh);      /* keep away from reuse end */
                        return bh;
                }
                /* lost the race: drop the reference and search again */
                bh->b_count--;
        }
}
 291 
 292 /*
 293  * Ok, this is getblk, and it isn't very clear, again to hinder
 294  * race-conditions. Most of the code is seldom used, (ie repeating),
 295  * so it should be much more efficient than it looks.
 296  *
  297  * The algorithm is changed: hopefully better, and an elusive bug removed.
 298  *
 299  * 14.02.92: changed it to sync dirty buffers a bit: better performance
 300  * when the filesystem starts to get full of dirty blocks (I hope).
 301  */
/* BADNESS ranks a reuse candidate: clean+unlocked (0) is best, locked
   only (1) next, dirty (2) or dirty+locked (3) worst. */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh, * tmp;
        int buffers;

repeat:
        /* cache hit?  get_hash_table bumps b_count for us */
        if (bh = get_hash_table(dev, block, size))
                return bh;

        /* plenty of free memory: prefer growing the cache over reuse */
        if (nr_free_pages > 30)
                grow_buffers(size);

        buffers = nr_buffers;
        bh = NULL;

        /* scan the free list for the least "bad" unused buffer of
           the right size; a perfectly clean one ends the scan early */
        for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
                if (tmp->b_count || tmp->b_size != size)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))
                                break;
                }
#if 0
                if (tmp->b_dirt)
                        ll_rw_block(WRITEA,tmp);
#endif
        }

        /* nothing reusable: grow, keeping a small emergency page reserve */
        if (!bh && nr_free_pages > 5) {
                grow_buffers(size);
                goto repeat;
        }
        
/* and repeat until we find something good */
        if (!bh) {
                sleep_on(&buffer_wait);         /* woken by brelse() */
                goto repeat;
        }
        /* we may have slept at any point above - re-validate everything */
        wait_on_buffer(bh);
        if (bh->b_count || bh->b_size != size)
                goto repeat;
        if (bh->b_dirt) {
                sync_buffers(bh->b_dev);
                goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block,size))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of it's kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
 364 
 365 void brelse(struct buffer_head * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 366 {
 367         if (!buf)
 368                 return;
 369         wait_on_buffer(buf);
 370         if (!(buf->b_count--))
 371                 panic("Trying to free free buffer");
 372         wake_up(&buffer_wait);
 373 }
 374 
 375 /*
 376  * bread() reads a specified block and returns the buffer that contains
 377  * it. It returns NULL if the block was unreadable.
 378  */
 379 struct buffer_head * bread(dev_t dev, int block, int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 380 {
 381         struct buffer_head * bh;
 382 
 383         if (!(bh = getblk(dev, block, size))) {
 384                 printk("bread: getblk returned NULL\n");
 385                 return NULL;
 386         }
 387         if (bh->b_uptodate)
 388                 return bh;
 389         ll_rw_block(READ,bh);
 390         wait_on_buffer(bh);
 391         if (bh->b_uptodate)
 392                 return bh;
 393         brelse(bh);
 394         return NULL;
 395 }
 396 
/* Copy one BLOCK_SIZE block from 'from' to 'to' with rep movsl
   (BLOCK_SIZE/4 longword moves); cx/si/di are declared clobbered. */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
        "rep\n\t" \
        "movsl\n\t" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        :"cx","di","si")

/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc.  A zero entry in b[] means "skip this block".
 */
void bread_page(unsigned long address, dev_t dev, int b[4])
{
        struct buffer_head * bh[4];
        int i;

        /* first pass: kick off all the reads without waiting */
        for (i=0 ; i<4 ; i++)
                if (b[i]) {
                        if (bh[i] = getblk(dev, b[i], 1024))
                                if (!bh[i]->b_uptodate)
                                        ll_rw_block(READ,bh[i]);
                } else
                        bh[i] = NULL;
        /* second pass: wait for each buffer, copy its data into place
           (skipping any that failed to read), then release it */
        for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                        if (bh[i]->b_uptodate)
                                COPYBLK((unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
}
 430 
 431 /*
 432  * Ok, breada can be used as bread, but additionally to mark other
 433  * blocks for reading as well. End the argument list with a negative
 434  * number.
 435  */
 436 struct buffer_head * breada(dev_t dev,int first, ...)
     /* [previous][next][first][last][top][bottom][index][help] */
 437 {
 438         va_list args;
 439         struct buffer_head * bh, *tmp;
 440 
 441         va_start(args,first);
 442         if (!(bh = getblk(dev, first, 1024))) {
 443                 printk("breada: getblk returned NULL\n");
 444                 return NULL;
 445         }
 446         if (!bh->b_uptodate)
 447                 ll_rw_block(READ,bh);
 448         while ((first=va_arg(args,int))>=0) {
 449                 tmp = getblk(dev, first, 1024);
 450                 if (tmp) {
 451                         if (!tmp->b_uptodate)
 452                                 ll_rw_block(READA,tmp);
 453                         tmp->b_count--;
 454                 }
 455         }
 456         va_end(args);
 457         wait_on_buffer(bh);
 458         if (bh->b_uptodate)
 459                 return bh;
 460         brelse(bh);
 461         return (NULL);
 462 }
 463 
 464 static void put_unused_buffer_head(struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 465 {
 466         memset((void *) bh,0,sizeof(*bh));
 467         bh->b_next_free = unused_list;
 468         unused_list = bh;
 469 }
 470 
 471 static void get_more_buffer_heads(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 472 {
 473         unsigned long page;
 474         struct buffer_head * bh;
 475 
 476         if (unused_list)
 477                 return;
 478         page = get_free_page(GFP_KERNEL);
 479         if (!page)
 480                 return;
 481         bh = (struct buffer_head *) page;
 482         while ((unsigned long) (bh+1) <= page+4096) {
 483                 put_unused_buffer_head(bh);
 484                 bh++;
 485                 nr_buffer_heads++;
 486         }
 487 }
 488 
 489 static struct buffer_head * get_unused_buffer_head(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 490 {
 491         struct buffer_head * bh;
 492 
 493         get_more_buffer_heads();
 494         if (!unused_list)
 495                 return NULL;
 496         bh = unused_list;
 497         unused_list = bh->b_next_free;
 498         bh->b_next_free = NULL;
 499         bh->b_data = NULL;
 500         bh->b_size = 0;
 501         return bh;
 502 }
 503 
 504 /*
 505  * Try to increase the number of buffers available: the size argument
 506  * is used to determine what kind of buffers we want. Currently only
 507  * 1024-byte buffers are supported by the rest of the system, but I
 508  * think this will change eventually.
 509  */
void grow_buffers(int size)
{
        unsigned long page;
        int i;
        struct buffer_head *bh, *tmp;

        /* only 512-byte multiples up to one page are meaningful */
        if ((size & 511) || (size > 4096)) {
                printk("grow_buffers: size = %d\n",size);
                return;
        }
        page = get_free_page(GFP_BUFFER);
        if (!page)
                return;
        tmp = NULL;
        i = 0;
        /* phase 1: allocate a head for each size-chunk of the page,
           chaining them through b_this_page (tmp ends as the last one,
           bh as the first - the chain runs last-to-first for now) */
        for (i = 0 ; i+size <= 4096 ; i += size) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = tmp;
                tmp = bh;
                bh->b_data = (char * ) (page+i);
                bh->b_size = size;
        }
        tmp = bh;
        /* phase 2: walk the page chain, pushing each head onto the
           front of the circular free list (creating it if empty) */
        while (1) {
                if (free_list) {
                        tmp->b_next_free = free_list;
                        tmp->b_prev_free = free_list->b_prev_free;
                        free_list->b_prev_free->b_next_free = tmp;
                        free_list->b_prev_free = tmp;
                } else {
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                free_list = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        /* close the per-page chain into a ring: last points to first */
        tmp->b_this_page = bh;
        return;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        bh = tmp;
        while (bh) {
                tmp = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }       
        free_page(page);
}
 566 
 567 /*
 568  * try_to_free() checks if all the buffers on this particular page
 569  * are unused, and free's the page if so.
 570  */
static int try_to_free(struct buffer_head * bh)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        /* pass 1: the page is freeable only if every buffer in its
           b_this_page ring is unused, clean and unlocked */
        tmp = bh;
        do {
                if (!tmp)
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* all buffers share one page: mask b_data down to its base */
        page = (unsigned long) bh->b_data;
        page &= 0xfffff000;
        /* pass 2: detach and recycle each head, advancing 'tmp' before
           releasing 'p' since put_unused_buffer_head clears the links */
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        free_page(page);
        return 1;
}
 597 
 598 /*
 599  * Try to free up some pages by shrinking the buffer-cache
 600  *
 601  * Priority tells the routine how hard to try to shrink the
 602  * buffers: 3 means "don't bother too much", while a value
 603  * of 0 means "we'd better get some free pages now".
 604  */
 605 int shrink_buffers(unsigned int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 606 {
 607         struct buffer_head *bh;
 608         int i;
 609 
 610         if (priority < 2)
 611                 sync_buffers(0);
 612         bh = free_list;
 613         i = nr_buffers >> priority;
 614         for ( ; i-- > 0 ; bh = bh->b_next_free) {
 615                 if (bh->b_count || !bh->b_this_page)
 616                         continue;
 617                 if (bh->b_lock)
 618                         if (priority)
 619                                 continue;
 620                         else
 621                                 wait_on_buffer(bh);
 622                 if (bh->b_dirt) {
 623                         ll_rw_block(WRITEA,bh);
 624                         continue;
 625                 }
 626                 if (try_to_free(bh))
 627                         return 1;
 628         }
 629         return 0;
 630 }
 631 
 632 /*
 633  * This initializes the initial buffer free list.  nr_buffers is set
 634  * to one less the actual number of buffers, as a sop to backwards
 635  * compatibility --- the old code did this (I think unintentionally,
 636  * but I'm not sure), and programs in the ps package expect it.
 637  *                                      - TYT 8/30/92
 638  */
 639 void buffer_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 640 {
 641         int i;
 642 
 643         for (i = 0 ; i < NR_HASH ; i++)
 644                 hash_table[i] = NULL;
 645         free_list = 0;
 646         grow_buffers(BLOCK_SIZE);
 647         if (!free_list)
 648                 panic("Unable to initialize buffer free list!");
 649         return;
 650 }

/* [previous][next][first][last][top][bottom][index][help] */