This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/config.h>
20 #include <linux/sched.h>
21 #include <linux/kernel.h>
22 #include <linux/major.h>
23 #include <linux/string.h>
24 #include <linux/locks.h>
25 #include <linux/errno.h>
26 #include <linux/malloc.h>
27
28 #include <asm/system.h>
29 #include <asm/segment.h>
30 #include <asm/io.h>
31
32 #ifdef CONFIG_SCSI
33 #ifdef CONFIG_BLK_DEV_SR
34 extern int check_cdrom_media_change(int, int);
35 #endif
36 #ifdef CONFIG_BLK_DEV_SD
37 extern int check_scsidisk_media_change(int, int);
38 extern int revalidate_scsidisk(int, int);
39 #endif
40 #endif
41 #ifdef CONFIG_CDU31A
42 extern int check_cdu31a_media_change(int, int);
43 #endif
44 #ifdef CONFIG_MCD
45 extern int check_mcd_media_change(int, int);
46 #endif
47
48 #define NR_SIZES 4
49 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
50 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
51
52 #define BUFSIZE_INDEX(X) (buffersize_index[(X)>>9])
53
54 static int grow_buffers(int pri, int size);
55 static int shrink_specific_buffers(unsigned int priority, int size);
56 static int maybe_shrink_lav_buffers(int);
57
58 static int nr_hash = 0;
59 static struct buffer_head ** hash_table;
60 struct buffer_head ** buffer_pages;
61 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
62 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
63 static struct buffer_head * unused_list = NULL;
64 static struct wait_queue * buffer_wait = NULL;
65
66 int nr_buffers = 0;
67 int nr_buffers_type[NR_LIST] = {0,};
68 int nr_buffers_size[NR_SIZES] = {0,};
69 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
70 int buffer_usage[NR_SIZES] = {0,};
71 int buffers_lav[NR_SIZES] = {0,};
72 int nr_free[NR_SIZES] = {0,};
73 int buffermem = 0;
74 int nr_buffer_heads = 0;
75 static int min_free_pages = 20;
76 extern int *blksize_size[];
77
78
79 static void wakeup_bdflush(int);
80
81 #define N_PARAM 9
82 #define LAV
83
84 static union bdflush_param{
85 struct {
86 int nfract;
87
88 int ndirty;
89
90 int nrefill;
91
92 int nref_dirt;
93
94 int clu_nfract;
95
96 int age_buffer;
97
98 int age_super;
99
100 int lav_const;
101
102 int lav_ratio;
103
104
105 } b_un;
106 unsigned int data[N_PARAM];
107 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
108
109
110
111
112
113
114
115 static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
116 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
117
118
119
120
121
122
123
124
125
126
/*
 * Sleep until bh is unlocked.  The reference count is raised across
 * the wait so the buffer cannot be freed underneath us.
 * NOTE: current->state must be set to TASK_UNINTERRUPTIBLE *before*
 * re-testing b_lock; otherwise a wakeup between the test and the
 * schedule() would be lost.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        bh->b_count++;          /* pin the buffer while we wait */
        add_wait_queue(&bh->b_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait, &wait);
        bh->b_count--;
        current->state = TASK_RUNNING;
}
143
144
145
146
147
148
149
150
151
152
153
/*
 * Write out dirty buffers for one device (all devices when dev == 0).
 *
 * wait == 0: start the writes asynchronously and return at once.
 * wait != 0: make up to three passes, so buffers that were locked on
 *            the first pass are waited for and everything previously
 *            dirty has been handed to the driver before returning.
 *
 * Returns non-zero if a buffer in a "weird" state (unlocked, clean,
 * requested but not uptodate) was found, i.e. a probable write error.
 */
static int sync_buffers(dev_t dev, int wait)
{
        int i, retry, pass = 0, err = 0;
        int nlist, ncount;
        struct buffer_head * bh, *next;

repeat:
        retry = 0;
        ncount = 0;

        /* Walk every LRU list; the lists can change whenever we sleep,
         * hence the re-validation checks below. */
        for(nlist = 0; nlist < NR_LIST; nlist++)
        {
        repeat1:
                bh = lru_list[nlist];
                if(!bh) continue;
                /* The *2 bounds the scan in case buffers are refiled
                 * onto the same list while we walk it. */
                for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
                        /* refiled to another list - restart this list */
                        if(bh->b_list != nlist) goto repeat1;
                        next = bh->b_next_free;
                        if(!lru_list[nlist]) break;
                        if (dev && bh->b_dev != dev)
                                continue;
                        if (bh->b_lock)
                        {
                                /* Locked: skip on the first pass (or when
                                 * not waiting), and retry later. */
                                if (!wait || !pass) {
                                        retry = 1;
                                        continue;
                                }
                                wait_on_buffer (bh);
                        }
                        /* Unlocked, clean, requested but not uptodate:
                         * likely an I/O error on a previous write. */
                        if (wait && bh->b_req && !bh->b_lock &&
                            !bh->b_dirt && !bh->b_uptodate) {
                                err = 1;
                                printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
                                continue;
                        }
                        /* Only write dirty buffers, and only on the first
                         * two passes (pass 2 just waits for completion). */
                        if (!bh->b_dirt || pass>=2)
                                continue;
                        bh->b_count++;
                        bh->b_flushtime = 0;
                        ll_rw_block(WRITE, 1, &bh);
                        /* Dirty buffers belong on BUF_DIRTY; report any
                         * found elsewhere. */
                        if(nlist != BUF_DIRTY) {
                                printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
                                ncount++;
                        };
                        bh->b_count--;
                        retry = 1;
                }
        }
        if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

        /* If we are supposed to wait and something was still in flight,
         * go round again (at most two extra passes). */
        if (wait && retry && ++pass<=2)
                goto repeat;
        return err;
}
224
/*
 * Write out buffers, superblocks and inodes for a device without
 * waiting.  The second sync_buffers() pushes out anything dirtied by
 * sync_supers()/sync_inodes().
 */
void sync_dev(dev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev, 0);
}
232
/*
 * Like sync_dev(), but the final sync_buffers() waits for completion;
 * its result (non-zero on a suspected write error) is returned.
 */
int fsync_dev(dev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        return sync_buffers(dev, 1);
}
240
/* sync(2): schedule writeout for everything; never fails. */
asmlinkage int sys_sync(void)
{
        sync_dev(0);    /* dev 0 means "all devices" in sync_buffers() */
        return 0;
}
246
/*
 * Generic per-file fsync: filesystems without a finer-grained method
 * simply flush the whole device the inode lives on.  filp is unused.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
        return fsync_dev(inode->i_dev);
}
251
252 asmlinkage int sys_fsync(unsigned int fd)
253 {
254 struct file * file;
255 struct inode * inode;
256
257 if (fd>=NR_OPEN || !(file=current->filp[fd]) || !(inode=file->f_inode))
258 return -EBADF;
259 if (!file->f_op || !file->f_op->fsync)
260 return -EINVAL;
261 if (file->f_op->fsync(inode,file))
262 return -EIO;
263 return 0;
264 }
265
/*
 * Forget the cached contents of every buffer on a device (e.g. after
 * a media change).  Buffers are not removed from the cache; their
 * uptodate/dirty/req state is simply cleared.  The b_dev re-check
 * after wait_on_buffer() guards against the buffer having been reused
 * while we slept.
 */
void invalidate_buffers(dev_t dev)
{
        int i;
        int nlist;
        struct buffer_head * bh;

        for(nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                /* *2 bounds the scan against concurrent refiling */
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
                     bh = bh->b_next_free) {
                        if (bh->b_dev != dev)
                                continue;
                        wait_on_buffer(bh);
                        if (bh->b_dev == dev)
                                bh->b_flushtime = bh->b_uptodate =
                                        bh->b_dirt = bh->b_req = 0;
                }
        }
}
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
/*
 * Ask the driver whether removable media in `dev` was changed and, if
 * so, throw away everything cached for it (superblocks, inodes,
 * buffers).  Only drivers known at compile time are consulted; any
 * other major returns without doing anything.
 */
void check_disk_change(dev_t dev)
{
        int i;
        struct buffer_head * bh;

        switch(MAJOR(dev)){
        case FLOPPY_MAJOR:
                /* The floppy driver wants a buffer to probe with. */
                if (!(bh = getblk(dev,0,1024)))
                        return;
                i = floppy_change(bh);
                brelse(bh);
                break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
        case SCSI_DISK_MAJOR:
                i = check_scsidisk_media_change(dev, 0);
                break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
        case SCSI_CDROM_MAJOR:
                i = check_cdrom_media_change(dev, 0);
                break;
#endif

#if defined(CONFIG_CDU31A)
        case CDU31A_CDROM_MAJOR:
                i = check_cdu31a_media_change(dev, 0);
                break;
#endif

#if defined(CONFIG_MCD)
        case MITSUMI_CDROM_MAJOR:
                i = check_mcd_media_change(dev, 0);
                break;
#endif

        default:
                return;
        };

        if (!i) return;         /* no media change reported */

        printk("VFS: Disk change detected on device %d/%d\n",
               MAJOR(dev), MINOR(dev));
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_blocks[i].s_dev == dev)
                        put_super(super_blocks[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
        /* Re-read the partition table of the (possibly new) disk. */
        if (MAJOR(dev) == SCSI_DISK_MAJOR)
                revalidate_scsidisk(dev, 0);
#endif
}
358
359 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
360 #define hash(dev,block) hash_table[_hashfn(dev,block)]
361
/*
 * Unlink bh from its hash chain.  Safe to call for a buffer that is
 * not hashed (all three checks then do nothing).
 */
static inline void remove_from_hash_queue(struct buffer_head * bh)
{
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        /* If bh was the chain head, advance the head. */
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
        bh->b_next = bh->b_prev = NULL;
}
372
/*
 * Unlink bh from the circular LRU ring it lives on.  b_dev == 0xffff
 * marks a buffer on a free list, which must never appear here.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: LRU block list corrupted");
        if (bh->b_dev == 0xffff) panic("LRU list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;

        /* Advance the head past bh; if bh was the only element the
         * head still points at bh afterwards, so clear the list. */
        if (lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = bh->b_next_free;
        if(lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = NULL;
        bh->b_next_free = bh->b_prev_free = NULL;
}
387
/*
 * Unlink bh from the circular free ring of its size class, keeping
 * nr_free[] in step.  Only buffers marked free (b_dev == 0xffff) may
 * be on a free list.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
        int isize = BUFSIZE_INDEX(bh->b_size);
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: Free block list corrupted");
        if(bh->b_dev != 0xffff) panic("Free list corrupted");
        if(!free_list[isize])
                panic("Free list empty");
        nr_free[isize]--;
        if(bh->b_next_free == bh)       /* last element of the ring */
                free_list[isize] = NULL;
        else {
                bh->b_prev_free->b_next_free = bh->b_next_free;
                bh->b_next_free->b_prev_free = bh->b_prev_free;
                if (free_list[isize] == bh)
                        free_list[isize] = bh->b_next_free;
        };
        bh->b_next_free = bh->b_prev_free = NULL;
}
407
/*
 * Remove bh from all global structures: the free list for free
 * buffers, or hash + LRU (plus per-type counters) for in-use ones.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
        if(bh->b_dev == 0xffff) {
                remove_from_free_list(bh);
                return;
        };
        nr_buffers_type[bh->b_list]--;
        nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
        remove_from_hash_queue(bh);
        remove_from_lru_list(bh);
}
420
/*
 * Move bh to the tail of its LRU ring.  If bh is already the head,
 * simply rotating the head pointer achieves the same effect without
 * relinking.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
        if (!bh)
                return;
        if (bh == lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh->b_next_free;
                return;
        }
        if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
        remove_from_lru_list(bh);

        /* Re-insert at the tail of the (possibly now empty) ring. */
        if(!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                lru_list[bh->b_list]->b_prev_free = bh;
        };

        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
}
443
/*
 * Mark bh free (b_dev = 0xffff) and append it to the tail of the free
 * ring for its size class.
 */
static inline void put_last_free(struct buffer_head * bh)
{
        int isize;
        if (!bh)
                return;

        isize = BUFSIZE_INDEX(bh->b_size);
        bh->b_dev = 0xffff;     /* magic "free buffer" device number */

        /* Start a new ring if the free list was empty. */
        if(!free_list[isize]) {
                free_list[isize] = bh;
                bh->b_prev_free = bh;
        };

        nr_free[isize]++;
        bh->b_next_free = free_list[isize];
        bh->b_prev_free = free_list[isize]->b_prev_free;
        free_list[isize]->b_prev_free->b_next_free = bh;
        free_list[isize]->b_prev_free = bh;
}
465
/*
 * Insert bh into the global structures: free buffers go onto their
 * free list, live buffers onto the tail of their LRU ring and - when
 * they belong to a device - onto the hash chains, updating the
 * per-type counters.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
        if(bh->b_dev == 0xffff) {
                put_last_free(bh);
                return;
        };
        if(!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                bh->b_prev_free = bh;
        };
        if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
        nr_buffers_type[bh->b_list]++;
        nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

        /* Anonymous buffers (no device) are not hashed. */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!bh->b_dev)
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)
                bh->b_next->b_prev = bh;
}
495
496 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
497 {
498 struct buffer_head * tmp;
499
500 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
501 if (tmp->b_dev==dev && tmp->b_blocknr==block)
502 if (tmp->b_size == size)
503 return tmp;
504 else {
505 printk("VFS: Wrong blocksize on device %d/%d\n",
506 MAJOR(dev), MINOR(dev));
507 return NULL;
508 }
509 return NULL;
510 }
511
512
513
514
515
516
517
518
519 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
520 {
521 struct buffer_head * bh;
522
523 for (;;) {
524 if (!(bh=find_buffer(dev,block,size)))
525 return NULL;
526 bh->b_count++;
527 wait_on_buffer(bh);
528 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
529 return bh;
530 bh->b_count--;
531 }
532 }
533
/*
 * Change the soft block size of a device.  Flushes everything the
 * device has cached first (sync_buffers with wait == 2), then walks
 * the LRU lists invalidating and unhashing every buffer of the wrong
 * size so stale contents cannot be found again.
 */
void set_blocksize(dev_t dev, int size)
{
        int i, nlist;
        struct buffer_head * bh, *bhnext;

        if (!blksize_size[MAJOR(dev)])
                return;

        switch(size) {
        default: panic("Invalid blocksize passed to set_blocksize");
        case 512: case 1024: case 2048: case 4096:;
        }

        /* First call for this minor with the default size: nothing of
         * another size can be cached yet. */
        if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
                blksize_size[MAJOR(dev)][MINOR(dev)] = size;
                return;
        }
        if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
                return;
        sync_buffers(dev, 2);
        blksize_size[MAJOR(dev)][MINOR(dev)] = size;

        /* Invalidate leftover buffers of the old size.  The *2 bounds
         * the scan against refiling while we sleep. */
        for(nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
                        if(!bh) break;
                        bhnext = bh->b_next_free;
                        if (bh->b_dev != dev)
                                continue;
                        if (bh->b_size == size)
                                continue;
                        wait_on_buffer(bh);
                        if (bh->b_dev == dev && bh->b_size != size) {
                                bh->b_uptodate = bh->b_dirt =
                                        bh->b_flushtime = 0;
                        };
                        remove_from_hash_queue(bh);
                }
        }
}
578
579 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
580
/*
 * Ensure free buffers of `size` bytes are available.  Strategy:
 *  1) grow new buffers from free pages while memory is plentiful;
 *  2) shrink less-loaded size classes (LAV balancing) and grow from
 *     the pages that frees;
 *  3) steal the least-recently-used clean, unshared, unlocked buffers
 *     of the right size from the LRU lists;
 *  4) as a last resort grow with GFP_ATOMIC and kick bdflush.
 * `needed` is the number of bytes of buffer space still to obtain.
 */
void refill_freelist(int size)
{
        struct buffer_head * bh, * tmp;
        struct buffer_head * candidate[NR_LIST];
        unsigned int best_time, winner;
        int isize = BUFSIZE_INDEX(size);
        int buffers[NR_LIST];
        int i;
        int needed;

        /* Plenty free already - nothing to do. */
        if (nr_free[isize] > 100)
                return;

        /* Aim for nrefill buffers' worth of space. */
        needed = bdf_prm.b_un.nrefill * size;

        /* Step 1: grow from the page pool while memory is plentiful. */
        while (nr_free_pages > min_free_pages && needed > 0 &&
               grow_buffers(GFP_BUFFER, size)) {
                needed -= PAGE_SIZE;
        }

        if(needed <= 0) return;

        /* Step 2: shrink under-used size classes, growing from the
         * pages that frees up. */
        while(maybe_shrink_lav_buffers(size))
        {
                if(!grow_buffers(GFP_BUFFER, size)) break;
                needed -= PAGE_SIZE;
                if(needed <= 0) return;
        };

        /* Step 3: steal LRU buffers.  For each list find the first
         * reusable buffer (idle, clean, unshared, right size) - these
         * become the per-list candidates. */
repeat0:
        for(i=0; i<NR_LIST; i++){
                if(i == BUF_DIRTY || i == BUF_SHARED ||
                   nr_buffers_type[i] == 0) {
                        candidate[i] = NULL;
                        buffers[i] = 0;
                        continue;
                }
                buffers[i] = nr_buffers_type[i];
                for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
                {
                        if(buffers[i] < 0) panic("Here is the problem");
                        tmp = bh->b_next_free;
                        if (!bh) break;

                        /* Shared or dirty buffers belong on other lists. */
                        if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
                            bh->b_dirt) {
                                refile_buffer(bh);
                                continue;
                        };

                        if (bh->b_count || bh->b_size != size)
                                continue;

                        /* On the locked lists a locked buffer ends the
                         * useful part of the scan. */
                        if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
                                buffers[i] = 0;
                                break;
                        }

                        if (BADNESS(bh)) continue;
                        break;
                };
                if(!buffers[i]) candidate[i] = NULL;
                else candidate[i] = bh;
                if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
        }

repeat:
        if(needed <= 0) return;

        /* Pick the globally least-recently-used candidate. */
        winner = best_time = UINT_MAX;
        for(i=0; i<NR_LIST; i++){
                if(!candidate[i]) continue;
                if(candidate[i]->b_lru_time < best_time){
                        best_time = candidate[i]->b_lru_time;
                        winner = i;
                }
        }

        /* Found one: move it to the free list, then look for the next
         * candidate on the same LRU list. */
        if(winner != UINT_MAX) {
                i = winner;
                bh = candidate[i];
                candidate[i] = bh->b_next_free;
                if(candidate[i] == bh) candidate[i] = NULL;
                if (bh->b_count || bh->b_size != size)
                        panic("Busy buffer in candidate list\n");
                if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
                        panic("Shared buffer in candidate list\n");
                if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

                if(bh->b_dev == 0xffff) panic("Wrong list");
                remove_from_queues(bh);
                bh->b_dev = 0xffff;
                put_last_free(bh);
                needed -= bh->b_size;
                buffers[i]--;
                if(buffers[i] < 0) panic("Here is the problem");

                if(buffers[i] == 0) candidate[i] = NULL;

                /* Same filter as the initial scan above. */
                if(candidate[i] && buffers[i] > 0){
                        if(buffers[i] <= 0) panic("Here is another problem");
                        for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
                                if(buffers[i] < 0) panic("Here is the problem");
                                tmp = bh->b_next_free;
                                if (!bh) break;

                                if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
                                    bh->b_dirt) {
                                        refile_buffer(bh);
                                        continue;
                                };

                                if (bh->b_count || bh->b_size != size)
                                        continue;

                                if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
                                        buffers[i] = 0;
                                        break;
                                }

                                if (BADNESS(bh)) continue;
                                break;
                        };
                        if(!buffers[i]) candidate[i] = NULL;
                        else candidate[i] = bh;
                        if(candidate[i] && candidate[i]->b_count)
                                panic("Here is the problem");
                }

                goto repeat;
        }

        if(needed <= 0) return;

        /* Step 4: nothing reusable - try growing once more... */
        if (nr_free_pages > 5) {
                if (grow_buffers(GFP_BUFFER, size)) {
                        needed -= PAGE_SIZE;
                        goto repeat0;
                };
        }

        /* ...then fall back to atomic allocation, waking bdflush to
         * make progress if even that fails. */
        if (!grow_buffers(GFP_ATOMIC, size))
                wakeup_bdflush(1);
        needed -= PAGE_SIZE;
        goto repeat0;
}
761
762
763
764
765
766
767
768
769
770
771
/*
 * Return a buffer for dev/block with an elevated reference count,
 * creating one from the free list if it is not cached.  The buffer is
 * NOT read from disk; b_uptodate tells the caller whether the
 * contents are valid.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh;
        int isize = BUFSIZE_INDEX(size);

        /* Per-size usage statistics feed the LAV balancing code. */
        buffer_usage[isize]++;

repeat:
        bh = get_hash_table(dev, block, size);
        if (bh) {
                if (bh->b_uptodate && !bh->b_dirt)
                        put_last_lru(bh);
                if(!bh->b_dirt) bh->b_flushtime = 0;
                return bh;
        }

        while(!free_list[isize]) refill_freelist(size);

        /* refill_freelist() may have slept: someone else may have set
         * up this block meanwhile, so look again. */
        if (find_buffer(dev,block,size))
                goto repeat;

        bh = free_list[isize];
        remove_from_free_list(bh);

        /* The buffer is ours alone: initialise it as a fresh, not yet
         * uptodate block for this device. */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_lock=0;
        bh->b_uptodate=0;
        bh->b_flushtime = 0;
        bh->b_req=0;
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
813
814 void set_writetime(struct buffer_head * buf, int flag)
815 {
816 int newtime;
817
818 if (buf->b_dirt){
819
820 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
821 bdf_prm.b_un.age_buffer);
822 if(!buf->b_flushtime || buf->b_flushtime > newtime)
823 buf->b_flushtime = newtime;
824 } else {
825 buf->b_flushtime = 0;
826 }
827 }
828
829
/*
 * Map the buffer state bits to the LRU list it belongs on.
 * Index bits: bit0 = page shared, bit1 = locked, bit2 = dirty.
 */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
                                    BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

/*
 * Move buf to the LRU list matching its current state, waking bdflush
 * when too large a fraction of the buffers has become dirty.
 */
void refile_buffer(struct buffer_head * buf){
        int i, dispose;
        i = 0;
        if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
        /* page use count > 1 means shared with a user mapping */
        if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
        if(buf->b_lock) i |= 2;
        if(buf->b_dirt) i |= 4;
        dispose = buffer_disposition[i];
        /* A buffer leaving the shared state goes via BUF_UNSHARED. */
        if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
                dispose = BUF_UNSHARED;
        /* NOTE(review): the table above contains no -1 entries, so this
         * check looks unreachable as the code stands - confirm. */
        if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
        if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
        if(dispose != buf->b_list) {
                if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
                        buf->b_lru_time = jiffies;
                /* Recently-dirtied locked buffers get their own list. */
                if(dispose == BUF_LOCKED &&
                   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
                        dispose = BUF_LOCKED1;
                remove_from_queues(buf);
                buf->b_list = dispose;
                insert_into_queues(buf);
                if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
                   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
                   bdf_prm.b_un.nfract/100)
                        wakeup_bdflush(0);
        }
}
860
/*
 * Release a reference to a buffer.  Before dropping the count the
 * flush deadline is refreshed and the buffer is refiled so that e.g.
 * a newly-dirtied buffer lands on BUF_DIRTY.  Waiters (on
 * buffer_wait) are woken when the count reaches zero.
 */
void brelse(struct buffer_head * buf)
{
        if (!buf)
                return;
        wait_on_buffer(buf);

        set_writetime(buf, 0);
        refile_buffer(buf);

        if (buf->b_count) {
                if (--buf->b_count)
                        return;
                wake_up(&buffer_wait);
                return;
        }
        printk("VFS: brelse: Trying to free free buffer\n");
}
879
880
881
882
883
884 struct buffer_head * bread(dev_t dev, int block, int size)
885 {
886 struct buffer_head * bh;
887
888 if (!(bh = getblk(dev, block, size))) {
889 printk("VFS: bread: READ error on device %d/%d\n",
890 MAJOR(dev), MINOR(dev));
891 return NULL;
892 }
893 if (bh->b_uptodate)
894 return bh;
895 ll_rw_block(READ, 1, &bh);
896 wait_on_buffer(bh);
897 if (bh->b_uptodate)
898 return bh;
899 brelse(bh);
900 return NULL;
901 }
902
903
904
905
906
907
908
909 #define NBUF 16
910
/*
 * bread() with read-ahead: read `block` and asynchronously start
 * reads for up to NBUF-1 following blocks, limited by the per-major
 * read_ahead[] window and by `filesize`.  Only the first block is
 * waited for; the rest stay in the cache for later reads.
 */
struct buffer_head * breada(dev_t dev, int block, int bufsize,
                            unsigned int pos, unsigned int filesize)
{
        struct buffer_head * bhlist[NBUF];
        unsigned int blocks;
        struct buffer_head * bh;
        int index;
        int i, j;

        if (pos >= filesize)
                return NULL;

        if (block < 0 || !(bh = getblk(dev,block,bufsize)))
                return NULL;

        index = BUFSIZE_INDEX(bh->b_size);

        if (bh->b_uptodate)
                return bh;

        /* NOTE(review): this window computation masks filesize and pos
         * by (bufsize - 1), which looks like it under-counts the
         * remaining blocks for large files - confirm against later
         * revisions of this code. */
        blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

        if (blocks > (read_ahead[MAJOR(dev)] >> index))
                blocks = read_ahead[MAJOR(dev)] >> index;
        if (blocks > NBUF)
                blocks = NBUF;

        bhlist[0] = bh;
        j = 1;
        for(i=1; i<blocks; i++) {
                bh = getblk(dev,block+i,bufsize);
                /* stop extending read-ahead at the first cached block */
                if (bh->b_uptodate) {
                        brelse(bh);
                        break;
                }
                bhlist[j++] = bh;
        }

        /* Issue all the reads in one batch. */
        ll_rw_block(READ, j, bhlist);

        /* Drop our references to the read-ahead blocks right away. */
        for(i=1; i<j; i++)
                brelse(bhlist[i]);

        /* Only wait for the block the caller actually asked for. */
        bh = bhlist[0];
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
963
964
965
966
967 static void put_unused_buffer_head(struct buffer_head * bh)
968 {
969 struct wait_queue * wait;
970
971 wait = ((volatile struct buffer_head *) bh)->b_wait;
972 memset((void *) bh,0,sizeof(*bh));
973 ((volatile struct buffer_head *) bh)->b_wait = wait;
974 bh->b_next_free = unused_list;
975 unused_list = bh;
976 }
977
978 static void get_more_buffer_heads(void)
979 {
980 int i;
981 struct buffer_head * bh;
982
983 if (unused_list)
984 return;
985
986 if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
987 return;
988
989 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
990 bh->b_next_free = unused_list;
991 unused_list = bh++;
992 }
993 }
994
995 static struct buffer_head * get_unused_buffer_head(void)
996 {
997 struct buffer_head * bh;
998
999 get_more_buffer_heads();
1000 if (!unused_list)
1001 return NULL;
1002 bh = unused_list;
1003 unused_list = bh->b_next_free;
1004 bh->b_next_free = NULL;
1005 bh->b_data = NULL;
1006 bh->b_size = 0;
1007 bh->b_req = 0;
1008 return bh;
1009 }
1010
1011
1012
1013
1014
1015
1016
/*
 * Carve the page at `page` into buffer heads of `size` bytes each,
 * linked through b_this_page (the returned head is the buffer at the
 * highest offset; the chain is NOT yet circular).  Returns NULL,
 * after giving back any heads already taken, when heads run out.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
        struct buffer_head *bh, *head;
        unsigned long offset;

        head = NULL;
        offset = PAGE_SIZE;
        /* offset is unsigned: when it would drop below zero it wraps
         * to a huge value and the loop terminates. */
        while ((offset -= size) < PAGE_SIZE) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = head;
                head = bh;
                bh->b_data = (char *) (page+offset);
                bh->b_size = size;
                bh->b_dev = 0xffff;     /* not attached to a device yet */
        }
        return head;

        /* Ran out of buffer heads: hand back the ones we got. */
no_grow:
        bh = head;
        while (bh) {
                head = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(head);
        }
        return NULL;
}
1047
1048 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1049 {
1050 int i;
1051 int bhnum = 0;
1052 struct buffer_head * bhr[8];
1053
1054 for (i = 0 ; i < nrbuf ; i++) {
1055 if (bh[i] && !bh[i]->b_uptodate)
1056 bhr[bhnum++] = bh[i];
1057 }
1058 if (bhnum)
1059 ll_rw_block(READ, bhnum, bhr);
1060 for (i = 0 ; i < nrbuf ; i++) {
1061 if (bh[i]) {
1062 wait_on_buffer(bh[i]);
1063 }
1064 }
1065 }
1066
/*
 * `first` is the (referenced) buffer for b[0].  Succeeds only when
 * buffers for all blocks of the page already exist, laid out
 * consecutively in one page-aligned chunk of buffer memory: then that
 * page's use count is raised, the caller's page at `address` is freed
 * and the buffer page is returned for sharing.  On failure returns 0,
 * releasing all references and the extra page count.
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
                                   dev_t dev, int *b, int size)
{
        struct buffer_head * bh[8];
        unsigned long page;
        unsigned long offset;
        int block;
        int nrbuf;

        page = (unsigned long) first->b_data;
        if (page & ~PAGE_MASK) {        /* not page aligned: cannot share */
                brelse(first);
                return 0;
        }
        mem_map[MAP_NR(page)]++;        /* extra count for the new mapping */
        bh[0] = first;
        nrbuf = 1;
        for (offset = size ; offset < PAGE_SIZE ; offset += size) {
                block = *++b;
                if (!block)
                        goto no_go;
                first = get_hash_table(dev, block, size);
                if (!first)
                        goto no_go;
                bh[nrbuf++] = first;
                if (page+offset != (unsigned long) first->b_data)
                        goto no_go;
        }
        read_buffers(bh,nrbuf);         /* make everything uptodate */
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        free_page(address);             /* caller's page no longer needed */
        ++current->min_flt;
        return page;
no_go:
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        free_page(page);                /* undoes the mem_map[] increment */
        return 0;
}
1107
/*
 * Turn the caller's free page at `address` directly into buffer-cache
 * buffers for the blocks in b[] (all entries must be non-zero and not
 * cached yet), read them in, and return `address` as a shared buffer
 * page.  Returns 0 - handing back all buffer heads - when the blocks
 * cannot be mapped this way.
 */
static unsigned long try_to_load_aligned(unsigned long address,
                                         dev_t dev, int b[], int size)
{
        struct buffer_head * bh, * tmp, * arr[8];
        unsigned long offset;
        int isize = BUFSIZE_INDEX(size);
        int * p;
        int block;

        bh = create_buffers(address, size);
        if (!bh)
                return 0;
        /* Every slot must name a block, none already in the cache. */
        p = b;
        for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
                block = *(p++);
                if (!block)
                        goto not_aligned;
                if (find_buffer(dev, block, size))
                        goto not_aligned;
        }
        /* Initialise the heads and enter them into the cache. */
        tmp = bh;
        p = b;
        block = 0;
        while (1) {
                arr[block++] = bh;
                bh->b_count = 1;
                bh->b_dirt = 0;
                bh->b_flushtime = 0;
                bh->b_uptodate = 0;
                bh->b_req = 0;
                bh->b_dev = dev;
                bh->b_blocknr = *(p++);
                bh->b_list = BUF_CLEAN;
                nr_buffers++;
                nr_buffers_size[isize]++;
                insert_into_queues(bh);
                if (bh->b_this_page)
                        bh = bh->b_this_page;
                else
                        break;
        }
        buffermem += PAGE_SIZE;
        bh->b_this_page = tmp;          /* close the per-page ring */
        mem_map[MAP_NR(address)]++;     /* page is cache + mapping now */
        buffer_pages[address >> PAGE_SHIFT] = bh;
        read_buffers(arr,block);
        while (block-- > 0)
                brelse(arr[block]);
        ++current->maj_flt;
        return address;
not_aligned:
        while ((tmp = bh) != NULL) {
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }
        return 0;
}
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178 static inline unsigned long try_to_share_buffers(unsigned long address,
1179 dev_t dev, int *b, int size)
1180 {
1181 struct buffer_head * bh;
1182 int block;
1183
1184 block = b[0];
1185 if (!block)
1186 return 0;
1187 bh = get_hash_table(dev, block, size);
1188 if (bh)
1189 return check_aligned(bh, address, dev, b, size);
1190 return try_to_load_aligned(address, dev, b, size);
1191 }
1192
1193 #define COPYBLK(size,from,to) \
1194 __asm__ __volatile__("rep ; movsl": \
1195 :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1196 :"cx","di","si")
1197
1198
1199
1200
1201
1202
1203
1204
/*
 * Read a full page worth of blocks (block numbers in b[]) into the
 * page at `address`.  For read-only mappings we first try to share an
 * aligned buffer-cache page instead of copying.  Zero entries in b[]
 * (file holes) leave the corresponding part of the page untouched.
 * Returns the page address actually used (a different one if shared).
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
        struct buffer_head * bh[8];
        unsigned long where;
        int i, j;

        if (!(prot & PAGE_RW)) {
                where = try_to_share_buffers(address,dev,b,size);
                if (where)
                        return where;
        }
        ++current->maj_flt;
        /* Get all buffers first so the reads can be batched. */
        for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
                bh[i] = NULL;
                if (b[i])
                        bh[i] = getblk(dev, b[i], size);
        }
        read_buffers(bh,i);
        where = address;
        /* Copy each uptodate buffer into its slot in the page. */
        for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
                if (bh[i]) {
                        if (bh[i]->b_uptodate)
                                COPYBLK(size, (unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
        }
        return where;
}
1233
1234
1235
1236
1237
/*
 * Allocate one page, carve it into buffers of `size` bytes and splice
 * them into the free list of that size class.  Returns 1 on success,
 * 0 when the size is invalid or no page/heads could be obtained.
 */
static int grow_buffers(int pri, int size)
{
        unsigned long page;
        struct buffer_head *bh, *tmp;
        struct buffer_head * insert_point;
        int isize;

        if ((size & 511) || (size > PAGE_SIZE)) {
                printk("VFS: grow_buffers: size = %d\n",size);
                return 0;
        }

        isize = BUFSIZE_INDEX(size);

        if (!(page = __get_free_page(pri)))
                return 0;
        bh = create_buffers(page, size);
        if (!bh) {
                free_page(page);
                return 0;
        }

        insert_point = free_list[isize];

        /* Link each head of the new page into the circular free ring
         * (or start a fresh ring when the list was empty). */
        tmp = bh;
        while (1) {
                nr_free[isize]++;
                if (insert_point) {
                        tmp->b_next_free = insert_point->b_next_free;
                        tmp->b_prev_free = insert_point;
                        insert_point->b_next_free->b_prev_free = tmp;
                        insert_point->b_next_free = tmp;
                } else {
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                insert_point = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        /* Close the b_this_page cycle and publish the page. */
        free_list[isize] = bh;
        buffer_pages[page >> PAGE_SHIFT] = bh;
        tmp->b_this_page = bh;
        wake_up(&buffer_wait);
        buffermem += PAGE_SIZE;
        return 1;
}
1288
1289
1290
1291
1292
/*
 * Try to release the whole page that bh lives on.  Fails (returns 0)
 * if any buffer on the page is busy, dirty, locked or waited on.
 * *bhp is updated so the caller's list walk can continue after
 * buffers disappear; it becomes NULL if the whole ring went away.
 * Returns non-zero only when the page itself became free.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
        unsigned long page;
        struct buffer_head * tmp, * p;
        int isize = BUFSIZE_INDEX(bh->b_size);

        *bhp = bh;
        page = (unsigned long) bh->b_data;
        page &= PAGE_MASK;
        /* First pass: every buffer on the page must be idle. */
        tmp = bh;
        do {
                if (!tmp)
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        /* Second pass: tear them all down. */
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                nr_buffers_size[isize]--;
                /* Keep the caller's cursor valid while removing. */
                if (p == *bhp)
                {
                        *bhp = p->b_prev_free;
                        if (p == *bhp)
                                *bhp = NULL;
                }
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= PAGE_SIZE;
        buffer_pages[page >> PAGE_SHIFT] = NULL;
        free_page(page);
        return !mem_map[MAP_NR(page)];
}
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
/*
 * Load-average balancing between buffer size classes: when some other
 * class sees proportionally less use (buffers_lav) than its share of
 * the buffers, shrink it so its pages can be reused - typically for
 * the class given by `size` (size == 0 means "any").  Returns 1 if a
 * page was freed.
 */
static int maybe_shrink_lav_buffers(int size)
{
        int nlist;
        int isize;
        int total_lav, total_n_buffers, n_sizes;

        /* Tally overall load and buffer counts; shared buffers are
         * excluded since they cannot be reclaimed anyway. */
        total_lav = total_n_buffers = n_sizes = 0;
        for(nlist = 0; nlist < NR_SIZES; nlist++)
        {
                total_lav += buffers_lav[nlist];
                if(nr_buffers_size[nlist]) n_sizes++;
                total_n_buffers += nr_buffers_size[nlist];
                total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
        }

        /* Never shrink the class we are trying to grow. */
        isize = (size ? BUFSIZE_INDEX(size) : -1);

        if (n_sizes > 1)
                for(nlist = 0; nlist < NR_SIZES; nlist++)
                {
                        if(nlist == isize) continue;
                        if(nr_buffers_size[nlist] &&
                           bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
                           total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
                                if(shrink_specific_buffers(6, bufferindex_size[nlist]))
                                        return 1;
                }
        return 0;
}
1381
1382
1383
1384
1385
1386
1387
/*
 * Memory-pressure entry point: at high pressure (priority < 2) sync
 * everything first, at priority 2 just wake bdflush, then try the
 * LAV balancer before falling back to a direct shrink.  Returns 1
 * when a page was freed.
 */
int shrink_buffers(unsigned int priority)
{
        if (priority < 2)
                sync_buffers(0, 0);
        else if (priority == 2)
                wakeup_bdflush(1);

        if (maybe_shrink_lav_buffers(0))
                return 1;

        return shrink_specific_buffers(priority, 0);
}
1401
/*
 * Try to free one page of buffer memory (of `size` bytes per buffer,
 * or any size when size == 0).  First scan the free lists - cheapest -
 * then walk the LRU lists, scanning a 1 >> priority fraction of each.
 * Dirty buffers are scheduled for write-ahead rather than freed.
 * Returns 1 as soon as one page has been released.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
        struct buffer_head *bh;
        int nlist;
        int i, isize, isize1;

#ifdef DEBUG
        if(size) printk("Shrinking buffers of size %d\n", size);
#endif
        /* isize1 restricts the free-list scan to one size class. */
        isize1 = (size ? BUFSIZE_INDEX(size) : -1);

        for(isize = 0; isize<NR_SIZES; isize++){
                if(isize1 != -1 && isize1 != isize) continue;
                bh = free_list[isize];
                if(!bh) continue;
                for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
                        if (bh->b_count || !bh->b_this_page)
                                continue;
                        if (try_to_free(bh, &bh))
                                return 1;
                        /* try_to_free() may have emptied the ring */
                        if(!bh) break;
                }
        }

        /* Nothing on the free lists: scan the LRU lists. */
        for(nlist = 0; nlist < NR_LIST; nlist++) {
        repeat1:
                if(priority > 3 && nlist == BUF_SHARED) continue;
                bh = lru_list[nlist];
                if(!bh) continue;
                i = nr_buffers_type[nlist] >> priority;
                for ( ; i-- > 0 ; bh = bh->b_next_free) {
                        /* buffer refiled while we slept - restart list */
                        if(bh->b_list != nlist) goto repeat1;
                        if (bh->b_count || !bh->b_this_page)
                                continue;
                        if(size && bh->b_size != size) continue;
                        /* The else binds to `if (priority)`: only at
                         * priority 0 do we wait for locked buffers. */
                        if (bh->b_lock)
                                if (priority)
                                        continue;
                                else
                                        wait_on_buffer(bh);
                        if (bh->b_dirt) {
                                bh->b_count++;
                                bh->b_flushtime = 0;
                                ll_rw_block(WRITEA, 1, &bh);
                                bh->b_count--;
                                continue;
                        }
                        if (try_to_free(bh, &bh))
                                return 1;
                        if(!bh) break;
                }
        }
        return 0;
}
1462
1463
/*
 * Debugging aid: dump buffer-cache statistics to the console — overall
 * totals, then one line per LRU list, then a per-size table.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n",buffermem>>10);
	printk("Buffer heads: %6d\n",nr_buffer_heads);
	printk("Buffer blocks: %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Walk the circular LRU ring exactly once, tallying states. */
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			/* A page use count above 1 means the buffer is shared. */
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			 printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1502
1503
1504
1505
1506
/*
 * Try to repurpose the whole page that bh lives on for the given device,
 * renumbering its buffers as consecutive blocks starting at
 * starting_block.  Returns 1 on success, 0 if the page is unusable
 * (page shared, or any buffer on it busy, dirty or locked).
 * *bhp is set to bh so the caller's scan pointer stays valid.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* Page must not be mapped by anyone else. */
	if(mem_map[MAP_NR(page)] != 1) return 0;
	/* Every buffer on the page must be completely idle. */
	tmp = bh;
	do {
		if (!tmp)
			 return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			 return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Step back to the buffer at the start of the page so the new
	   block numbers are handed out in page order. */
	while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
		 tmp = tmp->b_this_page;

	/* Rehash every buffer on the page under its new identity. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;	/* contents no longer valid */
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 static int reassign_cluster(dev_t dev,
1561 unsigned int starting_block, int size)
1562 {
1563 struct buffer_head *bh;
1564 int isize = BUFSIZE_INDEX(size);
1565 int i;
1566
1567
1568
1569
1570
1571 while(nr_free[isize] < 32) refill_freelist(size);
1572
1573 bh = free_list[isize];
1574 if(bh)
1575 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1576 if (!bh->b_this_page) continue;
1577 if (try_to_reassign(bh, &bh, dev, starting_block))
1578 return 4;
1579 }
1580 return 0;
1581 }
1582
1583
1584
1585
1586
/*
 * Try to build a page-sized cluster of buffers for (dev, block..) from a
 * freshly allocated page.  Returns 4 (kB committed) on success, 0 on
 * failure (no memory, or some block of the run is already cached).
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* Give up if any block of the run is already in the cache. */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			 goto not_aligned;
	}
	/* Initialize each buffer on the page and hash it in; remember
	   them in arr[] so we can drop our references afterwards. */
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[page >> PAGE_SHIFT] = bh;
	bh->b_this_page = tmp;	/* close the per-page buffer ring */
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* Undo everything: release the buffer heads and the page. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1643
1644 unsigned long generate_cluster(dev_t dev, int b[], int size)
1645 {
1646 int i, offset;
1647
1648 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1649 if(i && b[i]-1 != b[i-1]) return 0;
1650 if(find_buffer(dev, b[i], size)) return 0;
1651 };
1652
1653
1654
1655
1656
1657 if(maybe_shrink_lav_buffers(size))
1658 {
1659 int retval;
1660 retval = try_to_generate_cluster(dev, b[0], size);
1661 if(retval) return retval;
1662 };
1663
1664 if (nr_free_pages > min_free_pages)
1665 return try_to_generate_cluster(dev, b[0], size);
1666 else
1667 return reassign_cluster(dev, b[0], size);
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677 void buffer_init(void)
1678 {
1679 int i;
1680 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1681
1682 if (high_memory >= 4*1024*1024) {
1683 min_free_pages = 200;
1684 if(high_memory >= 16*1024*1024)
1685 nr_hash = 16381;
1686 else
1687 nr_hash = 4093;
1688 } else {
1689 min_free_pages = 20;
1690 nr_hash = 997;
1691 };
1692
1693 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1694 sizeof(struct buffer_head *));
1695
1696
1697 buffer_pages = (struct buffer_head **) vmalloc((high_memory >>PAGE_SHIFT) *
1698 sizeof(struct buffer_head *));
1699 for (i = 0 ; i < high_memory >> PAGE_SHIFT ; i++)
1700 buffer_pages[i] = NULL;
1701
1702 for (i = 0 ; i < nr_hash ; i++)
1703 hash_table[i] = NULL;
1704 lru_list[BUF_CLEAN] = 0;
1705 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1706 if (!free_list[isize])
1707 panic("VFS: Unable to initialize buffer free list!");
1708 return;
1709 }
1710
1711
1712
1713
1714
1715
1716
1717
/* Queue the bdflush daemon sleeps on until someone wants a flush. */
struct wait_queue * bdflush_wait = NULL;
/* Queue woken by bdflush when a flushing pass has completed. */
struct wait_queue * bdflush_done = NULL;

/* Nonzero once a process has become the bdflush daemon (sys_bdflush). */
static int bdflush_running = 0;
1722
1723 static void wakeup_bdflush(int wait)
1724 {
1725 if(!bdflush_running){
1726 printk("Warning - bdflush not running\n");
1727 sync_buffers(0,0);
1728 return;
1729 };
1730 wake_up(&bdflush_wait);
1731 if(wait) sleep_on(&bdflush_done);
1732 }
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
/*
 * Write back dirty buffers whose flush deadline (b_flushtime) has
 * passed, after syncing superblocks and inodes.  Also decays the
 * per-size buffer load averages.  Always returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	/* Normally only the dirty list needs scanning. */
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			 for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				 /* Buffer was refiled under us: restart the scan. */
				 if(bh->b_list != nlist) goto repeat;
				 next = bh->b_next_free;
				 if(!lru_list[nlist]) {
					 printk("Dirty list empty %d\n", i);
					 break;
				 }

				 /* Clean, unlocked buffer parked on the dirty
				    list: move it to where it belongs. */
				 if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				 {
					 refile_buffer(bh);
					 continue;
				 }

				 if (bh->b_lock || !bh->b_dirt)
					  continue;
				 ndirty++;
				 /* Not due for writing yet. */
				 if(bh->b_flushtime > jiffies) continue;
				 nwritten++;
				 bh->b_count++;		/* pin across the write */
				 bh->b_flushtime = 0;
#ifdef DEBUG
				 if(nlist != BUF_DIRTY) ncount++;
#endif
				 ll_rw_block(WRITE, 1, &bh);
				 bh->b_count--;
			 }
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Decay the load averages and reset the usage counters. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1811
1812
1813
1814
1815
1816
1817
1818
/*
 * The bdflush system call (superuser only):
 *   func == 1         - one-shot flush of old buffers (sync_old_buffers).
 *   func >= 2, even   - read tuning parameter (func-2)>>1 into *data.
 *   func >= 2, odd    - set tuning parameter (func-2)>>1 to data.
 *   func == 0         - turn the calling process into the bdflush
 *                       daemon; only returns on SIGKILL.
 */
asmlinkage int sys_bdflush(int func, int data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if(!suser()) return -EPERM;

	if(func == 1)
		 return sync_old_buffers();

	/* Tuning-parameter get/set interface. */
	if(func >= 2){
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM) return -EINVAL;
		if((func & 1) == 0) {
			/* Even func: copy the current value to user space. */
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if(error) return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		/* Odd func: set the value after range-checking it. */
		if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	/* Only one bdflush daemon may run at a time. */
	if(bdflush_running++) return -EBUSY;

	/* Daemon main loop: flush, notify waiters, maybe sleep, repeat. */
	while(1==1){
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		/* Normally only the dirty list needs scanning. */
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		 {
			 ndirty = 0;
		 repeat:
			 bh = lru_list[nlist];
			 if(bh)
				  /* Write out at most ndirty buffers per pass. */
				  for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				       bh = next) {
					  /* Buffer was refiled under us: restart. */
					  if(bh->b_list != nlist) goto repeat;
					  next = bh->b_next_free;
					  if(!lru_list[nlist]) {
						  printk("Dirty list empty %d\n", i);
						  break;
					  }

					  /* Clean, unlocked buffer parked on the
					     dirty list: refile instead of writing. */
					  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					  {
						  refile_buffer(bh);
						  continue;
					  }

					  if (bh->b_lock || !bh->b_dirt)
						   continue;

					  bh->b_count++;	/* pin across the write */
					  ndirty++;
					  bh->b_flushtime = 0;
					  ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					  if(nlist != BUF_DIRTY) ncount++;
#endif
					  bh->b_count--;
				  }
		 }
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);

		/* Sleep only if the dirty fraction is back under the
		   configured threshold; otherwise flush again at once. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;	/* ignore everything but SIGKILL */
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935