This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'buffer.c' implements the buffer-cache functions. Race-conditions have
9  * been avoided by NEVER letting an interrupt change a buffer (except for the
10  * data, of course), but instead letting the caller do it.
11  */
12
13 /*
14  * NOTE! There is one discordant note here: checking floppies for
15  * disk change. This is where it fits best, I think, as it should
16  * invalidate changed floppy-disk-caches.
17  */
18
19 #include <linux/config.h>
20 #include <linux/sched.h>
21 #include <linux/kernel.h>
22 #include <linux/major.h>
23 #include <linux/string.h>
24 #include <linux/locks.h>
25 #include <linux/errno.h>
26 #include <linux/malloc.h>
27
28 #include <asm/system.h>
29 #include <asm/segment.h>
30 #include <asm/io.h>
31
32 #ifdef CONFIG_SCSI
33 #ifdef CONFIG_BLK_DEV_SR
34 extern int check_cdrom_media_change(int, int);
35 #endif
36 #ifdef CONFIG_BLK_DEV_SD
37 extern int check_scsidisk_media_change(int, int);
38 extern int revalidate_scsidisk(int, int);
39 #endif
40 #endif
41 #ifdef CONFIG_CDU31A
42 extern int check_cdu31a_media_change(int, int);
43 #endif
44 #ifdef CONFIG_MCD
45 extern int check_mcd_media_change(int, int);
46 #endif
47 #ifdef CONFIG_SBPCD
48 extern int check_sbpcd_media_change(int, int);
49 #endif
50
51 #define NR_SIZES 4
52 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
53 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
54
55 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
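/*
 * BUFSIZE_INDEX() maps a legal buffer size to an index into the
 * per-size arrays above: 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3
 * (e.g. BUFSIZE_INDEX(1024) == (int) buffersize_index[2] == 1).
 * Sizes that are not one of these four yield -1.
 */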
56
57 static int grow_buffers(int pri, int size);
58 static int shrink_specific_buffers(unsigned int priority, int size);
59 static int maybe_shrink_lav_buffers(int);
60
61 static int nr_hash = 0;
62 static struct buffer_head ** hash_table;
63 struct buffer_head ** buffer_pages;
64 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
65 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
66 static struct buffer_head * unused_list = NULL;
67 static struct wait_queue * buffer_wait = NULL;
68
69 int nr_buffers = 0;
70 int nr_buffers_type[NR_LIST] = {0,};
71 int nr_buffers_size[NR_SIZES] = {0,};
72 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
73 int buffer_usage[NR_SIZES] = {0,};
74 int buffers_lav[NR_SIZES] = {0,};
75 int nr_free[NR_SIZES] = {0,};
76 int buffermem = 0;
77 int nr_buffer_heads = 0;
78 static int min_free_pages = 20;
79 extern int *blksize_size[];
80
81
82 static void wakeup_bdflush(int);
83
84 #define N_PARAM 9
85 #define LAV
86
87 static union bdflush_param{
88 struct {
89 int nfract;    /* Percentage of buffer cache dirty to
90                   activate bdflush */
91 int ndirty;    /* Maximum number of dirty blocks to write out per
92                   wake-cycle */
93 int nrefill;   /* Number of clean buffers to try to obtain
94                   each time we call refill */
95 int nref_dirt; /* Dirty buffer threshold for activating bdflush
96                   when trying to refill buffers. */
97 int clu_nfract; /* Percentage of buffer cache to scan to
98                    search for free clusters */
99 int age_buffer; /* Time for normal buffer to age before
100                    we flush it */
101 int age_super; /* Time for superblock to age before we
102                   flush it */
103 int lav_const; /* Constant used for load average (time
104                   constant) */
105 int lav_ratio; /* Used to determine how low a lav for a
106                   particular size can go before we start to
107                   trim back the buffers */
108 } b_un;
109 unsigned int data[N_PARAM];
110 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
111
112 /* The lav constant is set for 1 minute, as long as the update process runs
113    every 5 seconds.  If you change the frequency of update, the time
114    constant will also change. */
115
116
117 /* These are the min and max parameter values that we will allow to be assigned */
118 static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
119 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
120
121
122 /*
123  * Wait for a buffer to become unlocked.  The buffer is pinned with an
124  * extra b_count reference while we sleep, so that it cannot be freed
125  * or reallocated out from under us.
126  */
127
128
129
130 void __wait_on_buffer(struct buffer_head * bh)
131 {
132 struct wait_queue wait = { current, NULL };
133
134 bh->b_count++;
135 add_wait_queue(&bh->b_wait, &wait);
136 repeat:
137 current->state = TASK_UNINTERRUPTIBLE;
138 if (bh->b_lock) {
139 schedule();
140 goto repeat;
141 }
142 remove_wait_queue(&bh->b_wait, &wait);
143 bh->b_count--;
144 current->state = TASK_RUNNING;
145 }
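/*
 * Callers normally use the wait_on_buffer() inline wrapper (from
 * <linux/locks.h>) rather than calling this directly; the wrapper is
 * roughly the following sketch, checking b_lock first so the slow path
 * above is only entered for buffers that are actually locked:
 *
 *	extern inline void wait_on_buffer(struct buffer_head * bh)
 *	{
 *		if (bh->b_lock)
 *			__wait_on_buffer(bh);
 *	}
 */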
146
147 /* Call sync_buffers with wait!=0 to ensure that the call does not
148    return until all buffer writes have completed.  Sync() may return
149    before the writes have finished; fsync() may not. */
150
151 /* Godamity-damn.  Some buffers (bitmaps for filesystems)
152    spontaneously dirty themselves without ever brelse being called.
153    We will ultimately want to put these in a separate list, but for
154    now we search all of the lists for dirty buffers */
155
156
157 static int sync_buffers(dev_t dev, int wait)
158 {
159 int i, retry, pass = 0, err = 0;
160 int nlist, ncount;
161 struct buffer_head * bh, *next;
162
163 /* One pass for no-wait, three for wait:
164    0) write out all dirty, unlocked buffers;
165    1) write out all dirty buffers, waiting if locked;
166    2) wait for completion by waiting for all buffers to unlock. */
167 repeat:
168 retry = 0;
169 ncount = 0;
170 /* We search all lists as a failsafe mechanism, not because we expect
171    there to be dirty buffers on any of the other lists. */
172 for(nlist = 0; nlist < NR_LIST; nlist++)
173 {
174 repeat1:
175 bh = lru_list[nlist];
176 if(!bh) continue;
177 for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
178 if(bh->b_list != nlist) goto repeat1;
179 next = bh->b_next_free;
180 if(!lru_list[nlist]) break;
181 if (dev && bh->b_dev != dev)
182 continue;
183 if (bh->b_lock)
184 {
185 /* Buffer is locked; skip it unless wait is requested
186    AND pass > 0. */
187 if (!wait || !pass) {
188 retry = 1;
189 continue;
190 }
191 wait_on_buffer (bh);
192 }
193 /* If an unlocked buffer is not uptodate, there has been an IO
194    error. Maybe due to a disk change. */
195 if (wait && bh->b_req && !bh->b_lock &&
196 !bh->b_dirt && !bh->b_uptodate) {
197 err = 1;
198 printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
199 continue;
200 }
201 /* Don't write clean buffers.  Don't write ANY buffers
202    on the third pass. */
203 if (!bh->b_dirt || pass>=2)
204 continue;
205 bh->b_count++;
206 bh->b_flushtime = 0;
207 ll_rw_block(WRITE, 1, &bh);
208
209 if(nlist != BUF_DIRTY) {
210 printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
211 ncount++;
212 };
213 bh->b_count--;
214 retry = 1;
215 }
216 }
217 if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
218
219 /* If we are waiting for the sync to succeed, and if any dirty
220    blocks were written, then repeat; on the second pass, only
221    wait for buffers being written (do not pass to write any
222    more buffers on the second pass). */
223 if (wait && retry && ++pass<=2)
224 goto repeat;
225 return err;
226 }
227
228 void sync_dev(dev_t dev)
229 {
230 sync_buffers(dev, 0);
231 sync_supers(dev);
232 sync_inodes(dev);
233 sync_buffers(dev, 0);
234 }
235
236 int fsync_dev(dev_t dev)
237 {
238 sync_buffers(dev, 0);
239 sync_supers(dev);
240 sync_inodes(dev);
241 return sync_buffers(dev, 1);
242 }
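/*
 * Note the difference: sync_dev() schedules the writes and returns
 * immediately (both sync_buffers() calls use wait == 0), while
 * fsync_dev() makes its final sync_buffers() pass with wait == 1 and so
 * only returns, with a meaningful error code, once the writes are done.
 */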
243
244 asmlinkage int sys_sync(void)
245 {
246 sync_dev(0);
247 return 0;
248 }
249
250 int file_fsync (struct inode *inode, struct file *filp)
251 {
252 return fsync_dev(inode->i_dev);
253 }
254
255 asmlinkage int sys_fsync(unsigned int fd)
256 {
257 struct file * file;
258 struct inode * inode;
259
260 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
261 return -EBADF;
262 if (!file->f_op || !file->f_op->fsync)
263 return -EINVAL;
264 if (file->f_op->fsync(inode,file))
265 return -EIO;
266 return 0;
267 }
268
269 void invalidate_buffers(dev_t dev)
270 {
271 int i;
272 int nlist;
273 struct buffer_head * bh;
274
275 for(nlist = 0; nlist < NR_LIST; nlist++) {
276 bh = lru_list[nlist];
277 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
278 bh = bh->b_next_free) {
279 if (bh->b_dev != dev)
280 continue;
281 wait_on_buffer(bh);
282 if (bh->b_dev == dev)
283 bh->b_flushtime = bh->b_uptodate =
284 bh->b_dirt = bh->b_req = 0;
285 }
286 }
287 }
288
289 /*
290  * This routine checks whether a floppy has been changed, and
291  * invalidates all buffer-cache-entries in that case. This
292  * is a relatively slow routine, so we have to try to minimize using
293  * it. Thus it is called only upon a 'mount' or 'open'. This
294  * is the best way of combining speed and utility, I think.
295  * People changing diskettes in the middle of an operation deserve
296  * to lose :-)
297  *
298  * NOTE! Although currently this is only for floppies, the idea is
299  * that any additional removable block-device will use this routine,
300  * and that mount/open needn't know that floppies/whatever are
301  * special.
302  */
303 void check_disk_change(dev_t dev)
304 {
305 int i;
306 struct buffer_head * bh;
307
308 switch(MAJOR(dev)){
309 case FLOPPY_MAJOR:
310 if (!(bh = getblk(dev,0,1024)))
311 return;
312 i = floppy_change(bh);
313 brelse(bh);
314 break;
315
316 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
317 case SCSI_DISK_MAJOR:
318 i = check_scsidisk_media_change(dev, 0);
319 break;
320 #endif
321
322 #if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
323 case SCSI_CDROM_MAJOR:
324 i = check_cdrom_media_change(dev, 0);
325 break;
326 #endif
327
328 #if defined(CONFIG_CDU31A)
329 case CDU31A_CDROM_MAJOR:
330 i = check_cdu31a_media_change(dev, 0);
331 break;
332 #endif
333
334 #if defined(CONFIG_MCD)
335 case MITSUMI_CDROM_MAJOR:
336 i = check_mcd_media_change(dev, 0);
337 break;
338 #endif
339
340 #if defined(CONFIG_SBPCD)
341 case MATSUSHITA_CDROM_MAJOR:
342 i = check_sbpcd_media_change(dev, 0);
343 break;
344 #endif
345
346 default:
347 return;
348 };
349
350 if (!i) return;
351
352 printk("VFS: Disk change detected on device %d/%d\n",
353 MAJOR(dev), MINOR(dev));
354 for (i=0 ; i<NR_SUPER ; i++)
355 if (super_blocks[i].s_dev == dev)
356 put_super(super_blocks[i].s_dev);
357 invalidate_inodes(dev);
358 invalidate_buffers(dev);
359
360 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
361 /* This is trickier for a removable hardisk, because we have to
362    invalidate all of the partitions that lie on the disk. */
363 if (MAJOR(dev) == SCSI_DISK_MAJOR)
364 revalidate_scsidisk(dev, 0);
365 #endif
366 }
367
368 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
369 #define hash(dev,block) hash_table[_hashfn(dev,block)]
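/*
 * The buffer hash is keyed on (dev ^ block) modulo nr_hash; all buffers
 * that collide on a chain are linked through b_next/b_prev.  nr_hash is
 * chosen to be prime in buffer_init() below (997, 4093 or 16381
 * depending on memory size), so the simple XOR spreads entries evenly.
 */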
370
371 static inline void remove_from_hash_queue(struct buffer_head * bh)
372 {
373 if (bh->b_next)
374 bh->b_next->b_prev = bh->b_prev;
375 if (bh->b_prev)
376 bh->b_prev->b_next = bh->b_next;
377 if (hash(bh->b_dev,bh->b_blocknr) == bh)
378 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
379 bh->b_next = bh->b_prev = NULL;
380 }
381
382 static inline void remove_from_lru_list(struct buffer_head * bh)
383 {
384 if (!(bh->b_prev_free) || !(bh->b_next_free))
385 panic("VFS: LRU block list corrupted");
386 if (bh->b_dev == 0xffff) panic("LRU list corrupted");
387 bh->b_prev_free->b_next_free = bh->b_next_free;
388 bh->b_next_free->b_prev_free = bh->b_prev_free;
389
390 if (lru_list[bh->b_list] == bh)
391 lru_list[bh->b_list] = bh->b_next_free;
392 if(lru_list[bh->b_list] == bh)
393 lru_list[bh->b_list] = NULL;
394 bh->b_next_free = bh->b_prev_free = NULL;
395 }
396
397 static inline void remove_from_free_list(struct buffer_head * bh)
398 {
399 int isize = BUFSIZE_INDEX(bh->b_size);
400 if (!(bh->b_prev_free) || !(bh->b_next_free))
401 panic("VFS: Free block list corrupted");
402 if(bh->b_dev != 0xffff) panic("Free list corrupted");
403 if(!free_list[isize])
404 panic("Free list empty");
405 nr_free[isize]--;
406 if(bh->b_next_free == bh)
407 free_list[isize] = NULL;
408 else {
409 bh->b_prev_free->b_next_free = bh->b_next_free;
410 bh->b_next_free->b_prev_free = bh->b_prev_free;
411 if (free_list[isize] == bh)
412 free_list[isize] = bh->b_next_free;
413 };
414 bh->b_next_free = bh->b_prev_free = NULL;
415 }
416
417 static inline void remove_from_queues(struct buffer_head * bh)
418 {
419 if(bh->b_dev == 0xffff) {
420 remove_from_free_list(bh);
421
422 return;
423 };
424 nr_buffers_type[bh->b_list]--;
425 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
426 remove_from_hash_queue(bh);
427 remove_from_lru_list(bh);
428 }
429
430 static inline void put_last_lru(struct buffer_head * bh)
431 {
432 if (!bh)
433 return;
434 if (bh == lru_list[bh->b_list]) {
435 lru_list[bh->b_list] = bh->b_next_free;
436 return;
437 }
438 if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
439 remove_from_lru_list(bh);
440
441 /* Add to back of the lru list */
442 if(!lru_list[bh->b_list]) {
443 lru_list[bh->b_list] = bh;
444 lru_list[bh->b_list]->b_prev_free = bh;
445 };
446
447 bh->b_next_free = lru_list[bh->b_list];
448 bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
449 lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
450 lru_list[bh->b_list]->b_prev_free = bh;
451 }
452
453 static inline void put_last_free(struct buffer_head * bh)
454 {
455 int isize;
456 if (!bh)
457 return;
458
459 isize = BUFSIZE_INDEX(bh->b_size);
460 bh->b_dev = 0xffff;
461
462 /* Add to back of the free list */
463 if(!free_list[isize]) {
464 free_list[isize] = bh;
465 bh->b_prev_free = bh;
466 };
467
468 nr_free[isize]++;
469 bh->b_next_free = free_list[isize];
470 bh->b_prev_free = free_list[isize]->b_prev_free;
471 free_list[isize]->b_prev_free->b_next_free = bh;
472 free_list[isize]->b_prev_free = bh;
473 }
474
475 static inline void insert_into_queues(struct buffer_head * bh)
476 {
477
478 /* Put the buffer at the end of the appropriate list */
479 if(bh->b_dev == 0xffff) {
480 put_last_free(bh);
481 return;
482 };
483 if(!lru_list[bh->b_list]) {
484 lru_list[bh->b_list] = bh;
485 bh->b_prev_free = bh;
486 };
487 if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
488 bh->b_next_free = lru_list[bh->b_list];
489 bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
490 lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
491 lru_list[bh->b_list]->b_prev_free = bh;
492 nr_buffers_type[bh->b_list]++;
493 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
494 /* Put the buffer in a new hash-queue if it has a device */
495 bh->b_prev = NULL;
496 bh->b_next = NULL;
497 if (!bh->b_dev)
498 return;
499 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
500 hash(bh->b_dev,bh->b_blocknr) = bh;
501 if (bh->b_next)
502 bh->b_next->b_prev = bh;
503 }
504
505 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
506 {
507 struct buffer_head * tmp;
508
509 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
510 if (tmp->b_dev==dev && tmp->b_blocknr==block)
511 if (tmp->b_size == size)
512 return tmp;
513 else {
514 printk("VFS: Wrong blocksize on device %d/%d\n",
515 MAJOR(dev), MINOR(dev));
516 return NULL;
517 }
518 return NULL;
519 }
520
521 /*
522  * Why like this, I hear you say... The reason is race-conditions.
523  * As we don't lock buffers (unless we are reading them, that is),
524  * something might happen to it while we sleep (ie a read-error
525  * will force it bad). This shouldn't really happen currently, but
526  * the code is ready.
527  */
528 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
529 {
530 struct buffer_head * bh;
531
532 for (;;) {
533 if (!(bh=find_buffer(dev,block,size)))
534 return NULL;
535 bh->b_count++;
536 wait_on_buffer(bh);
537 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
538 return bh;
539 bh->b_count--;
540 }
541 }
542
543 void set_blocksize(dev_t dev, int size)
544 {
545 int i, nlist;
546 struct buffer_head * bh, *bhnext;
547
548 if (!blksize_size[MAJOR(dev)])
549 return;
550
551 switch(size) {
552 default: panic("Invalid blocksize passed to set_blocksize");
553 case 512: case 1024: case 2048: case 4096:;
554 }
555
556 if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
557 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
558 return;
559 }
560 if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
561 return;
562 sync_buffers(dev, 2);
563 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
564
565
566 /* We need to be quite careful how we do this - we are moving entries
567    around on the free list, and we can get in a loop if we are not careful. */
568 for(nlist = 0; nlist < NR_LIST; nlist++) {
569 bh = lru_list[nlist];
570 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
571 if(!bh) break;
572 bhnext = bh->b_next_free;
573 if (bh->b_dev != dev)
574 continue;
575 if (bh->b_size == size)
576 continue;
577
578 wait_on_buffer(bh);
579 if (bh->b_dev == dev && bh->b_size != size) {
580 bh->b_uptodate = bh->b_dirt =
581 bh->b_flushtime = 0;
582 };
583 remove_from_hash_queue(bh);
584 }
585 }
586 }
587
588 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
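/*
 * BADNESS() ranks a buffer for reclaim: 0 means clean and unlocked (an
 * ideal victim), 1 locked, 2 or 3 dirty.  refill_freelist() below only
 * accepts candidates with BADNESS() == 0.
 */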
589
590 void refill_freelist(int size)
591 {
592 struct buffer_head * bh, * tmp;
593 struct buffer_head * candidate[NR_LIST];
594 unsigned int best_time, winner;
595 int isize = BUFSIZE_INDEX(size);
596 int buffers[NR_LIST];
597 int i;
598 int needed;
599
600
601 /* First see if we even need this.  Sometimes it is advantageous
602    to request some blocks in a filesystem that we know that we will
603    be needing ahead of time. */
604 if (nr_free[isize] > 100)
605 return;
606
607 /* If there are too many dirty buffers, we wake up the update process
608    now so as to ensure that there are still clean buffers available
609    for user processes to use (and dirty) */
610
611 /* We are going to try to locate this much memory */
612 needed = bdf_prm.b_un.nrefill * size;
613
614 while (nr_free_pages > min_free_pages && needed > 0 &&
615 grow_buffers(GFP_BUFFER, size)) {
616 needed -= PAGE_SIZE;
617 }
618
619 if(needed <= 0) return;
620
621
622 /* See if there are too many buffers of a different size.
623    If so, victimize them */
624 while(maybe_shrink_lav_buffers(size))
625 {
626 if(!grow_buffers(GFP_BUFFER, size)) break;
627 needed -= PAGE_SIZE;
628 if(needed <= 0) return;
629 };
630
631
632 /* OK, we cannot grow the buffer cache, now try to get some
633    from the lru list */
634
635 /* First set the candidate pointers to usable buffers.  This
636    should be quick nearly all of the time. */
637 repeat0:
638 for(i=0; i<NR_LIST; i++){
639 if(i == BUF_DIRTY || i == BUF_SHARED ||
640 nr_buffers_type[i] == 0) {
641 candidate[i] = NULL;
642 buffers[i] = 0;
643 continue;
644 }
645 buffers[i] = nr_buffers_type[i];
646 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
647 {
648 if(buffers[i] < 0) panic("Here is the problem");
649 tmp = bh->b_next_free;
650 if (!bh) break;
651
652 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
653 bh->b_dirt) {
654 refile_buffer(bh);
655 continue;
656 };
657
658 if (bh->b_count || bh->b_size != size)
659 continue;
660
661 /* Buffers are written in the order they are placed on the
662    locked list.  If we encounter a locked buffer here, this
663    means that the rest of them are also locked */
664
665 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
666 buffers[i] = 0;
667 break;
668 }
669
670 if (BADNESS(bh)) continue;
671 break;
672 };
673 if(!buffers[i]) candidate[i] = NULL;
674 else candidate[i] = bh;
675 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
676 }
677
678 repeat:
679 if(needed <= 0) return;
680
681
682 /* Now see which candidate wins the election */
683 winner = best_time = UINT_MAX;
684 for(i=0; i<NR_LIST; i++){
685 if(!candidate[i]) continue;
686 if(candidate[i]->b_lru_time < best_time){
687 best_time = candidate[i]->b_lru_time;
688 winner = i;
689 }
690 }
691
692 /* If we have a winner, use it, and then get a new candidate from that list */
693 if(winner != UINT_MAX) {
694 i = winner;
695 bh = candidate[i];
696 candidate[i] = bh->b_next_free;
697 if(candidate[i] == bh) candidate[i] = NULL;
698 if (bh->b_count || bh->b_size != size)
699 panic("Busy buffer in candidate list\n");
700 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
701 panic("Shared buffer in candidate list\n");
702 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
703
704 if(bh->b_dev == 0xffff) panic("Wrong list");
705 remove_from_queues(bh);
706 bh->b_dev = 0xffff;
707 put_last_free(bh);
708 needed -= bh->b_size;
709 buffers[i]--;
710 if(buffers[i] < 0) panic("Here is the problem");
711
712 if(buffers[i] == 0) candidate[i] = NULL;
713
714 /* Now all we need to do is advance the candidate pointer
715    from the winner list to the next usable buffer */
716 if(candidate[i] && buffers[i] > 0){
717 if(buffers[i] <= 0) panic("Here is another problem");
718 for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
719 if(buffers[i] < 0) panic("Here is the problem");
720 tmp = bh->b_next_free;
721 if (!bh) break;
722
723 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
724 bh->b_dirt) {
725 refile_buffer(bh);
726 continue;
727 };
728
729 if (bh->b_count || bh->b_size != size)
730 continue;
731
732 /* Buffers are written in the order they are placed on the
733    locked list.  If we encounter a locked buffer here, this
734    means that the rest of them are also locked */
735
736 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
737 buffers[i] = 0;
738 break;
739 }
740
741 if (BADNESS(bh)) continue;
742 break;
743 };
744 if(!buffers[i]) candidate[i] = NULL;
745 else candidate[i] = bh;
746 if(candidate[i] && candidate[i]->b_count)
747 panic("Here is the problem");
748 }
749
750 goto repeat;
751 }
752
753 if(needed <= 0) return;
754
755
756 /* Too bad, that was not enough. Try a little harder to grow some. */
757 if (nr_free_pages > 5) {
758 if (grow_buffers(GFP_BUFFER, size)) {
759 needed -= PAGE_SIZE;
760 goto repeat0;
761 };
762 }
763
764 /* and repeat until we find something good */
765 if (!grow_buffers(GFP_ATOMIC, size))
766 wakeup_bdflush(1);
767 needed -= PAGE_SIZE;
768 goto repeat0;
769 }
770
771 /*
772  * Ok, this is getblk, and it isn't very clear, again to hinder
773  * race-conditions. Most of the code is seldom used, (ie repeating),
774  * so it should be much more efficient than it looks.
775  *
776  * The algorithm is changed: hopefully better, and an elusive bug removed.
777  * 14.02.92: changed it to sync dirty buffers a bit: better performance
778  * when the machine acts as a file server... (almost certainly fixes the
779  * process-kills-machine-by-dirty-buffers problem)
780  */
781 struct buffer_head * getblk(dev_t dev, int block, int size)
782 {
783 struct buffer_head * bh;
784 int isize = BUFSIZE_INDEX(size);
785
786 /* Update this for the buffer size lav. */
787 buffer_usage[isize]++;
788
789 /* If there are too many dirty buffers, we wake up the update process
790    now so as to ensure that there are still clean buffers available
791    for user processes to use (and dirty) */
792 repeat:
793 bh = get_hash_table(dev, block, size);
794 if (bh) {
795 if (bh->b_uptodate && !bh->b_dirt)
796 put_last_lru(bh);
797 if(!bh->b_dirt) bh->b_flushtime = 0;
798 return bh;
799 }
800
801 while(!free_list[isize]) refill_freelist(size);
802
803 if (find_buffer(dev,block,size))
804 goto repeat;
805
806 bh = free_list[isize];
807 remove_from_free_list(bh);
808
809 /* OK, FINALLY we know that this buffer is the only one of its kind,
810    and that it's unused (b_count=0), unlocked, and clean */
811 bh->b_count=1;
812 bh->b_dirt=0;
813 bh->b_lock=0;
814 bh->b_uptodate=0;
815 bh->b_flushtime = 0;
816 bh->b_req=0;
817 bh->b_dev=dev;
818 bh->b_blocknr=block;
819 insert_into_queues(bh);
820 return bh;
821 }
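/*
 * Note the "goto repeat" above: refill_freelist() may sleep, so another
 * process can bring the same (dev,block) into the cache meanwhile; we
 * therefore re-check the hash table before claiming a free buffer.  The
 * returned buffer has b_count raised and belongs to the caller until
 * brelse().
 */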
822
823 void set_writetime(struct buffer_head * buf, int flag)
824 {
825 int newtime;
826
827 if (buf->b_dirt){
828 /* Move buffer to dirty list if jiffies is clear */
829 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
830 bdf_prm.b_un.age_buffer);
831 if(!buf->b_flushtime || buf->b_flushtime > newtime)
832 buf->b_flushtime = newtime;
833 } else {
834 buf->b_flushtime = 0;
835 }
836 }
837
838 /* A buffer may need to be moved from one buffer list to another. */
839 static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
840 BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};
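/*
 * The index into buffer_disposition[] is built from three bits:
 * bit 0 = page shared, bit 1 = locked, bit 2 = dirty.  E.g. a dirty,
 * unshared, unlocked buffer has i == 4 and is filed under BUF_DIRTY.
 */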
841
842 void refile_buffer(struct buffer_head * buf){
843 int i, dispose;
844 i = 0;
845 if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
846 if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
847 if(buf->b_lock) i |= 2;
848 if(buf->b_dirt) i |= 4;
849 dispose = buffer_disposition[i];
850 if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
851 dispose = BUF_UNSHARED;
852 if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
853 if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
854 if(dispose != buf->b_list) {
855 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
856 buf->b_lru_time = jiffies;
857 if(dispose == BUF_LOCKED &&
858 (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
859 dispose = BUF_LOCKED1;
860 remove_from_queues(buf);
861 buf->b_list = dispose;
862 insert_into_queues(buf);
863 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
864 (nr_buffers - nr_buffers_type[BUF_SHARED]) *
865 bdf_prm.b_un.nfract/100)
866 wakeup_bdflush(0);
867 }
868 }
869
870 void brelse(struct buffer_head * buf)
871 {
872 if (!buf)
873 return;
874 wait_on_buffer(buf);
875
876 /* If dirty, mark the time this buffer should be written back */
877 set_writetime(buf, 0);
878 refile_buffer(buf);
879
880 if (buf->b_count) {
881 if (--buf->b_count)
882 return;
883 wake_up(&buffer_wait);
884 return;
885 }
886 printk("VFS: brelse: Trying to free free buffer\n");
887 }
888
889 /*
890  * bread() reads a specified block and returns the buffer that contains
891  * it. It returns NULL if the block was unreadable.
892  */
893 struct buffer_head * bread(dev_t dev, int block, int size)
894 {
895 struct buffer_head * bh;
896
897 if (!(bh = getblk(dev, block, size))) {
898 printk("VFS: bread: READ error on device %d/%d\n",
899 MAJOR(dev), MINOR(dev));
900 return NULL;
901 }
902 if (bh->b_uptodate)
903 return bh;
904 ll_rw_block(READ, 1, &bh);
905 wait_on_buffer(bh);
906 if (bh->b_uptodate)
907 return bh;
908 brelse(bh);
909 return NULL;
910 }
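/*
 * Typical filesystem use of the routines above, as a minimal sketch
 * (the block number would come from the fs mapping code, and error
 * handling is trimmed):
 *
 *	struct buffer_head * bh;
 *	if ((bh = bread(inode->i_dev, block, BLOCK_SIZE)) != NULL) {
 *		... examine bh->b_data ...
 *		brelse(bh);
 *	}
 */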
911
912
913 /*
914  * breada() is like bread(), but additionally starts read-ahead on up
915  * to NBUF further blocks, limited by the per-device read_ahead[] and
916  * by the remaining file size.
917  */
918 #define NBUF 16
919
920 struct buffer_head * breada(dev_t dev, int block, int bufsize,
921 unsigned int pos, unsigned int filesize)
922 {
923 struct buffer_head * bhlist[NBUF];
924 unsigned int blocks;
925 struct buffer_head * bh;
926 int index;
927 int i, j;
928
929 if (pos >= filesize)
930 return NULL;
931
932 if (block < 0 || !(bh = getblk(dev,block,bufsize)))
933 return NULL;
934
935 index = BUFSIZE_INDEX(bh->b_size);
936
937 if (bh->b_uptodate)
938 return bh;
939
940 blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
941
942 if (blocks > (read_ahead[MAJOR(dev)] >> index))
943 blocks = read_ahead[MAJOR(dev)] >> index;
944 if (blocks > NBUF)
945 blocks = NBUF;
946
947 bhlist[0] = bh;
948 j = 1;
949 for(i=1; i<blocks; i++) {
950 bh = getblk(dev,block+i,bufsize);
951 if (bh->b_uptodate) {
952 brelse(bh);
953 break;
954 }
955 bhlist[j++] = bh;
956 }
957
958 /* Request the read for these buffers, and then release them */
959 ll_rw_block(READ, j, bhlist);
960
961 for(i=1; i<j; i++)
962 brelse(bhlist[i]);
963
964 /* Wait for this buffer, and then continue on */
965 bh = bhlist[0];
966 wait_on_buffer(bh);
967 if (bh->b_uptodate)
968 return bh;
969 brelse(bh);
970 return NULL;
971 }
972
973
974 /* Return an unused buffer head to the pool.  Any waiters on the old
975    head's queue are preserved across the memset. */
976 static void put_unused_buffer_head(struct buffer_head * bh)
977 {
978 struct wait_queue * wait;
979
980 wait = ((volatile struct buffer_head *) bh)->b_wait;
981 memset((void *) bh,0,sizeof(*bh));
982 ((volatile struct buffer_head *) bh)->b_wait = wait;
983 bh->b_next_free = unused_list;
984 unused_list = bh;
985 }
986
987 static void get_more_buffer_heads(void)
988 {
989 int i;
990 struct buffer_head * bh;
991
992 if (unused_list)
993 return;
994
995 if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
996 return;
997
998 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
999 bh->b_next_free = unused_list;
1000 unused_list = bh++;
1001 }
1002 }
1003
1004 static struct buffer_head * get_unused_buffer_head(void)
1005 {
1006 struct buffer_head * bh;
1007
1008 get_more_buffer_heads();
1009 if (!unused_list)
1010 return NULL;
1011 bh = unused_list;
1012 unused_list = bh->b_next_free;
1013 bh->b_next_free = NULL;
1014 bh->b_data = NULL;
1015 bh->b_size = 0;
1016 bh->b_req = 0;
1017 return bh;
1018 }
1019
1020 /*
1021  * Create the appropriate buffers when given a page for data area and
1022  * the size of each buffer.  Use the bh->b_this_page linked list to
1023  * follow the buffers created.  Return NULL if unable to create more
1024  * buffers.
1025  */
1026 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
1027 {
1028 struct buffer_head *bh, *head;
1029 unsigned long offset;
1030
1031 head = NULL;
1032 offset = PAGE_SIZE;
1033 while ((offset -= size) < PAGE_SIZE) {
1034 bh = get_unused_buffer_head();
1035 if (!bh)
1036 goto no_grow;
1037 bh->b_this_page = head;
1038 head = bh;
1039 bh->b_data = (char *) (page+offset);
1040 bh->b_size = size;
1041 bh->b_dev = 0xffff;
1042 }
1043 return head;
1044
1045
1046 /* In case anything failed, we just free everything we got. */
1047 no_grow:
1048 bh = head;
1049 while (bh) {
1050 head = bh;
1051 bh = bh->b_this_page;
1052 put_unused_buffer_head(head);
1053 }
1054 return NULL;
1055 }
1056
1057 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1058 {
1059 int i;
1060 int bhnum = 0;
1061 struct buffer_head * bhr[8];
1062
1063 for (i = 0 ; i < nrbuf ; i++) {
1064 if (bh[i] && !bh[i]->b_uptodate)
1065 bhr[bhnum++] = bh[i];
1066 }
1067 if (bhnum)
1068 ll_rw_block(READ, bhnum, bhr);
1069 for (i = 0 ; i < nrbuf ; i++) {
1070 if (bh[i]) {
1071 wait_on_buffer(bh[i]);
1072 }
1073 }
1074 }
1075
1076 static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
1077 dev_t dev, int *b, int size)
1078 {
1079 struct buffer_head * bh[8];
1080 unsigned long page;
1081 unsigned long offset;
1082 int block;
1083 int nrbuf;
1084
1085 page = (unsigned long) first->b_data;
1086 if (page & ~PAGE_MASK) {
1087 brelse(first);
1088 return 0;
1089 }
1090 mem_map[MAP_NR(page)]++;
1091 bh[0] = first;
1092 nrbuf = 1;
1093 for (offset = size ; offset < PAGE_SIZE ; offset += size) {
1094 block = *++b;
1095 if (!block)
1096 goto no_go;
1097 first = get_hash_table(dev, block, size);
1098 if (!first)
1099 goto no_go;
1100 bh[nrbuf++] = first;
1101 if (page+offset != (unsigned long) first->b_data)
1102 goto no_go;
1103 }
1104 read_buffers(bh,nrbuf);
1105 while (nrbuf-- > 0)
1106 brelse(bh[nrbuf]);
1107 free_page(address);
1108 ++current->mm->min_flt;
1109 return page;
1110 no_go:
1111 while (nrbuf-- > 0)
1112 brelse(bh[nrbuf]);
1113 free_page(page);
1114 return 0;
1115 }
1116
1117 static unsigned long try_to_load_aligned(unsigned long address,
1118 dev_t dev, int b[], int size)
1119 {
1120 struct buffer_head * bh, * tmp, * arr[8];
1121 unsigned long offset;
1122 int isize = BUFSIZE_INDEX(size);
1123 int * p;
1124 int block;
1125
1126 bh = create_buffers(address, size);
1127 if (!bh)
1128 return 0;
1129
1130 p = b;
1131 for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1132 block = *(p++);
1133 if (!block)
1134 goto not_aligned;
1135 if (find_buffer(dev, block, size))
1136 goto not_aligned;
1137 }
1138 tmp = bh;
1139 p = b;
1140 block = 0;
1141 while (1) {
1142 arr[block++] = bh;
1143 bh->b_count = 1;
1144 bh->b_dirt = 0;
1145 bh->b_flushtime = 0;
1146 bh->b_uptodate = 0;
1147 bh->b_req = 0;
1148 bh->b_dev = dev;
1149 bh->b_blocknr = *(p++);
1150 bh->b_list = BUF_CLEAN;
1151 nr_buffers++;
1152 nr_buffers_size[isize]++;
1153 insert_into_queues(bh);
1154 if (bh->b_this_page)
1155 bh = bh->b_this_page;
1156 else
1157 break;
1158 }
1159 buffermem += PAGE_SIZE;
1160 bh->b_this_page = tmp;
1161 mem_map[MAP_NR(address)]++;
1162 buffer_pages[address >> PAGE_SHIFT] = bh;
1163 read_buffers(arr,block);
1164 while (block-- > 0)
1165 brelse(arr[block]);
1166 ++current->mm->maj_flt;
1167 return address;
1168 not_aligned:
1169 while ((tmp = bh) != NULL) {
1170 bh = bh->b_this_page;
1171 put_unused_buffer_head(tmp);
1172 }
1173 return 0;
1174 }
1175
1176 /*
1177  * Try-to-share-buffers tries to minimize memory use by trying to keep
1178  * both code pages and the buffer area in the same page. This is done by
1179  * (a) checking if the buffers are already aligned correctly in memory and
1180  * (b) if none of the buffer heads are in memory at all, trying to load
1181  * them into memory the way we want them.
1182  *
1183  * This doesn't guarantee that the memory is shared, but should under most
1184  * circumstances work very well indeed (ie >90% sharing of code pages on
1185  * demand-loadable executables).
1186  */
1187 static inline unsigned long try_to_share_buffers(unsigned long address,
1188 dev_t dev, int *b, int size)
1189 {
1190 struct buffer_head * bh;
1191 int block;
1192
1193 block = b[0];
1194 if (!block)
1195 return 0;
1196 bh = get_hash_table(dev, block, size);
1197 if (bh)
1198 return check_aligned(bh, address, dev, b, size);
1199 return try_to_load_aligned(address, dev, b, size);
1200 }
1201
1202 #define COPYBLK(size,from,to) \
1203 __asm__ __volatile__("rep ; movsl": \
1204 :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1205 :"cx","di","si")
1206
1207 /*
1208  * bread_page reads four buffers into memory at the desired address. It's
1209  * a function of its own, as there is some speed to be got by reading them
1210  * all at the same time, not waiting for one to be read, and then another
1211  * etc. This also allows us to optimize memory usage by sharing code pages
1212  * and filesystem buffers.
1213  */
1214 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
1215 {
1216 struct buffer_head * bh[8];
1217 unsigned long where;
1218 int i, j;
1219
1220 if (!(prot & PAGE_RW)) {
1221 where = try_to_share_buffers(address,dev,b,size);
1222 if (where)
1223 return where;
1224 }
1225 ++current->mm->maj_flt;
1226 for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
1227 bh[i] = NULL;
1228 if (b[i])
1229 bh[i] = getblk(dev, b[i], size);
1230 }
1231 read_buffers(bh,i);
1232 where = address;
1233 for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
1234 if (bh[i]) {
1235 if (bh[i]->b_uptodate)
1236 COPYBLK(size, (unsigned long) bh[i]->b_data,address);
1237 brelse(bh[i]);
1238 }
1239 }
1240 return where;
1241 }
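/*
 * bread_page() is used for paging in from a block device: for a
 * read-only page it first tries to hand out the buffer-cache page
 * itself (try_to_share_buffers); only if that fails, or if the page
 * must be writable, are the blocks read and copied into 'address'.
 */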
1242
1243 /*
1244  * Try to increase the number of buffers available: the size argument
1245  * is used to determine what kind of buffers we want.
1246  */
1247 static int grow_buffers(int pri, int size)
1248 {
1249 unsigned long page;
1250 struct buffer_head *bh, *tmp;
1251 struct buffer_head * insert_point;
1252 int isize;
1253
1254 if ((size & 511) || (size > PAGE_SIZE)) {
1255 printk("VFS: grow_buffers: size = %d\n",size);
1256 return 0;
1257 }
1258
1259 isize = BUFSIZE_INDEX(size);
1260
1261 if (!(page = __get_free_page(pri)))
1262 return 0;
1263 bh = create_buffers(page, size);
1264 if (!bh) {
1265 free_page(page);
1266 return 0;
1267 }
1268
1269 insert_point = free_list[isize];
1270
1271 tmp = bh;
1272 while (1) {
1273 nr_free[isize]++;
1274 if (insert_point) {
1275 tmp->b_next_free = insert_point->b_next_free;
1276 tmp->b_prev_free = insert_point;
1277 insert_point->b_next_free->b_prev_free = tmp;
1278 insert_point->b_next_free = tmp;
1279 } else {
1280 tmp->b_prev_free = tmp;
1281 tmp->b_next_free = tmp;
1282 }
1283 insert_point = tmp;
1284 ++nr_buffers;
1285 if (tmp->b_this_page)
1286 tmp = tmp->b_this_page;
1287 else
1288 break;
1289 }
1290 free_list[isize] = bh;
1291 buffer_pages[page >> PAGE_SHIFT] = bh;
1292 tmp->b_this_page = bh;
1293 wake_up(&buffer_wait);
1294 buffermem += PAGE_SIZE;
1295 return 1;
1296 }
1297
1298
1299 /* try_to_free() checks if all the buffers on this particular page
1300    are unused, and frees the page if so. */
1301
1302 static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
1303 {
1304 unsigned long page;
1305 struct buffer_head * tmp, * p;
1306 int isize = BUFSIZE_INDEX(bh->b_size);
1307
1308 *bhp = bh;
1309 page = (unsigned long) bh->b_data;
1310 page &= PAGE_MASK;
1311 tmp = bh;
1312 do {
1313 if (!tmp)
1314 return 0;
1315 if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
1316 return 0;
1317 tmp = tmp->b_this_page;
1318 } while (tmp != bh);
1319 tmp = bh;
1320 do {
1321 p = tmp;
1322 tmp = tmp->b_this_page;
1323 nr_buffers--;
1324 nr_buffers_size[isize]--;
1325 if (p == *bhp)
1326 {
1327 *bhp = p->b_prev_free;
1328 if (p == *bhp)
1329 *bhp = NULL;
1330 }
1331 remove_from_queues(p);
1332 put_unused_buffer_head(p);
1333 } while (tmp != bh);
1334 buffermem -= PAGE_SIZE;
1335 buffer_pages[page >> PAGE_SHIFT] = NULL;
1336 free_page(page);
1337 return !mem_map[MAP_NR(page)];
1338 }
1339
1340
1341 /*
1342  * Consult the load average for buffers and decide whether or not
1343  * we should shrink the buffers of one size or not.  If we decide yes,
1344  * do it and return 1.  Else return 0.  Do not attempt to shrink the
1345  * size that is specified.
1346  *
1347  * I would prefer not to use a load average, but the way things are now it
1348  * seems unavoidable.  The way to get rid of it would be to force clustering
1349  * universally, so that when we reclaim buffers we always reclaim an entire
1350  * cluster.
1351  */
1352
1353 static int maybe_shrink_lav_buffers(int size)
1354 {
1355 int nlist;
1356 int isize;
1357 int total_lav, total_n_buffers, n_sizes;
1358
1359
1360 /* First accumulate the total load average and the total number of
1361    unshared buffers across all sizes, and count how many distinct
1362    sizes are in use. */
1363
1364 total_lav = total_n_buffers = n_sizes = 0;
1365 for(nlist = 0; nlist < NR_SIZES; nlist++)
1366 {
1367 total_lav += buffers_lav[nlist];
1368 if(nr_buffers_size[nlist]) n_sizes++;
1369 total_n_buffers += nr_buffers_size[nlist];
1370 total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
1371 }
1372
1373 /* If a size is specified, never shrink buffers of that size; with
1374    only one size in use there is nothing to rebalance. */
1375
1376 isize = (size ? BUFSIZE_INDEX(size) : -1);
1377
1378 if (n_sizes > 1)
1379 for(nlist = 0; nlist < NR_SIZES; nlist++)
1380 {
1381 if(nlist == isize) continue;
1382 if(nr_buffers_size[nlist] &&
1383 bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
1384 total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1385 if(shrink_specific_buffers(6, bufferindex_size[nlist]))
1386 return 1;
1387 }
1388 return 0;
1389 }
1390
1391
1392 /*
1393  * Try to free up some pages by shrinking the buffer-cache.  Priority
1394  * tells the routine how hard to try: low values mean "we'd better get
1395  * some free pages now".
1396  */
1397 int shrink_buffers(unsigned int priority)
1398 {
1399 if (priority < 2) {
1400 sync_buffers(0,0);
1401 }
1402
1403 if(priority == 2) wakeup_bdflush(1);
1404
1405 if(maybe_shrink_lav_buffers(0)) return 1;
1406
1407
1408 return shrink_specific_buffers(priority, 0);
1409 }
1410
1411 static int shrink_specific_buffers(unsigned int priority, int size)
1412 {
1413 struct buffer_head *bh;
1414 int nlist;
1415 int i, isize, isize1;
1416
1417 #ifdef DEBUG
1418 if(size) printk("Shrinking buffers of size %d\n", size);
1419 #endif
1420
1421 /* First try the free lists, and see if we can get a complete page from here */
1422 isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1423
1424 for(isize = 0; isize<NR_SIZES; isize++){
1425 if(isize1 != -1 && isize1 != isize) continue;
1426 bh = free_list[isize];
1427 if(!bh) continue;
1428 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1429 if (bh->b_count || !bh->b_this_page)
1430 continue;
1431 if (try_to_free(bh, &bh))
1432 return 1;
1433 if(!bh) break;
1434
1435 }
1436 }
1437
1438
1439 /* Not enough in the free lists, now try the lru list */
1440 for(nlist = 0; nlist < NR_LIST; nlist++) {
1441 repeat1:
1442 if(priority > 3 && nlist == BUF_SHARED) continue;
1443 bh = lru_list[nlist];
1444 if(!bh) continue;
1445 i = nr_buffers_type[nlist] >> priority;
1446 for ( ; i-- > 0 ; bh = bh->b_next_free) {
1447
1448 if(bh->b_list != nlist) goto repeat1;
1449 if (bh->b_count || !bh->b_this_page)
1450 continue;
1451 if(size && bh->b_size != size) continue;
1452 if (bh->b_lock)
1453 if (priority)
1454 continue;
1455 else
1456 wait_on_buffer(bh);
1457 if (bh->b_dirt) {
1458 bh->b_count++;
1459 bh->b_flushtime = 0;
1460 ll_rw_block(WRITEA, 1, &bh);
1461 bh->b_count--;
1462 continue;
1463 }
1464 if (try_to_free(bh, &bh))
1465 return 1;
1466 if(!bh) break;
1467 }
1468 }
1469 return 0;
1470 }
1471
1472 /* Dump buffer-cache statistics to the console (used by show_mem()) */
1473 void show_buffers(void)
1474 {
1475 struct buffer_head * bh;
1476 int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1477 int shared;
1478 int nlist, isize;
1479
1480 printk("Buffer memory: %6dkB\n",buffermem>>10);
1481 printk("Buffer heads: %6d\n",nr_buffer_heads);
1482 printk("Buffer blocks: %6d\n",nr_buffers);
1483
1484 for(nlist = 0; nlist < NR_LIST; nlist++) {
1485 shared = found = locked = dirty = used = lastused = 0;
1486 bh = lru_list[nlist];
1487 if(!bh) continue;
1488 do {
1489 found++;
1490 if (bh->b_lock)
1491 locked++;
1492 if (bh->b_dirt)
1493 dirty++;
1494 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1495 if (bh->b_count)
1496 used++, lastused = found;
1497 bh = bh->b_next_free;
1498 } while (bh != lru_list[nlist]);
1499 printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1500 nlist, found, used, lastused, locked, dirty, shared);
1501 };
1502 printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
1503 for(isize = 0; isize<NR_SIZES; isize++){
1504 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1505 buffers_lav[isize], nr_free[isize]);
1506 for(nlist = 0; nlist < NR_LIST; nlist++)
1507 printk("%7d ", nr_buffers_st[isize][nlist]);
1508 printk("\n");
1509 }
1510 }
1511
1512
1513 /* ===================== Clusters ====================== */
1514 /* try_to_reassign() checks if all the buffers on this particular page
1515    are unused, and reassigns them to a new cluster if this is true. */
1516 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1517 dev_t dev, unsigned int starting_block)
1518 {
1519 unsigned long page;
1520 struct buffer_head * tmp, * p;
1521
1522 *bhp = bh;
1523 page = (unsigned long) bh->b_data;
1524 page &= PAGE_MASK;
1525 if(mem_map[MAP_NR(page)] != 1) return 0;
1526 tmp = bh;
1527 do {
1528 if (!tmp)
1529 return 0;
1530
1531 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
1532 return 0;
1533 tmp = tmp->b_this_page;
1534 } while (tmp != bh);
1535 tmp = bh;
1536
1537 while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
1538 tmp = tmp->b_this_page;
1539
1540
1541 bh = tmp;
1542 do {
1543 p = tmp;
1544 tmp = tmp->b_this_page;
1545 remove_from_queues(p);
1546 p->b_dev=dev;
1547 p->b_uptodate = 0;
1548 p->b_req = 0;
1549 p->b_blocknr=starting_block++;
1550 insert_into_queues(p);
1551 } while (tmp != bh);
1552 return 1;
1553 }
1554
1555 /*
1556  * Try to find a free cluster by locating a page where
1557  * all of the buffers are unused.  We would like this function
1558  * to be atomic, so we do not call anything that might cause
1559  * the process to sleep.  The priority is somewhat similar to
1560  * the priority used in shrink_buffers.
1561  *
1562  * My thinking is that the kernel should end up using whole
1563  * pages for the buffer cache as much of the time as possible.
1564  * This way the other buffers on a particular page are likely
1565  * to be very near each other on the free list, and we will not
1566  * be expiring data prematurely.  For now we only cannibalize buffers
1567  * of the same size to keep the code simpler.
1568  */
1569 static int reassign_cluster(dev_t dev,
1570 unsigned int starting_block, int size)
1571 {
1572 struct buffer_head *bh;
1573 int isize = BUFSIZE_INDEX(size);
1574 int i;
1575
1576
1577 /* Make sure there is a decent supply of free buffers of this size
1578    before we start scanning the free list for a whole page. */
1579
1580 while(nr_free[isize] < 32) refill_freelist(size);
1581
1582 bh = free_list[isize];
1583 if(bh)
1584 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1585 if (!bh->b_this_page) continue;
1586 if (try_to_reassign(bh, &bh, dev, starting_block))
1587 return 4;
1588 }
1589 return 0;
1590 }
1591
1592 /* This function tries to generate a new cluster of buffers
1593  * from a new page in memory.  We should only do this if we have
1594  * not expanded the buffer cache to the maximum size that we allow.
1595  */
1596 static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
1597 {
1598 struct buffer_head * bh, * tmp, * arr[8];
1599 int isize = BUFSIZE_INDEX(size);
1600 unsigned long offset;
1601 unsigned long page;
1602 int nblock;
1603
1604 page = get_free_page(GFP_NOBUFFER);
1605 if(!page) return 0;
1606
1607 bh = create_buffers(page, size);
1608 if (!bh) {
1609 free_page(page);
1610 return 0;
1611 };
1612 nblock = block;
1613 for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1614 if (find_buffer(dev, nblock++, size))
1615 goto not_aligned;
1616 }
1617 tmp = bh;
1618 nblock = 0;
1619 while (1) {
1620 arr[nblock++] = bh;
1621 bh->b_count = 1;
1622 bh->b_dirt = 0;
1623 bh->b_flushtime = 0;
1624 bh->b_lock = 0;
1625 bh->b_uptodate = 0;
1626 bh->b_req = 0;
1627 bh->b_dev = dev;
1628 bh->b_list = BUF_CLEAN;
1629 bh->b_blocknr = block++;
1630 nr_buffers++;
1631 nr_buffers_size[isize]++;
1632 insert_into_queues(bh);
1633 if (bh->b_this_page)
1634 bh = bh->b_this_page;
1635 else
1636 break;
1637 }
1638 buffermem += PAGE_SIZE;
1639 buffer_pages[page >> PAGE_SHIFT] = bh;
1640 bh->b_this_page = tmp;
1641 while (nblock-- > 0)
1642 brelse(arr[nblock]);
1643 return 4;
1644 not_aligned:
1645 while ((tmp = bh) != NULL) {
1646 bh = bh->b_this_page;
1647 put_unused_buffer_head(tmp);
1648 }
1649 free_page(page);
1650 return 0;
1651 }
1652
1653 unsigned long generate_cluster(dev_t dev, int b[], int size)
1654 {
1655 int i, offset;
1656
1657 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1658 if(i && b[i]-1 != b[i-1]) return 0;
1659 if(find_buffer(dev, b[i], size)) return 0;
1660 };
1661
1662 /* OK, we have a candidate for a new cluster */
1663
1664 /* See if one size of buffer is over-represented in the buffer cache,
1665    if so reduce the numbers of buffers */
1666 if(maybe_shrink_lav_buffers(size))
1667 {
1668 int retval;
1669 retval = try_to_generate_cluster(dev, b[0], size);
1670 if(retval) return retval;
1671 };
1672
1673 if (nr_free_pages > min_free_pages)
1674 return try_to_generate_cluster(dev, b[0], size);
1675 else
1676 return reassign_cluster(dev, b[0], size);
1677 }
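/*
 * A "cluster" is one page worth of consecutive blocks backed by a
 * single page, so that block drivers can move a whole page per
 * transfer.  generate_cluster() succeeds (returns nonzero) only when
 * the run b[] is consecutive and none of its blocks is already in the
 * buffer cache.
 */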
1678
1679 /*
1680  * This initializes the initial buffer free list.  nr_buffers is set
1681  * to one less the actual number of buffers, as a sop to backwards
1682  * compatibility --- the old code did this (I think unintentionally,
1683  * but I'm not sure), and programs in the ps package expect it.
1684  *                                      - TYT 8/30/92
1685  */
1686 void buffer_init(void)
1687 {
1688 int i;
1689 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1690
1691 if (high_memory >= 4*1024*1024) {
1692 min_free_pages = 200;
1693 if(high_memory >= 16*1024*1024)
1694 nr_hash = 16381;
1695 else
1696 nr_hash = 4093;
1697 } else {
1698 min_free_pages = 20;
1699 nr_hash = 997;
1700 };
1701
1702 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1703 sizeof(struct buffer_head *));
1704
1705
1706 buffer_pages = (struct buffer_head **) vmalloc((high_memory >>PAGE_SHIFT) *
1707 sizeof(struct buffer_head *));
1708 for (i = 0 ; i < high_memory >> PAGE_SHIFT ; i++)
1709 buffer_pages[i] = NULL;
1710
1711 for (i = 0 ; i < nr_hash ; i++)
1712 hash_table[i] = NULL;
1713 lru_list[BUF_CLEAN] = 0;
1714 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1715 if (!free_list[isize])
1716 panic("VFS: Unable to initialize buffer free list!");
1717 return;
1718 }
1719
1720 /* ===================== bdflush support =================== */
1721
1722 /* This is a simple kernel daemon, whose job it is to provide a dynamic
1723  * response to dirty buffers.  Once this process is activated, we write back
1724  * a limited number of buffers to the disks so that we will not write back
1725  * too much at a time.
1726  */
1727 struct wait_queue * bdflush_wait = NULL;
1728 struct wait_queue * bdflush_done = NULL;
1729
1730 static int bdflush_running = 0;
1731
1732 static void wakeup_bdflush(int wait)
1733 {
1734 if(!bdflush_running){
1735 printk("Warning - bdflush not running\n");
1736 sync_buffers(0,0);
1737 return;
1738 };
1739 wake_up(&bdflush_wait);
1740 if(wait) sleep_on(&bdflush_done);
1741 }
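/*
 * bdflush itself is an ordinary user process that entered sys_bdflush()
 * with func == 0 and never returns (see the loop below).
 * wakeup_bdflush(1) additionally sleeps on bdflush_done, so the caller
 * does not continue until the daemon has finished one flushing pass;
 * without a daemon we fall back to a synchronous sync_buffers().
 */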
1742
1743
1744 /*
1745  * Here we attempt to write back old buffers.  We also try and flush inodes
1746  * and supers as well, since this function is essentially "update", and
1747  * otherwise there would be no way of ensuring that these quantities ever
1748  * get written back.  Ideally, we would have a timestamp on the inodes
1749  * and superblocks so that we could write back only the old ones as well.
1750  */
1751
1752
1753 asmlinkage int sync_old_buffers(void)
1754 {
1755 int i, isize;
1756 int ndirty, nwritten;
1757 int nlist;
1758 int ncount;
1759 struct buffer_head * bh, *next;
1760
1761 sync_supers(0);
1762 sync_inodes(0);
1763
1764 ncount = 0;
1765 #ifdef DEBUG
1766 for(nlist = 0; nlist < NR_LIST; nlist++)
1767 #else
1768 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1769 #endif
1770 {
1771 ndirty = 0;
1772 nwritten = 0;
1773 repeat:
1774 bh = lru_list[nlist];
1775 if(bh)
1776 for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1777
1778 if(bh->b_list != nlist) goto repeat;
1779 next = bh->b_next_free;
1780 if(!lru_list[nlist]) {
1781 printk("Dirty list empty %d\n", i);
1782 break;
1783 }
1784
1785 /* Clean buffer on dirty list?  Refile it */
1786 if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1787 {
1788 refile_buffer(bh);
1789 continue;
1790 }
1791
1792 if (bh->b_lock || !bh->b_dirt)
1793 continue;
1794 ndirty++;
1795 if(bh->b_flushtime > jiffies) continue;
1796 nwritten++;
1797 bh->b_count++;
1798 bh->b_flushtime = 0;
1799 #ifdef DEBUG
1800 if(nlist != BUF_DIRTY) ncount++;
1801 #endif
1802 ll_rw_block(WRITE, 1, &bh);
1803 bh->b_count--;
1804 }
1805 }
1806 #ifdef DEBUG
1807 if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1808 printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1809 #endif
1810
1811 /* We assume that we only come through here on a regular schedule,
1812    i.e. every 5 seconds or so; update the per-size load averages. */
1813
1814 for(isize = 0; isize<NR_SIZES; isize++){
1815 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1816 buffer_usage[isize] = 0;
1817 };
1818 return 0;
1819 }
1820
1821
1822 /* This is the interface to bdflush.  As we get more sophisticated, we can
1823  * pass tuning parameters to this "process", to adjust how it behaves.  If you
1824  * invoke this again after you have done this once, you would simply modify
1825  * the tuning parameters.  We would want to verify each parameter, however,
1826  * to make sure that it is reasonable. */
1827
1828 asmlinkage int sys_bdflush(int func, int data)
1829 {
1830 int i, error;
1831 int ndirty;
1832 int nlist;
1833 int ncount;
1834 struct buffer_head * bh, *next;
1835
1836 if(!suser()) return -EPERM;
1837
1838 if(func == 1)
1839 return sync_old_buffers();
1840
1841 /* func >= 2 reads or writes one of the bdf_prm tuning parameters */
1842 if(func >= 2){
1843 i = (func-2) >> 1;
1844 if (i < 0 || i >= N_PARAM) return -EINVAL;
1845 if((func & 1) == 0) {
1846 error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1847 if(error) return error;
1848 put_fs_long(bdf_prm.data[i], data);
1849 return 0;
1850 };
1851 if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
1852 bdf_prm.data[i] = data;
1853 return 0;
1854 };
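/*
 * Parameter encoding for the block above: for func >= 2 the index into
 * bdf_prm.data[] is (func-2)>>1 and the low bit selects the direction,
 * e.g. func == 2 reads data[0] (nfract), func == 3 sets it, func == 4
 * reads data[1] (ndirty), and so on.
 */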
1855
1856 if(bdflush_running++) return -EBUSY;
1857
1858
1859 /* OK, from here on is the daemon */
1860 while(1==1){
1861 #ifdef DEBUG
1862 printk("bdflush() activated...");
1863 #endif
1864
1865 ncount = 0;
1866 #ifdef DEBUG
1867 for(nlist = 0; nlist < NR_LIST; nlist++)
1868 #else
1869 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1870 #endif
1871 {
1872 ndirty = 0;
1873 repeat:
1874 bh = lru_list[nlist];
1875 if(bh)
1876 for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
1877 bh = next) {
1878
1879 if(bh->b_list != nlist) goto repeat;
1880 next = bh->b_next_free;
1881 if(!lru_list[nlist]) {
1882 printk("Dirty list empty %d\n", i);
1883 break;
1884 }
1885
1886 /* Clean buffer on dirty list?  Refile it */
1887 if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
1888 {
1889 refile_buffer(bh);
1890 continue;
1891 }
1892
1893 if (bh->b_lock || !bh->b_dirt)
1894 continue;
1895 /* Should we write back buffers that are shared or not??
1896    Currently dirty buffers are not shared, so it does not matter */
1897 bh->b_count++;
1898 ndirty++;
1899 bh->b_flushtime = 0;
1900 ll_rw_block(WRITE, 1, &bh);
1901 #ifdef DEBUG
1902 if(nlist != BUF_DIRTY) ncount++;
1903 #endif
1904 bh->b_count--;
1905 }
1906 }
1907 #ifdef DEBUG
1908 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
1909 printk("sleeping again.\n");
1910 #endif
1911 wake_up(&bdflush_done);
1912
1913
1914 /* If there are still a lot of dirty buffers around, skip the sleep
1915    and flush some more */
1916 if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
1917 bdf_prm.b_un.nfract/100) {
1918 if (current->signal & (1 << (SIGKILL-1))) {
1919 bdflush_running--;
1920 return 0;
1921 }
1922 current->signal = 0;
1923 interruptible_sleep_on(&bdflush_wait);
1924 }
1925 }
1926 }
1927