This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- sys_fdatasync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- __brelse
- __bforget
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- recover_reusable_buffer_heads
- get_unused_buffer_head
- create_buffers
- brw_page
- mark_buffer_uptodate
- unlock_buffer
- generic_readpage
- grow_buffers
- try_to_free_buffer
- age_buffer
- maybe_shrink_lav_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
- bdflush
21 #include <linux/sched.h>
22 #include <linux/kernel.h>
23 #include <linux/major.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26 #include <linux/errno.h>
27 #include <linux/malloc.h>
28 #include <linux/pagemap.h>
29 #include <linux/swap.h>
30 #include <linux/swapctl.h>
31 #include <linux/smp.h>
32 #include <linux/smp_lock.h>
33
34 #include <asm/system.h>
35 #include <asm/segment.h>
36 #include <asm/io.h>
37
38 #define NR_SIZES 5
39 static char buffersize_index[17] =
40 {-1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
41 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096, 8192};
42
43 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
44 #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
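The two tables above define a dense mapping between the five supported buffer sizes and small array indices: buffersize_index is keyed by size>>9, and bufferindex_size inverts it. A stand-alone sanity check of that round trip (my own illustration, not part of this file):

	#include <assert.h>

	static char buffersize_index[17] =
	{-1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
	static short bufferindex_size[5] = {512, 1024, 2048, 4096, 8192};

	int main(void)
	{
		int size;
		/* every supported size maps to an index and back to itself */
		for (size = 512; size <= 8192; size *= 2)
			assert(bufferindex_size[(int) buffersize_index[size >> 9]] == size);
		return 0;
	}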
45
46 static int grow_buffers(int pri, int size);
47 static int shrink_specific_buffers(unsigned int priority, int size);
48 static int maybe_shrink_lav_buffers(int);
49
50 static int nr_hash = 0;
51 static struct buffer_head ** hash_table;
52 struct buffer_head ** buffer_pages;
53 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
54
55
56
57 static struct buffer_head * next_to_age[NR_LIST] = {NULL, };
58 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
59
60 static struct buffer_head * unused_list = NULL;
61 struct buffer_head * reuse_list = NULL;
62 static struct wait_queue * buffer_wait = NULL;
63
64 int nr_buffers = 0;
65 int nr_buffers_type[NR_LIST] = {0,};
66 int nr_buffers_size[NR_SIZES] = {0,};
67 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
68 int buffer_usage[NR_SIZES] = {0,};
69 int buffers_lav[NR_SIZES] = {0,};
70 int nr_free[NR_SIZES] = {0,};
71 int buffermem = 0;
72 int nr_buffer_heads = 0;
73 extern int *blksize_size[];
74
75
76 /* Forward declaration: wakeup_bdflush() wakes the bdflush daemon
77  * defined at the bottom of this file, optionally waiting for it. */
78
79 static void wakeup_bdflush(int);
80
81 #define N_PARAM 9
82 #define LAV
83
84 union bdflush_param{
85 struct {
86 int nfract; /* Percentage of buffer cache dirty before bdflush activates */
87
88 int ndirty; /* Maximum number of dirty blocks to write out per wake-cycle */
89
90 int nrefill; /* Number of clean buffers to try to obtain each time we call refill */
91
92 int nref_dirt; /* Dirty buffer threshold for activating bdflush when trying to refill buffers */
93
94 int clu_nfract; /* Percentage of buffer cache to scan for free clusters */
95
96 int age_buffer; /* Time for a normal buffer to age before we flush it */
97
98 int age_super; /* Time for a superblock to age before we flush it */
99
100 int lav_const; /* Time constant used for the load average of buffer usage */
101
102 int lav_ratio; /* How low the load average for a given size may drop before we shrink its buffers */
103
104
105 } b_un;
106 unsigned int data[N_PARAM];
107 } bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};
108
109
110 /* The minimum and maximum parameter values that sys_bdflush() will
111  * allow to be assigned. */
112
115 int bdflush_min[N_PARAM] = { 0, 10, 5, 60, 0, 100, 100, 1, 1};
116 int bdflush_max[N_PARAM] = {100, 5000, 2000, 2000, 100, 60000, 60000, 2047, 5};
117
118
119 /*
120  * Wait until a buffer is unlocked.  A temporary reference (b_count)
121  * is taken so that the buffer cannot be reclaimed while we sleep.
122  */
127 void __wait_on_buffer(struct buffer_head * bh)
128 {
129 struct wait_queue wait = { current, NULL };
130
131 bh->b_count++;
132 add_wait_queue(&bh->b_wait, &wait);
133 repeat:
134 current->state = TASK_UNINTERRUPTIBLE;
135 if (buffer_locked(bh)) {
136 schedule();
137 goto repeat;
138 }
139 remove_wait_queue(&bh->b_wait, &wait);
140 bh->b_count--;
141 current->state = TASK_RUNNING;
142 }
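Callers almost never invoke __wait_on_buffer() directly: the usual entry point is the wait_on_buffer() wrapper, an inline that avoids the function call when the buffer is not locked. A sketch of that wrapper as it appears in this kernel generation (from <linux/locks.h>, reproduced from memory, so treat the exact form as an assumption):

	extern void __wait_on_buffer(struct buffer_head *);

	static inline void wait_on_buffer(struct buffer_head * bh)
	{
		if (buffer_locked(bh))
			__wait_on_buffer(bh);
	}

Note that the loop above must re-test buffer_locked() after schedule(): another process may lock the buffer again between the wakeup and this process actually running.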
143
144
145 /*
146  * Write out and optionally wait on all dirty buffers of a device
147  * (dev == 0 means all devices).  With wait != 0 up to three passes
148  * are made; the return value is non-zero if some buffer could not
149  * be brought up to date.
150  */
154 static int sync_buffers(kdev_t dev, int wait)
155 {
156 int i, retry, pass = 0, err = 0;
157 int nlist, ncount;
158 struct buffer_head * bh, *next;
159
160
161 /* One pass for no-wait, three for wait: 0) write out all dirty,
162  * unlocked buffers; 1) write out all dirty buffers, waiting if
163  * locked; 2) wait for completion by waiting for all to unlock. */
164 repeat:
165 retry = 0;
166 repeat2:
167 ncount = 0;
168
169
170 for(nlist = 0; nlist < NR_LIST; nlist++)
171 {
172 repeat1:
173 bh = lru_list[nlist];
174 if(!bh) continue;
175 for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
176 if(bh->b_list != nlist) goto repeat1;
177 next = bh->b_next_free;
178 if(!lru_list[nlist]) break;
179 if (dev && bh->b_dev != dev)
180 continue;
181 if (buffer_locked(bh))
182 {
183 /* Buffer is locked; skip it
184  * unless wait is requested and pass > 0. */
185 if (!wait || !pass) {
186 retry = 1;
187 continue;
188 }
189 wait_on_buffer (bh);
190 goto repeat2;
191 }
192 /* If an unlocked buffer is not uptodate, there has
193  * been an IO error. Skip it. */
194 if (wait && buffer_req(bh) && !buffer_locked(bh) &&
195 !buffer_dirty(bh) && !buffer_uptodate(bh)) {
196 err = 1;
197 continue;
198 }
199 /* Don't write clean buffers.  Don't write ANY buffers
200  * on the third pass. */
201 if (!buffer_dirty(bh) || pass>=2)
202 continue;
203
204 if (buffer_locked(bh))
205 continue;
206 bh->b_count++;
207 bh->b_flushtime = 0;
208 ll_rw_block(WRITE, 1, &bh);
209
210 if(nlist != BUF_DIRTY) {
211 printk("[%d %s %ld] ", nlist,
212 kdevname(bh->b_dev), bh->b_blocknr);
213 ncount++;
214 };
215 bh->b_count--;
216 retry = 1;
217 }
218 }
219 if (ncount)
220 printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);
221
222 /* If we are waiting for the sync to succeed, and if any dirty
223  * blocks were written, then repeat; on the second pass, only
224  * wait for buffers being written (do not start new writes). */
225
226 if (wait && retry && ++pass<=2)
227 goto repeat;
228 return err;
229 }
230
231 void sync_dev(kdev_t dev)
232 {
233 sync_buffers(dev, 0);
234 sync_supers(dev);
235 sync_inodes(dev);
236 sync_buffers(dev, 0);
237 sync_dquots(dev, -1);
238 }
239
240 int fsync_dev(kdev_t dev)
241 {
242 sync_buffers(dev, 0);
243 sync_supers(dev);
244 sync_inodes(dev);
245 sync_dquots(dev, -1);
246 return sync_buffers(dev, 1);
247 }
248
249 asmlinkage int sys_sync(void)
250 {
251 fsync_dev(0);
252 return 0;
253 }
254
255 int file_fsync (struct inode *inode, struct file *filp)
256 {
257 return fsync_dev(inode->i_dev);
258 }
259
260 asmlinkage int sys_fsync(unsigned int fd)
261 {
262 struct file * file;
263 struct inode * inode;
264
265 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
266 return -EBADF;
267 if (!file->f_op || !file->f_op->fsync)
268 return -EINVAL;
269 if (file->f_op->fsync(inode,file))
270 return -EIO;
271 return 0;
272 }
273
274 asmlinkage int sys_fdatasync(unsigned int fd)
275 {
276 struct file * file;
277 struct inode * inode;
278
279 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
280 return -EBADF;
281 if (!file->f_op || !file->f_op->fsync)
282 return -EINVAL;
283 /* This needs further work: at the moment it is identical to fsync(). */
284 if (file->f_op->fsync(inode,file))
285 return -EIO;
286 return 0;
287 }
288
289 void invalidate_buffers(kdev_t dev)
290 {
291 int i;
292 int nlist;
293 struct buffer_head * bh;
294
295 for(nlist = 0; nlist < NR_LIST; nlist++) {
296 bh = lru_list[nlist];
297 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
298 if (bh->b_dev != dev)
299 continue;
300 wait_on_buffer(bh);
301 if (bh->b_dev != dev)
302 continue;
303 if (bh->b_count)
304 continue;
305 bh->b_flushtime = 0;
306 clear_bit(BH_Protected, &bh->b_state);
307 clear_bit(BH_Uptodate, &bh->b_state);
308 clear_bit(BH_Dirty, &bh->b_state);
309 clear_bit(BH_Req, &bh->b_state);
310 }
311 }
312 }
313
314 #define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))%nr_hash)
315 #define hash(dev,block) hash_table[_hashfn(dev,block)]
316
317 static inline void remove_from_hash_queue(struct buffer_head * bh)
318 {
319 if (bh->b_next)
320 bh->b_next->b_prev = bh->b_prev;
321 if (bh->b_prev)
322 bh->b_prev->b_next = bh->b_next;
323 if (hash(bh->b_dev,bh->b_blocknr) == bh)
324 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
325 bh->b_next = bh->b_prev = NULL;
326 }
327
328 static inline void remove_from_lru_list(struct buffer_head * bh)
329 {
330 if (!(bh->b_prev_free) || !(bh->b_next_free))
331 panic("VFS: LRU block list corrupted");
332 if (bh->b_dev == B_FREE)
333 panic("LRU list corrupted");
334 bh->b_prev_free->b_next_free = bh->b_next_free;
335 bh->b_next_free->b_prev_free = bh->b_prev_free;
336
337 if (lru_list[bh->b_list] == bh)
338 lru_list[bh->b_list] = bh->b_next_free;
339 if (lru_list[bh->b_list] == bh)
340 lru_list[bh->b_list] = NULL;
341 if (next_to_age[bh->b_list] == bh)
342 next_to_age[bh->b_list] = bh->b_next_free;
343 if (next_to_age[bh->b_list] == bh)
344 next_to_age[bh->b_list] = NULL;
345
346 bh->b_next_free = bh->b_prev_free = NULL;
347 }
348
349 static inline void remove_from_free_list(struct buffer_head * bh)
350 {
351 int isize = BUFSIZE_INDEX(bh->b_size);
352 if (!(bh->b_prev_free) || !(bh->b_next_free))
353 panic("VFS: Free block list corrupted");
354 if(bh->b_dev != B_FREE)
355 panic("Free list corrupted");
356 if(!free_list[isize])
357 panic("Free list empty");
358 nr_free[isize]--;
359 if(bh->b_next_free == bh)
360 free_list[isize] = NULL;
361 else {
362 bh->b_prev_free->b_next_free = bh->b_next_free;
363 bh->b_next_free->b_prev_free = bh->b_prev_free;
364 if (free_list[isize] == bh)
365 free_list[isize] = bh->b_next_free;
366 };
367 bh->b_next_free = bh->b_prev_free = NULL;
368 }
369
370 static inline void remove_from_queues(struct buffer_head * bh)
371 {
372 if(bh->b_dev == B_FREE) {
373 remove_from_free_list(bh);
374
375 return;
376 };
377 nr_buffers_type[bh->b_list]--;
378 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
379 remove_from_hash_queue(bh);
380 remove_from_lru_list(bh);
381 }
382
383 static inline void put_last_lru(struct buffer_head * bh)
384 {
385 if (!bh)
386 return;
387 if (bh == lru_list[bh->b_list]) {
388 lru_list[bh->b_list] = bh->b_next_free;
389 if (next_to_age[bh->b_list] == bh)
390 next_to_age[bh->b_list] = bh->b_next_free;
391 return;
392 }
393 if(bh->b_dev == B_FREE)
394 panic("Wrong block for lru list");
395 remove_from_lru_list(bh);
396
397
398 if(!lru_list[bh->b_list]) {
399 lru_list[bh->b_list] = bh;
400 lru_list[bh->b_list]->b_prev_free = bh;
401 };
402 if (!next_to_age[bh->b_list])
403 next_to_age[bh->b_list] = bh;
404
405 bh->b_next_free = lru_list[bh->b_list];
406 bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
407 lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
408 lru_list[bh->b_list]->b_prev_free = bh;
409 }
410
411 static inline void put_last_free(struct buffer_head * bh)
412 {
413 int isize;
414 if (!bh)
415 return;
416
417 isize = BUFSIZE_INDEX(bh->b_size);
418 bh->b_dev = B_FREE;
419
420 if(!free_list[isize]) {
421 free_list[isize] = bh;
422 bh->b_prev_free = bh;
423 };
424
425 nr_free[isize]++;
426 bh->b_next_free = free_list[isize];
427 bh->b_prev_free = free_list[isize]->b_prev_free;
428 free_list[isize]->b_prev_free->b_next_free = bh;
429 free_list[isize]->b_prev_free = bh;
430 }
431
432 static inline void insert_into_queues(struct buffer_head * bh)
433 {
434
435 if(bh->b_dev == B_FREE) {
436 put_last_free(bh);
437 return;
438 }
439 if(!lru_list[bh->b_list]) {
440 lru_list[bh->b_list] = bh;
441 bh->b_prev_free = bh;
442 }
443 if (!next_to_age[bh->b_list])
444 next_to_age[bh->b_list] = bh;
445 if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
446 bh->b_next_free = lru_list[bh->b_list];
447 bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
448 lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
449 lru_list[bh->b_list]->b_prev_free = bh;
450 nr_buffers_type[bh->b_list]++;
451 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
452
453 bh->b_prev = NULL;
454 bh->b_next = NULL;
455 if (!(bh->b_dev))
456 return;
457 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
458 hash(bh->b_dev,bh->b_blocknr) = bh;
459 if (bh->b_next)
460 bh->b_next->b_prev = bh;
461 }
462
463 static inline struct buffer_head * find_buffer(kdev_t dev, int block, int size)
464 {
465 struct buffer_head * tmp;
466
467 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
468 if (tmp->b_blocknr == block && tmp->b_dev == dev)
469 if (tmp->b_size == size)
470 return tmp;
471 else {
472 printk("VFS: Wrong blocksize on device %s\n",
473 kdevname(dev));
474 return NULL;
475 }
476 return NULL;
477 }
478
479
480
481
482
483
484
485
486 struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
487 {
488 struct buffer_head * bh;
489
490 for (;;) {
491 if (!(bh=find_buffer(dev,block,size)))
492 return NULL;
493 bh->b_count++;
494 wait_on_buffer(bh);
495 if (bh->b_dev == dev && bh->b_blocknr == block
496 && bh->b_size == size)
497 return bh;
498 bh->b_count--;
499 }
500 }
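get_hash_table() increments b_count on the buffer it returns, so even a caller that only wants to probe the cache must balance that with brelse(). A minimal sketch (use_cached_data is a hypothetical consumer):

	struct buffer_head *bh = get_hash_table(dev, block, BLOCK_SIZE);
	if (bh) {
		/* cache hit: bh->b_count was incremented for us */
		if (buffer_uptodate(bh))
			use_cached_data(bh->b_data);	/* hypothetical */
		brelse(bh);	/* drop the reference again */
	}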
501
502 void set_blocksize(kdev_t dev, int size)
503 {
504 int i, nlist;
505 struct buffer_head * bh, *bhnext;
506
507 if (!blksize_size[MAJOR(dev)])
508 return;
509
510 if (size > PAGE_SIZE)
511 size = 0;
512
513 switch (size) {
514 default: panic("Invalid blocksize passed to set_blocksize");
515 case 512: case 1024: case 2048: case 4096: case 8192: ;
516 }
517
518 if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
519 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
520 return;
521 }
522 if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
523 return;
524 sync_buffers(dev, 2);
525 blksize_size[MAJOR(dev)][MINOR(dev)] = size;
526
527
528
529
530 for(nlist = 0; nlist < NR_LIST; nlist++) {
531 bh = lru_list[nlist];
532 for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
533 if(!bh) break;
534 bhnext = bh->b_next_free;
535 if (bh->b_dev != dev)
536 continue;
537 if (bh->b_size == size)
538 continue;
539
540 wait_on_buffer(bh);
541 if (bh->b_dev == dev && bh->b_size != size) {
542 clear_bit(BH_Dirty, &bh->b_state);
543 clear_bit(BH_Uptodate, &bh->b_state);
544 clear_bit(BH_Req, &bh->b_state);
545 bh->b_flushtime = 0;
546 }
547 remove_from_hash_queue(bh);
548 }
549 }
550 }
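A filesystem that uses a non-default block size is expected to call set_blocksize() at mount time, before its first bread() at the new size, so that buffers of the old size are flushed and invalidated. A sketch of the usual pattern (the 2048-byte size is just an example):

	/* in a read_super() implementation, before reading metadata: */
	set_blocksize(dev, 2048);
	bh = bread(dev, 0, 2048);	/* safe: no stale buffers of another size remain */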
551
552 #define BADNESS(bh) (buffer_dirty(bh) || buffer_locked(bh))
553
554 void refill_freelist(int size)
555 {
556 struct buffer_head * bh, * tmp;
557 struct buffer_head * candidate[NR_LIST];
558 unsigned int best_time, winner;
559 int isize = BUFSIZE_INDEX(size);
560 int buffers[NR_LIST];
561 int i;
562 int needed;
563
564
565 /* If there are plenty of free buffers of this size on the free
566  * list already, there is nothing to do. */
567
568 if (nr_free[isize] > 100)
569 return;
570
571
572 /* We are going to try to locate this much memory: first by growing
573  * the buffer cache with fresh pages, then by stealing from other
574  * buffer sizes, and finally by recycling candidates from the LRU
575  * lists below. */
576 needed = bdf_prm.b_un.nrefill * size;
577
578 while (nr_free_pages > min_free_pages*2 && needed > 0 &&
579 grow_buffers(GFP_BUFFER, size)) {
580 needed -= PAGE_SIZE;
581 }
582
583 if(needed <= 0) return;
584
585
586
587
588 while(maybe_shrink_lav_buffers(size))
589 {
590 if(!grow_buffers(GFP_BUFFER, size)) break;
591 needed -= PAGE_SIZE;
592 if(needed <= 0) return;
593 };
594
595
596
597
598
599
600
601 repeat0:
602 for(i=0; i<NR_LIST; i++){
603 if(i == BUF_DIRTY || i == BUF_SHARED ||
604 nr_buffers_type[i] == 0) {
605 candidate[i] = NULL;
606 buffers[i] = 0;
607 continue;
608 }
609 buffers[i] = nr_buffers_type[i];
610 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
611 {
612 if(buffers[i] < 0) panic("Here is the problem");
613 tmp = bh->b_next_free;
614 if (!bh) break;
615
616 if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
617 buffer_dirty(bh)) {
618 refile_buffer(bh);
619 continue;
620 }
621
622 if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
623 continue;
624
625
626
627
628
629 if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
630 buffers[i] = 0;
631 break;
632 }
633
634 if (BADNESS(bh)) continue;
635 break;
636 };
637 if(!buffers[i]) candidate[i] = NULL;
638 else candidate[i] = bh;
639 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
640 }
641
642 repeat:
643 if(needed <= 0) return;
644
645
646
647 winner = best_time = UINT_MAX;
648 for(i=0; i<NR_LIST; i++){
649 if(!candidate[i]) continue;
650 if(candidate[i]->b_lru_time < best_time){
651 best_time = candidate[i]->b_lru_time;
652 winner = i;
653 }
654 }
655
656
657 if(winner != UINT_MAX) {
658 i = winner;
659 bh = candidate[i];
660 candidate[i] = bh->b_next_free;
661 if(candidate[i] == bh) candidate[i] = NULL;
662 if (bh->b_count || bh->b_size != size)
663 panic("Busy buffer in candidate list\n");
664 if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1)
665 panic("Shared buffer in candidate list\n");
666 if (buffer_protected(bh))
667 panic("Protected buffer in candidate list\n");
668 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
669
670 if(bh->b_dev == B_FREE)
671 panic("Wrong list");
672 remove_from_queues(bh);
673 bh->b_dev = B_FREE;
674 put_last_free(bh);
675 needed -= bh->b_size;
676 buffers[i]--;
677 if(buffers[i] < 0) panic("Here is the problem");
678
679 if(buffers[i] == 0) candidate[i] = NULL;
680
681
682
683 if(candidate[i] && buffers[i] > 0){
684 if(buffers[i] <= 0) panic("Here is another problem");
685 for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
686 if(buffers[i] < 0) panic("Here is the problem");
687 tmp = bh->b_next_free;
688 if (!bh) break;
689
690 if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
691 buffer_dirty(bh)) {
692 refile_buffer(bh);
693 continue;
694 };
695
696 if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
697 continue;
698
699
700
701
702
703 if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
704 buffers[i] = 0;
705 break;
706 }
707
708 if (BADNESS(bh)) continue;
709 break;
710 };
711 if(!buffers[i]) candidate[i] = NULL;
712 else candidate[i] = bh;
713 if(candidate[i] && candidate[i]->b_count)
714 panic("Here is the problem");
715 }
716
717 goto repeat;
718 }
719
720 if(needed <= 0) return;
721
722
723
724 if (nr_free_pages > min_free_pages + 5) {
725 if (grow_buffers(GFP_BUFFER, size)) {
726 needed -= PAGE_SIZE;
727 goto repeat0;
728 };
729 }
730
731
732 if (!grow_buffers(GFP_ATOMIC, size))
733 wakeup_bdflush(1);
734 needed -= PAGE_SIZE;
735 goto repeat0;
736 }
737
738
739 /*
740  * getblk() returns the buffer for a given device/block, allocating a
741  * new one from the free list if it is not in the cache.  The buffer
742  * is returned with b_count incremented but no I/O started: callers
743  * must check buffer_uptodate() and read it themselves if necessary
744  * (as bread() below does).
745  */
748 struct buffer_head * getblk(kdev_t dev, int block, int size)
749 {
750 struct buffer_head * bh;
751 int isize = BUFSIZE_INDEX(size);
752
753
754 buffer_usage[isize]++;
755
756
757
758
759 repeat:
760 bh = get_hash_table(dev, block, size);
761 if (bh) {
762 if (!buffer_dirty(bh)) {
763 if (buffer_uptodate(bh))
764 put_last_lru(bh);
765 bh->b_flushtime = 0;
766 }
767 set_bit(BH_Touched, &bh->b_state);
768 return bh;
769 }
770
771 while(!free_list[isize]) refill_freelist(size);
772
773 if (find_buffer(dev,block,size))
774 goto repeat;
775
776 bh = free_list[isize];
777 remove_from_free_list(bh);
778
779 /* OK, FINALLY we know that this buffer is the only one of its kind,
780  * that it is unused (b_count == 0), unlocked, and clean. */
781 bh->b_count=1;
782 bh->b_flushtime=0;
783 bh->b_state=(1<<BH_Touched);
784 bh->b_dev=dev;
785 bh->b_blocknr=block;
786 insert_into_queues(bh);
787 return bh;
788 }
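Because getblk() starts no I/O, a writer that will overwrite the whole block can skip reading it first. A sketch of that pattern, using only helpers defined or used in this file:

	struct buffer_head *bh = getblk(dev, block, BLOCK_SIZE);
	memset(bh->b_data, 0, BLOCK_SIZE);	/* we replace the entire contents... */
	mark_buffer_uptodate(bh, 1);		/* ...so the old data never matters */
	mark_buffer_dirty(bh, 0);		/* let bdflush write it back later */
	brelse(bh);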
789
790 void set_writetime(struct buffer_head * buf, int flag)
791 {
792 int newtime;
793
794 if (buffer_dirty(buf)) {
795
796 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
797 bdf_prm.b_un.age_buffer);
798 if(!buf->b_flushtime || buf->b_flushtime > newtime)
799 buf->b_flushtime = newtime;
800 } else {
801 buf->b_flushtime = 0;
802 }
803 }
804
805
806 void refile_buffer(struct buffer_head * buf)
807 {
808 int dispose;
809
810 if(buf->b_dev == B_FREE) {
811 printk("Attempt to refile free buffer\n");
812 return;
813 }
814 if (buffer_dirty(buf))
815 dispose = BUF_DIRTY;
816 else if ((mem_map[MAP_NR((unsigned long) buf->b_data)].count > 1) || buffer_protected(buf))
817 dispose = BUF_SHARED;
818 else if (buffer_locked(buf))
819 dispose = BUF_LOCKED;
820 else if (buf->b_list == BUF_SHARED)
821 dispose = BUF_UNSHARED;
822 else
823 dispose = BUF_CLEAN;
824 if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
825 if(dispose != buf->b_list) {
826 if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
827 buf->b_lru_time = jiffies;
828 if(dispose == BUF_LOCKED &&
829 (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
830 dispose = BUF_LOCKED1;
831 remove_from_queues(buf);
832 buf->b_list = dispose;
833 insert_into_queues(buf);
834 if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
835 (nr_buffers - nr_buffers_type[BUF_SHARED]) *
836 bdf_prm.b_un.nfract/100)
837 wakeup_bdflush(0);
838 }
839 }
840
841
842 /* Release a buffer: update its write time, refile it onto the right
843  * list, and drop the reference count. */
844 void __brelse(struct buffer_head * buf)
845 {
846 wait_on_buffer(buf);
847
848
849 set_writetime(buf, 0);
850 refile_buffer(buf);
851
852 if (buf->b_count) {
853 buf->b_count--;
854 return;
855 }
856 printk("VFS: brelse: Trying to free free buffer\n");
857 }
858
859
860 /*
861  * bforget() is like brelse(), except that it discards the contents:
862  * the buffer is marked clean and removed from the hash queues so it
863  * cannot be found and re-used. */
864 void __bforget(struct buffer_head * buf)
865 {
866 wait_on_buffer(buf);
867 mark_buffer_clean(buf);
868 clear_bit(BH_Protected, &buf->b_state);
869 buf->b_count--;
870 remove_from_hash_queue(buf);
871 buf->b_dev = NODEV;
872 refile_buffer(buf);
873 }
874
875
876 /* bread() reads a specified block and returns the buffer that
877  * contains it.  It returns NULL if the block was unreadable.
878  */
879 struct buffer_head * bread(kdev_t dev, int block, int size)
880 {
881 struct buffer_head * bh;
882
883 if (!(bh = getblk(dev, block, size))) {
884 printk("VFS: bread: READ error on device %s\n",
885 kdevname(dev));
886 return NULL;
887 }
888 if (buffer_uptodate(bh))
889 return bh;
890 ll_rw_block(READ, 1, &bh);
891 wait_on_buffer(bh);
892 if (buffer_uptodate(bh))
893 return bh;
894 brelse(bh);
895 return NULL;
896 }
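Typical filesystem usage of bread(); the block number 1 (a classic superblock location) is illustrative only:

	struct buffer_head *bh = bread(dev, 1, BLOCK_SIZE);
	if (!bh)
		return -EIO;			/* block was unreadable */
	check_super(bh->b_data);		/* hypothetical consumer */
	brelse(bh);				/* release the reference from getblk() */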
897
898
899 /*
900  * breada() is like bread(), but additionally starts read-ahead on up
901  * to NBUF further blocks, bounded by the device's read_ahead setting
902  * and by the file size.
903  */
904 #define NBUF 16
905
906 struct buffer_head * breada(kdev_t dev, int block, int bufsize,
907 unsigned int pos, unsigned int filesize)
908 {
909 struct buffer_head * bhlist[NBUF];
910 unsigned int blocks;
911 struct buffer_head * bh;
912 int index;
913 int i, j;
914
915 if (pos >= filesize)
916 return NULL;
917
918 if (block < 0 || !(bh = getblk(dev,block,bufsize)))
919 return NULL;
920
921 index = BUFSIZE_INDEX(bh->b_size);
922
923 if (buffer_uptodate(bh))
924 return bh;
925
926 blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);
927
928 if (blocks > (read_ahead[MAJOR(dev)] >> index))
929 blocks = read_ahead[MAJOR(dev)] >> index;
930 if (blocks > NBUF)
931 blocks = NBUF;
932
933 bhlist[0] = bh;
934 j = 1;
935 for(i=1; i<blocks; i++) {
936 bh = getblk(dev,block+i,bufsize);
937 if (buffer_uptodate(bh)) {
938 brelse(bh);
939 break;
940 }
941 bhlist[j++] = bh;
942 }
943
944
945 ll_rw_block(READ, j, bhlist);
946
947 for(i=1; i<j; i++)
948 brelse(bhlist[i]);
949
950
951 bh = bhlist[0];
952 wait_on_buffer(bh);
953 if (buffer_uptodate(bh))
954 return bh;
955 brelse(bh);
956 return NULL;
957 }
958
959
960 /* Return a buffer head to the unused list, preserving its wait queue,
961  * and wake up anyone waiting for one. */
962 static void put_unused_buffer_head(struct buffer_head * bh)
963 {
964 struct wait_queue * wait;
965
966 wait = ((volatile struct buffer_head *) bh)->b_wait;
967 memset(bh,0,sizeof(*bh));
968 ((volatile struct buffer_head *) bh)->b_wait = wait;
969 bh->b_next_free = unused_list;
970 unused_list = bh;
971 wake_up(&buffer_wait);
972 }
973
974 static void get_more_buffer_heads(void)
975 {
976 int i;
977 struct buffer_head * bh;
978
979 for (;;) {
980 if (unused_list)
981 return;
982
983
984
985
986
987
988 bh = (struct buffer_head *) get_free_page(GFP_ATOMIC);
989 if (bh)
990 break;
991
992
993
994
995
996
997 sleep_on(&buffer_wait);
998 }
999
1000 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
1001 bh->b_next_free = unused_list;
1002 unused_list = bh++;
1003 }
1004 }
1005
1006
1007 /*
1008  * Buffer heads used for temporary page I/O cannot be put back on the
1009  * unused list directly from unlock_buffer(), which may run from an
1010  * interrupt: they are chained onto reuse_list instead, and moved to
1011  * the unused list here with interrupts disabled around the unlink.
1012  */
1017 static inline void recover_reusable_buffer_heads(void)
1018 {
1019 struct buffer_head *bh;
1020 unsigned long flags;
1021
1022 save_flags(flags);
1023 while (reuse_list) {
1024 cli();
1025 bh = reuse_list;
1026 reuse_list = bh->b_next_free;
1027 restore_flags(flags);
1028 put_unused_buffer_head(bh);
1029 }
1030 }
1031
1032 static struct buffer_head * get_unused_buffer_head(void)
1033 {
1034 struct buffer_head * bh;
1035
1036 recover_reusable_buffer_heads();
1037 get_more_buffer_heads();
1038 if (!unused_list)
1039 return NULL;
1040 bh = unused_list;
1041 unused_list = bh->b_next_free;
1042 bh->b_next_free = NULL;
1043 bh->b_data = NULL;
1044 bh->b_size = 0;
1045 bh->b_state = 0;
1046 return bh;
1047 }
1048
1049
1050 /*
1051  * Create the appropriate buffers for a data page, given the size of
1052  * each buffer, linked through b_this_page.  Returns NULL if any of
1053  * the buffer heads cannot be obtained.
1054  */
1055 static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
1056 {
1057 struct buffer_head *bh, *head;
1058 unsigned long offset;
1059
1060 head = NULL;
1061 offset = PAGE_SIZE;
1062 while ((offset -= size) < PAGE_SIZE) {
1063 bh = get_unused_buffer_head();
1064 if (!bh)
1065 goto no_grow;
1066 bh->b_this_page = head;
1067 head = bh;
1068 bh->b_data = (char *) (page+offset);
1069 bh->b_size = size;
1070 bh->b_dev = B_FREE;
1071 }
1072 return head;
1073
1074
1075 /* Ran out of buffer heads: undo what we have allocated so far. */
1076 no_grow:
1077 bh = head;
1078 while (bh) {
1079 head = bh;
1080 bh = bh->b_this_page;
1081 put_unused_buffer_head(head);
1082 }
1083 return NULL;
1084 }
1085
1086 int brw_page(int rw, unsigned long address, kdev_t dev, int b[], int size, int bmap)
1087 {
1088 struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
1089 int block, nr;
1090 struct page *page;
1091
1092 page = mem_map + MAP_NR(address);
1093 page->uptodate = 0;
1094 bh = create_buffers(address, size);
1095 if (!bh)
1096 return -ENOMEM;
1097 nr = 0;
1098 next = bh;
1099 do {
1100 struct buffer_head * tmp;
1101 block = *(b++);
1102
1103 set_bit(BH_FreeOnIO, &next->b_state);
1104 next->b_list = BUF_CLEAN;
1105 next->b_dev = dev;
1106 next->b_blocknr = block;
1107 next->b_count = 1;
1108 next->b_flushtime = 0;
1109 set_bit(BH_Uptodate, &next->b_state);
1110
1111
1112 /* Blocks that bmap() could not map (block == 0) are holes in the
1113  * file: just clear the data and treat the buffer as done. */
1114
1116 if (bmap && !block) {
1117 memset(next->b_data, 0, size);
1118 next->b_count--;
1119 continue;
1120 }
1121 tmp = get_hash_table(dev, block, size);
1122 if (tmp) {
1123 if (!buffer_uptodate(tmp)) {
1124 if (rw == READ)
1125 ll_rw_block(READ, 1, &tmp);
1126 wait_on_buffer(tmp);
1127 }
1128 if (rw == READ)
1129 memcpy(next->b_data, tmp->b_data, size);
1130 else {
1131 memcpy(tmp->b_data, next->b_data, size);
1132 mark_buffer_dirty(tmp, 0);
1133 }
1134 brelse(tmp);
1135 next->b_count--;
1136 continue;
1137 }
1138 if (rw == READ)
1139 clear_bit(BH_Uptodate, &next->b_state);
1140 else
1141 set_bit(BH_Dirty, &next->b_state);
1142 arr[nr++] = next;
1143 } while (prev = next, (next = next->b_this_page) != NULL);
1144 prev->b_this_page = bh;
1145
1146 if (nr)
1147 ll_rw_block(rw, nr, arr);
1148 else {
1149 page->locked = 0;
1150 page->uptodate = 1;
1151 wake_up(&page->wait);
1152 next = bh;
1153 do {
1154 next->b_next_free = reuse_list;
1155 reuse_list = next;
1156 next = next->b_this_page;
1157 } while (next != bh);
1158 }
1159 ++current->maj_flt;
1160 return 0;
1161 }
1162
1163 void mark_buffer_uptodate(struct buffer_head * bh, int on)
1164 {
1165 if (on) {
1166 struct buffer_head *tmp = bh;
1167 int page_uptodate = 1;
1168 set_bit(BH_Uptodate, &bh->b_state);
1169 do {
1170 if (!test_bit(BH_Uptodate, &tmp->b_state)) {
1171 page_uptodate = 0;
1172 break;
1173 }
1174 tmp=tmp->b_this_page;
1175 } while (tmp && tmp != bh);
1176 if (page_uptodate)
1177 mem_map[MAP_NR(bh->b_data)].uptodate = 1;
1178 } else
1179 clear_bit(BH_Uptodate, &bh->b_state);
1180 }
1181
1182 void unlock_buffer(struct buffer_head * bh)
1183 {
1184 struct buffer_head *tmp;
1185 unsigned long flags;
1186 struct page *page;
1187
1188 clear_bit(BH_Lock, &bh->b_state);
1189 wake_up(&bh->b_wait);
1190
1191 if (!test_bit(BH_FreeOnIO, &bh->b_state))
1192 return;
1193 page = mem_map + MAP_NR(bh->b_data);
1194 if (!page->locked) {
1195 printk ("Whoops: unlock_buffer: "
1196 "async io complete on unlocked page\n");
1197 return;
1198 }
1199 if (bh->b_count != 1) {
1200 printk ("Whoops: unlock_buffer: b_count != 1 on async io.\n");
1201 return;
1202 }
1203
1204 /* This buffer did asynchronous page I/O: drop our reference, and if
1205  * every buffer on the page is now finished, release the page and
1206  * recycle the buffer heads via reuse_list. */
1207 bh->b_count--;
1208 for (tmp = bh; tmp=tmp->b_this_page, tmp!=bh; ) {
1209 if (test_bit(BH_Lock, &tmp->b_state) || tmp->b_count)
1210 return;
1211 }
1212
1213
1214 save_flags(flags);
1215 page->locked = 0;
1216 wake_up(&page->wait);
1217 cli();
1218 tmp = bh;
1219 do {
1220 if (!test_bit(BH_FreeOnIO, &tmp->b_state)) {
1221 printk ("Whoops: unlock_buffer: "
1222 "async IO mismatch on page.\n");
1223 restore_flags(flags);
1224 return;
1225 }
1226 tmp->b_next_free = reuse_list;
1227 reuse_list = tmp;
1228 clear_bit(BH_FreeOnIO, &tmp->b_state);
1229 tmp = tmp->b_this_page;
1230 } while (tmp != bh);
1231 restore_flags(flags);
1232 if (page->free_after) {
1233 extern int nr_async_pages;
1234 nr_async_pages--;
1235 page->free_after = 0;
1236 free_page(page_address(page));
1237 }
1238 wake_up(&buffer_wait);
1239 }
1240
1241
1242 /*
1243  * Generic "readpage" function for block devices that have the normal
1244  * bmap functionality.  The page is read asynchronously: unlock_buffer()
1245  * and mark_buffer_uptodate() propagate buffer state into the page
1246  * struct once I/O has completed.
1247  */
1248 int generic_readpage(struct inode * inode, struct page * page)
1249 {
1250 unsigned long block, address;
1251 int *p, nr[PAGE_SIZE/512];
1252 int i;
1253
1254 address = page_address(page);
1255 page->count++;
1256 page->locked = 1;
1257
1258 i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
1259 block = page->offset >> inode->i_sb->s_blocksize_bits;
1260 p = nr;
1261 do {
1262 *p = inode->i_op->bmap(inode, block);
1263 i--;
1264 block++;
1265 p++;
1266 } while (i > 0);
1267
1268
1269 brw_page(READ, address, inode->i_dev, nr, inode->i_sb->s_blocksize, 1);
1270 free_page(address);
1271 return 0;
1272 }
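A filesystem whose i_op->bmap() works can plug this function straight into its inode_operations as the readpage entry; the memory-management layer then calls it to fill mapped pages. A sketch only (the 2.0-era struct is initialised positionally; the named-field form below is purely illustrative):

	struct inode_operations myfs_file_inode_operations = {
		/* ... file_operations pointer and other entries ... */
		.readpage = generic_readpage,	/* illustrative named-field syntax */
		/* ... */
	};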
1273
1274
1275 /* Try to increase the number of buffers available: add a fresh page
1276  * of buffers of the given size to the free list. */
1277
1278 static int grow_buffers(int pri, int size)
1279 {
1280 unsigned long page;
1281 struct buffer_head *bh, *tmp;
1282 struct buffer_head * insert_point;
1283 int isize;
1284
1285 if ((size & 511) || (size > PAGE_SIZE)) {
1286 printk("VFS: grow_buffers: size = %d\n",size);
1287 return 0;
1288 }
1289
1290 isize = BUFSIZE_INDEX(size);
1291
1292 if (!(page = __get_free_page(pri)))
1293 return 0;
1294 bh = create_buffers(page, size);
1295 if (!bh) {
1296 free_page(page);
1297 return 0;
1298 }
1299
1300 insert_point = free_list[isize];
1301
1302 tmp = bh;
1303 while (1) {
1304 nr_free[isize]++;
1305 if (insert_point) {
1306 tmp->b_next_free = insert_point->b_next_free;
1307 tmp->b_prev_free = insert_point;
1308 insert_point->b_next_free->b_prev_free = tmp;
1309 insert_point->b_next_free = tmp;
1310 } else {
1311 tmp->b_prev_free = tmp;
1312 tmp->b_next_free = tmp;
1313 }
1314 insert_point = tmp;
1315 ++nr_buffers;
1316 if (tmp->b_this_page)
1317 tmp = tmp->b_this_page;
1318 else
1319 break;
1320 }
1321 free_list[isize] = bh;
1322 buffer_pages[MAP_NR(page)] = bh;
1323 tmp->b_this_page = bh;
1324 buffermem += PAGE_SIZE;
1325 return 1;
1326 }
1327
1328
1329 /*
1330  * Check whether all buffers on the page containing bh are unused,
1331  * and if so free them all and release the page.  Returns non-zero
1332  * if the underlying page was actually freed.
1333  */
1335 int try_to_free_buffer(struct buffer_head * bh, struct buffer_head ** bhp,
1336 int priority)
1337 {
1338 unsigned long page;
1339 struct buffer_head * tmp, * p;
1340 int isize = BUFSIZE_INDEX(bh->b_size);
1341
1342 *bhp = bh;
1343 page = (unsigned long) bh->b_data;
1344 page &= PAGE_MASK;
1345 tmp = bh;
1346 do {
1347 if (!tmp)
1348 return 0;
1349 if (tmp->b_count || buffer_protected(tmp) ||
1350 buffer_dirty(tmp) || buffer_locked(tmp) || tmp->b_wait)
1351 return 0;
1352 if (priority && buffer_touched(tmp))
1353 return 0;
1354 tmp = tmp->b_this_page;
1355 } while (tmp != bh);
1356 tmp = bh;
1357 do {
1358 p = tmp;
1359 tmp = tmp->b_this_page;
1360 nr_buffers--;
1361 nr_buffers_size[isize]--;
1362 if (p == *bhp)
1363 {
1364 *bhp = p->b_prev_free;
1365 if (p == *bhp)
1366 *bhp = NULL;
1367 }
1368 remove_from_queues(p);
1369 put_unused_buffer_head(p);
1370 } while (tmp != bh);
1371 buffermem -= PAGE_SIZE;
1372 buffer_pages[MAP_NR(page)] = NULL;
1373 free_page(page);
1374 return !mem_map[MAP_NR(page)].count;
1375 }
1376
1377
1378
1379 static inline void age_buffer(struct buffer_head *bh)
1380 {
1381 struct buffer_head *tmp = bh;
1382 int touched = 0;
1383
1384
1385 /*
1386  * Each buffer on the page carries a BH_Has_aged flag so that the page
1387  * is aged only once per scan: the first buffer encountered does the
1388  * work and sets the flag on its siblings, which then return early.
1389  */
1391 if (clear_bit(BH_Has_aged, &bh->b_state))
1392 return;
1393
1394 do {
1395 touched |= clear_bit(BH_Touched, &tmp->b_state);
1396 tmp = tmp->b_this_page;
1397 set_bit(BH_Has_aged, &tmp->b_state);
1398 } while (tmp != bh);
1399 clear_bit(BH_Has_aged, &bh->b_state);
1400
1401 if (touched)
1402 touch_page(mem_map + MAP_NR((unsigned long) bh->b_data));
1403 else
1404 age_page(mem_map + MAP_NR((unsigned long) bh->b_data));
1405 }
1406
1407
1408 /*
1409  * Compare the buffer-usage load averages of the different buffer
1410  * sizes and, if some other size is underused relative to the total,
1411  * shrink its buffers so the memory can be re-used for this size.
1412  */
1419 static int maybe_shrink_lav_buffers(int size)
1420 {
1421 int nlist;
1422 int isize;
1423 int total_lav, total_n_buffers, n_sizes;
1424
1425
1426
1427
1428
1429
1430 total_lav = total_n_buffers = n_sizes = 0;
1431 for(nlist = 0; nlist < NR_SIZES; nlist++)
1432 {
1433 total_lav += buffers_lav[nlist];
1434 if(nr_buffers_size[nlist]) n_sizes++;
1435 total_n_buffers += nr_buffers_size[nlist];
1436 total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
1437 }
1438
1439
1440
1441
1442 isize = (size ? BUFSIZE_INDEX(size) : -1);
1443
1444 if (n_sizes > 1)
1445 for(nlist = 0; nlist < NR_SIZES; nlist++)
1446 {
1447 if(nlist == isize) continue;
1448 if(nr_buffers_size[nlist] &&
1449 bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
1450 total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
1451 if(shrink_specific_buffers(6, bufferindex_size[nlist]))
1452 return 1;
1453 }
1454 return 0;
1455 }
1456
1457
1458 /*
1459  * Try to free a buffer page of the given size (size == 0 means any
1460  * size).  First scan the free lists, then age and reap buffers from
1461  * the LRU lists; returns non-zero if a page was freed.
1462  */
1469 static int shrink_specific_buffers(unsigned int priority, int size)
1470 {
1471 struct buffer_head *bh;
1472 int nlist;
1473 int i, isize, isize1;
1474
1475 #ifdef DEBUG
1476 if(size) printk("Shrinking buffers of size %d\n", size);
1477 #endif
1478
1479
1480 isize1 = (size ? BUFSIZE_INDEX(size) : -1);
1481
1482 for(isize = 0; isize<NR_SIZES; isize++){
1483 if(isize1 != -1 && isize1 != isize) continue;
1484 bh = free_list[isize];
1485 if(!bh) continue;
1486 for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
1487 if (bh->b_count || buffer_protected(bh) ||
1488 !bh->b_this_page)
1489 continue;
1490 if (!age_of((unsigned long) bh->b_data) &&
1491 try_to_free_buffer(bh, &bh, 6))
1492 return 1;
1493 if(!bh) break;
1494
1495
1496 }
1497 }
1498
1499
1500
1501 for(nlist = 0; nlist < NR_LIST; nlist++) {
1502 repeat1:
1503 if(priority > 2 && nlist == BUF_SHARED) continue;
1504 i = nr_buffers_type[nlist];
1505 i = ((BUFFEROUT_WEIGHT * i) >> 10) >> priority;
1506 for ( ; i > 0; i-- ) {
1507 bh = next_to_age[nlist];
1508 if (!bh)
1509 break;
1510 next_to_age[nlist] = bh->b_next_free;
1511
1512
1513 age_buffer(bh);
1514
1515
1516 if(bh->b_list != nlist) goto repeat1;
1517 if (bh->b_count || buffer_protected(bh) ||
1518 !bh->b_this_page)
1519 continue;
1520 if(size && bh->b_size != size) continue;
1521 if (buffer_locked(bh))
1522 if (priority)
1523 continue;
1524 else
1525 wait_on_buffer(bh);
1526 if (buffer_dirty(bh)) {
1527 bh->b_count++;
1528 bh->b_flushtime = 0;
1529 ll_rw_block(WRITEA, 1, &bh);
1530 bh->b_count--;
1531 continue;
1532 }
1533
1534
1535
1536 if ((age_of((unsigned long) bh->b_data) >>
1537 (6-priority)) > 0)
1538 continue;
1539 if (try_to_free_buffer(bh, &bh, 0))
1540 return 1;
1541 if(!bh) break;
1542 }
1543 }
1544 return 0;
1545 }
1546
1547
1548 /* Print statistics about the buffer cache to the console. */
1550 void show_buffers(void)
1551 {
1552 struct buffer_head * bh;
1553 int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1554 int protected = 0;
1555 int shared;
1556 int nlist, isize;
1557
1558 printk("Buffer memory: %6dkB\n",buffermem>>10);
1559 printk("Buffer heads: %6d\n",nr_buffer_heads);
1560 printk("Buffer blocks: %6d\n",nr_buffers);
1561
1562 for(nlist = 0; nlist < NR_LIST; nlist++) {
1563 shared = found = locked = dirty = used = lastused = protected = 0;
1564 bh = lru_list[nlist];
1565 if(!bh) continue;
1566 do {
1567 found++;
1568 if (buffer_locked(bh))
1569 locked++;
1570 if (buffer_protected(bh))
1571 protected++;
1572 if (buffer_dirty(bh))
1573 dirty++;
1574 if(mem_map[MAP_NR(((unsigned long) bh->b_data))].count !=1) shared++;
1575 if (bh->b_count)
1576 used++, lastused = found;
1577 bh = bh->b_next_free;
1578 } while (bh != lru_list[nlist]);
1579 printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, "
1580 "%d protected, %d dirty %d shrd\n",
1581 nlist, found, used, lastused, locked, protected, dirty, shared);
1582 };
1583 printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared \n");
1584 for(isize = 0; isize<NR_SIZES; isize++){
1585 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1586 buffers_lav[isize], nr_free[isize]);
1587 for(nlist = 0; nlist < NR_LIST; nlist++)
1588 printk("%7d ", nr_buffers_st[isize][nlist]);
1589 printk("\n");
1590 }
1591 }
1592
1593
1594 /*
1595  * Check whether all buffers on this page are unused, and if so
1596  * reassign them to the given device as a cluster of consecutive
1597  * blocks starting at starting_block.
1598  */
1600 static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
1601 kdev_t dev, unsigned int starting_block)
1602 {
1603 unsigned long page;
1604 struct buffer_head * tmp, * p;
1605
1606 *bhp = bh;
1607 page = (unsigned long) bh->b_data;
1608 page &= PAGE_MASK;
1609 if(mem_map[MAP_NR(page)].count != 1) return 0;
1610 tmp = bh;
1611 do {
1612 if (!tmp)
1613 return 0;
1614
1615 if (tmp->b_count || buffer_protected(tmp) ||
1616 buffer_dirty(tmp) || buffer_locked(tmp))
1617 return 0;
1618 tmp = tmp->b_this_page;
1619 } while (tmp != bh);
1620 tmp = bh;
1621
1622 while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
1623 tmp = tmp->b_this_page;
1624
1625
1626 bh = tmp;
1627 do {
1628 p = tmp;
1629 tmp = tmp->b_this_page;
1630 remove_from_queues(p);
1631 p->b_dev = dev;
1632 mark_buffer_uptodate(p, 0);
1633 clear_bit(BH_Req, &p->b_state);
1634 p->b_blocknr = starting_block++;
1635 insert_into_queues(p);
1636 } while (tmp != bh);
1637 return 1;
1638 }
1639
1640
1641 /*
1642  * Build a cluster for dev starting at starting_block by re-purposing
1643  * an existing page of free buffers.  Returns non-zero on success.
1644  */
1654 static int reassign_cluster(kdev_t dev,
1655 unsigned int starting_block, int size)
1656 {
1657 struct buffer_head *bh;
1658 int isize = BUFSIZE_INDEX(size);
1659 int i;
1660
1661
1662
1663
1664
1665 while(nr_free[isize] < 32) refill_freelist(size);
1666
1667 bh = free_list[isize];
1668 if(bh)
1669 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1670 if (!bh->b_this_page) continue;
1671 if (try_to_reassign(bh, &bh, dev, starting_block))
1672 return 4;
1673 }
1674 return 0;
1675 }
1676
1677
1678 /* Try to build a cluster from a brand-new page, giving up if any of
1679  * the blocks is already present in the cache. */
1680
1681 static unsigned long try_to_generate_cluster(kdev_t dev, int block, int size)
1682 {
1683 struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
1684 int isize = BUFSIZE_INDEX(size);
1685 unsigned long offset;
1686 unsigned long page;
1687 int nblock;
1688
1689 page = get_free_page(GFP_NOBUFFER);
1690 if(!page) return 0;
1691
1692 bh = create_buffers(page, size);
1693 if (!bh) {
1694 free_page(page);
1695 return 0;
1696 };
1697 nblock = block;
1698 for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
1699 if (find_buffer(dev, nblock++, size))
1700 goto not_aligned;
1701 }
1702 tmp = bh;
1703 nblock = 0;
1704 while (1) {
1705 arr[nblock++] = bh;
1706 bh->b_count = 1;
1707 bh->b_flushtime = 0;
1708 bh->b_state = 0;
1709 bh->b_dev = dev;
1710 bh->b_list = BUF_CLEAN;
1711 bh->b_blocknr = block++;
1712 nr_buffers++;
1713 nr_buffers_size[isize]++;
1714 insert_into_queues(bh);
1715 if (bh->b_this_page)
1716 bh = bh->b_this_page;
1717 else
1718 break;
1719 }
1720 buffermem += PAGE_SIZE;
1721 buffer_pages[MAP_NR(page)] = bh;
1722 bh->b_this_page = tmp;
1723 while (nblock-- > 0)
1724 brelse(arr[nblock]);
1725 return 4;
1726 not_aligned:
1727 while ((tmp = bh) != NULL) {
1728 bh = bh->b_this_page;
1729 put_unused_buffer_head(tmp);
1730 }
1731 free_page(page);
1732 return 0;
1733 }
1734
1735 unsigned long generate_cluster(kdev_t dev, int b[], int size)
1736 {
1737 int i, offset;
1738
1739 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1740 if(i && b[i]-1 != b[i-1]) return 0;
1741 if(find_buffer(dev, b[i], size)) return 0;
1742 };
1743
1744
1745
1746
1747
1748 if(maybe_shrink_lav_buffers(size))
1749 {
1750 int retval;
1751 retval = try_to_generate_cluster(dev, b[0], size);
1752 if(retval) return retval;
1753 };
1754
1755 if (nr_free_pages > min_free_pages*2)
1756 return try_to_generate_cluster(dev, b[0], size);
1757 else
1758 return reassign_cluster(dev, b[0], size);
1759 }
1760
1761
1762 /*
1763  * Initialise the buffer cache at boot: size the hash table according
1764  * to available memory, allocate the hash table and buffer_pages map,
1765  * and create the initial set of BLOCK_SIZE buffers.
1766  */
1771 void buffer_init(void)
1772 {
1773 int i;
1774 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1775 long memsize = MAP_NR(high_memory) << PAGE_SHIFT;
1776
1777 if (memsize >= 4*1024*1024) {
1778 if(memsize >= 16*1024*1024)
1779 nr_hash = 16381;
1780 else
1781 nr_hash = 4093;
1782 } else {
1783 nr_hash = 997;
1784 };
1785
1786 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1787 sizeof(struct buffer_head *));
1788
1789
1790 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1791 sizeof(struct buffer_head *));
1792 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1793 buffer_pages[i] = NULL;
1794
1795 for (i = 0 ; i < nr_hash ; i++)
1796 hash_table[i] = NULL;
1797 lru_list[BUF_CLEAN] = 0;
1798 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1799 if (!free_list[isize])
1800 panic("VFS: Unable to initialize buffer free list!");
1801 return;
1802 }
1803
1804
1805 /*
1806  * Synchronisation with the bdflush daemon: bdflush sleeps on
1807  * bdflush_wait, and callers that want to wait for a flush to finish
1808  * sleep on bdflush_done.
1809  */
1811 struct wait_queue * bdflush_wait = NULL;
1812 struct wait_queue * bdflush_done = NULL;
1813
1814 static void wakeup_bdflush(int wait)
1815 {
1816 wake_up(&bdflush_wait);
1817 if(wait) sleep_on(&bdflush_done);
1818 }
1819
1820
1821 /*
1822  * Here we attempt to write back old buffers.  Supers and inodes are
1823  * flushed as well, since this function is essentially "update" and
1824  * otherwise there would be no way of ensuring they ever get written
1825  * back.  Afterwards the per-size usage load averages are updated.
1826  */
1829 asmlinkage int sync_old_buffers(void)
1830 {
1831 int i, isize;
1832 int ndirty, nwritten;
1833 int nlist;
1834 int ncount;
1835 struct buffer_head * bh, *next;
1836
1837 sync_supers(0);
1838 sync_inodes(0);
1839
1840 ncount = 0;
1841 #ifdef DEBUG
1842 for(nlist = 0; nlist < NR_LIST; nlist++)
1843 #else
1844 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1845 #endif
1846 {
1847 ndirty = 0;
1848 nwritten = 0;
1849 repeat:
1850 bh = lru_list[nlist];
1851 if(bh)
1852 for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
1853
1854 if(bh->b_list != nlist) goto repeat;
1855 next = bh->b_next_free;
1856 if(!lru_list[nlist]) {
1857 printk("Dirty list empty %d\n", i);
1858 break;
1859 }
1860
1861 /* Clean buffer on the dirty list? Refile it. */
1862 if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
1863 {
1864 refile_buffer(bh);
1865 continue;
1866 }
1867
1868 if (buffer_locked(bh) || !buffer_dirty(bh))
1869 continue;
1870 ndirty++;
1871 if(bh->b_flushtime > jiffies) continue;
1872 nwritten++;
1873 bh->b_count++;
1874 bh->b_flushtime = 0;
1875 #ifdef DEBUG
1876 if(nlist != BUF_DIRTY) ncount++;
1877 #endif
1878 ll_rw_block(WRITE, 1, &bh);
1879 bh->b_count--;
1880 }
1881 }
1882 #ifdef DEBUG
1883 if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
1884 printk("Wrote %d/%d buffers\n", nwritten, ndirty);
1885 #endif
1886
1887 /* Update the per-size load averages of buffer usage. */
1888
1890 for(isize = 0; isize<NR_SIZES; isize++){
1891 CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
1892 buffer_usage[isize] = 0;
1893 };
1894 return 0;
1895 }
1896
1897
1898 /*
1899  * The bdflush system call: func == 1 flushes old buffers; for
1900  * func >= 2, parameter i = (func-2)/2 is read (func even) or set
1901  * (func odd) through/from data.
1902  */
1903 asmlinkage int sys_bdflush(int func, long data)
1904 {
1905 int i, error;
1906
1907 if (!suser())
1908 return -EPERM;
1909
1910 if (func == 1)
1911 return sync_old_buffers();
1912
1913
1914 if (func >= 2) {
1915 i = (func-2) >> 1;
1916 if (i < 0 || i >= N_PARAM)
1917 return -EINVAL;
1918 if((func & 1) == 0) {
1919 error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1920 if (error)
1921 return error;
1922 put_user(bdf_prm.data[i], (int*)data);
1923 return 0;
1924 };
1925 if (data < bdflush_min[i] || data > bdflush_max[i])
1926 return -EINVAL;
1927 bdf_prm.data[i] = data;
1928 return 0;
1929 };
1930
1931 /* Having func 0 used to launch the actual bdflush and then never
1932  * return (unless explicitly killed).  We return zero here to remain
1933  * semi-compatible with present update(8) programs. */
1934
1935 return 0;
1936 }
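From the dispatch above one can derive the user-space encoding: func == 1 runs sync_old_buffers(); for func >= 2, parameter i = (func-2)>>1 is read when func is even and set when func is odd. A hedged user-space sketch (it assumes the syscall number is exposed as SYS_bdflush by the libc headers of the era, and requires superuser rights):

	#include <unistd.h>
	#include <sys/syscall.h>

	/* Read bdflush parameter 0 (nfract), then raise it to 40%. */
	int raise_nfract(void)
	{
		int old_nfract;
		if (syscall(SYS_bdflush, 2, (long) &old_nfract) < 0)	/* func = 2*0+2: get */
			return -1;
		if (syscall(SYS_bdflush, 3, 40L) < 0)			/* func = 2*0+3: set */
			return -1;
		return old_nfract;
	}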
1937
1938
1939 /* This is the actual bdflush daemon: a kernel thread that is woken
1940  * when too many buffers are dirty, and writes some of them back. */
1941
1942 int bdflush(void * unused)
1943 {
1944 int i;
1945 int ndirty;
1946 int nlist;
1947 int ncount;
1948 struct buffer_head * bh, *next;
1949
1950
1951 /* Daemonize: detach from the session and process group we were
1952  * spawned with, and give ourselves a recognisable name. */
1956 current->session = 1;
1957 current->pgrp = 1;
1958 sprintf(current->comm, "kflushd");
1959
1960
1961 /* As a kernel thread we must take the big kernel lock ourselves
1962  * before touching buffer-cache internals on SMP. */
1963
1966 #ifdef __SMP__
1967 lock_kernel();
1968 syscall_count++;
1969 #endif
1970
1971 for (;;) {
1972 #ifdef DEBUG
1973 printk("bdflush() activated...");
1974 #endif
1975
1976 ncount = 0;
1977 #ifdef DEBUG
1978 for(nlist = 0; nlist < NR_LIST; nlist++)
1979 #else
1980 for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
1981 #endif
1982 {
1983 ndirty = 0;
1984 repeat:
1985 bh = lru_list[nlist];
1986 if(bh)
1987 for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
1988 bh = next) {
1989
1990 if(bh->b_list != nlist) goto repeat;
1991 next = bh->b_next_free;
1992 if(!lru_list[nlist]) {
1993 printk("Dirty list empty %d\n", i);
1994 break;
1995 }
1996
1997 /* Clean buffer on the dirty list? Refile it. */
1998 if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
1999 {
2000 refile_buffer(bh);
2001 continue;
2002 }
2003
2004 if (buffer_locked(bh) || !buffer_dirty(bh))
2005 continue;
2006
2007
2008 bh->b_count++;
2009 ndirty++;
2010 bh->b_flushtime = 0;
2011 ll_rw_block(WRITE, 1, &bh);
2012 #ifdef DEBUG
2013 if(nlist != BUF_DIRTY) ncount++;
2014 #endif
2015 bh->b_count--;
2016 }
2017 }
2018 #ifdef DEBUG
2019 if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
2020 printk("sleeping again.\n");
2021 #endif
2022 wake_up(&bdflush_done);
2023
2024
2025 /* If there are still a lot of dirty buffers around, skip the sleep
2026  * and flush some more. */
2027 if(nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
2028 bdf_prm.b_un.nfract/100) {
2029 current->signal = 0;
2030 interruptible_sleep_on(&bdflush_wait);
2031 }
2032 }
2033 }
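bdflush() never returns; it is spawned once during boot as a kernel thread. In kernels of this generation the spawn (in init/main.c, quoted from memory, so treat the exact location and flags as an assumption) is roughly:

	/* somewhere in init(), once the core kernel is up: */
	kernel_thread(bdflush, NULL, 0);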
2034