This source file includes the following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- __brelse
- __bforget
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- bread_page
- bwrite_page
- grow_buffers
- try_to_free
- age_buffer
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
- bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 #include <linux/sched.h>
22 #include <linux/kernel.h>
23 #include <linux/major.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26 #include <linux/errno.h>
27 #include <linux/malloc.h>
28 #include <linux/swapctl.h>
29
30 #include <asm/system.h>
31 #include <asm/segment.h>
32 #include <asm/io.h>
33
#define NR_SIZES 4
/* Maps (blocksize >> 9) to an index into the per-size arrays below;
 * -1 marks unsupported sizes.  Supported sizes: 512, 1024, 2048, 4096. */
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
/* Inverse map: size index -> blocksize in bytes. */
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;		/* number of slots in hash_table */
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
/* One LRU list per buffer state (clean, dirty, locked, ...). */
static struct buffer_head * lru_list[NR_LIST] = {NULL, };

/* Aging cursor: the next buffer to examine on each LRU list
 * when shrink_specific_buffers() scans it. */
static struct buffer_head * next_to_age[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;	/* spare buffer heads */
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};	/* getblk hit counter per size */
int buffers_lav[NR_SIZES] = {0,};	/* load average of buffer usage per size */
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;			/* bytes of memory held by the buffer cache */
int nr_buffer_heads = 0;
extern int *blksize_size[];
67
68
static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV

/*
 * Tunable bdflush parameters, accessible either by field name (b_un)
 * or by index (data[]) from sys_bdflush().
 */
static union bdflush_param{
	struct {
		int nfract;	/* %% of non-shared buffers dirty before
				   refile_buffer() wakes bdflush */
		int ndirty;	/* presumably max buffers written per bdflush
				   run -- confirm against bdflush() */
		int nrefill;	/* buffers worth of memory obtained per
				   refill_freelist() call */
		int nref_dirt;	/* NOTE(review): not referenced in this chunk;
				   verify its use in bdflush() */
		int clu_nfract;	/* presumably %% of cache kept for clustering;
				   not referenced in this chunk */
		int age_buffer;	/* jiffies a dirty buffer may age before its
				   flushtime (set_writetime, flag == 0) */
		int age_super;	/* jiffies for the flag != 0 (superblock/
				   metadata) case of set_writetime */
		int lav_const;	/* load-average weighting constant, used by
				   maybe_shrink_lav_buffers() */
		int lav_ratio;	/* NOTE(review): not referenced in this chunk */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};

/* Legal ranges for the parameters above, indexed like data[]. */
static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
107
108
109
110
111
112
113
114
115
116
/*
 * Sleep until the buffer is unlocked.  b_count is raised so the buffer
 * cannot be reclaimed while we sleep.  The lock test is re-done after
 * setting TASK_UNINTERRUPTIBLE so an unlock between test and schedule()
 * cannot lose the wakeup.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (buffer_locked(bh)) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
133
134
135
136
137
138
139
140
141
142
143
/*
 * Write out dirty buffers for a device (dev == 0 means all devices).
 * With wait == 0 writes are merely started; with wait != 0 up to three
 * passes are made, waiting on locked buffers in the later passes, and
 * the return value is nonzero if a buffer could not be made uptodate
 * (i.e. a write error was seen).
 */
static int sync_buffers(kdev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

	/* Pass 0: start writes on dirty buffers without sleeping.
	 * Pass 1: wait for locked buffers, rewrite anything re-dirtied.
	 * Pass 2: wait only; no new writes are issued. */
repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* Walk every LRU list; the *2 iteration bound is slack against
	 * the lists changing underneath us while we sleep. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	 repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			/* buffer moved to another list while we slept: rescan */
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (buffer_locked(bh))
			{
				/* On the first pass (or when not waiting) just
				 * remember that a locked buffer was skipped. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				/* sleeping may have changed the lists: rescan all */
				goto repeat2;
			}
			/* A buffer that has seen I/O (b_req) but is neither
			 * dirty nor uptodate indicates a write error. */
			if (wait && buffer_req(bh) && !buffer_locked(bh) &&
			    !buffer_dirty(bh) && !buffer_uptodate(bh)) {
				err = 1;
				continue;
			}
			/* No new writes on the final pass. */
			if (!buffer_dirty(bh) || pass>=2)
				continue;
			/* Don't bother about locked buffers. */
			if (buffer_locked(bh))
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);
			/* Dirty buffers belong on BUF_DIRTY: report strays. */
			if(nlist != BUF_DIRTY) {
				printk("[%d %s %ld] ", nlist,
				       kdevname(bh->b_dev), bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount)
		printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* When waiting, allow at most two restart passes. */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
220
/*
 * Start (but do not wait for) write-out of everything dirty on a
 * device.  Buffers are synced both before and after the supers and
 * inodes, because writing supers/inodes can dirty new buffers.
 */
void sync_dev(kdev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
	sync_dquots(dev, -1);
}
229
/*
 * Like sync_dev(), but the final buffer pass waits for completion and
 * its result (nonzero on write error) is returned to the caller.
 */
int fsync_dev(kdev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_dquots(dev, -1);
	return sync_buffers(dev, 1);
}
238
/* sync(2): flush every device; per tradition always returns 0. */
asmlinkage int sys_sync(void)
{
	fsync_dev(0);
	return 0;
}
244
/* Generic f_op->fsync: flush the entire device the file lives on. */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
249
/*
 * fsync(2): validate the descriptor and dispatch to the filesystem's
 * fsync method.  Returns -EBADF for a bad fd, -EINVAL when the
 * filesystem provides no fsync, -EIO when the method fails.
 */
asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}
263
/*
 * Invalidate all cached data for a device: every unused buffer for dev
 * is marked not-uptodate, clean and unrequested, so stale contents can
 * never be handed out again (used e.g. on media change).
 */
void invalidate_buffers(kdev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			/* re-check: the buffer may have been reused while we slept */
			if (bh->b_dev != dev)
				continue;
			/* busy buffers are left alone */
			if (bh->b_count)
				continue;
			bh->b_flushtime = 0;
			clear_bit(BH_Uptodate, &bh->b_state);
			clear_bit(BH_Dirty, &bh->b_state);
			clear_bit(BH_Req, &bh->b_state);
		}
	}
}
287
/* Hash a (device, block) pair to a chain head in hash_table. */
#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
290
291 static inline void remove_from_hash_queue(struct buffer_head * bh)
292 {
293 if (bh->b_next)
294 bh->b_next->b_prev = bh->b_prev;
295 if (bh->b_prev)
296 bh->b_prev->b_next = bh->b_next;
297 if (hash(bh->b_dev,bh->b_blocknr) == bh)
298 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
299 bh->b_next = bh->b_prev = NULL;
300 }
301
/*
 * Unlink bh from its circular doubly-linked LRU list, repairing the
 * list head and the aging cursor when either pointed at bh.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == B_FREE)
		panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	/* bh was the only element: the head still points at bh, so clear it */
	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	if (next_to_age[bh->b_list] == bh)
		next_to_age[bh->b_list] = bh->b_next_free;
	/* same single-element case for the aging cursor */
	if (next_to_age[bh->b_list] == bh)
		next_to_age[bh->b_list] = NULL;

	bh->b_next_free = bh->b_prev_free = NULL;
}
322
/*
 * Unlink bh from the circular free list of its size class and adjust
 * the per-size free count.  Only buffers marked B_FREE may be here.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != B_FREE)
		panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	/* bh links to itself: it was the only element on the list */
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
343
/*
 * Remove a buffer from every global structure it is on: free buffers
 * come off the size-indexed free list only; in-use buffers come off
 * their hash chain and LRU list with the type counters adjusted.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == B_FREE) {
		remove_from_free_list(bh); /* Free list entries are not on
					      any other lists */
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}
356
/*
 * Move bh to the most-recently-used end of its LRU list.  The lists
 * are circular, so when bh is already the head, advancing the head
 * pointer alone leaves bh at the tail.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		/* cheap path: rotate the list instead of relinking */
		lru_list[bh->b_list] = bh->b_next_free;
		if (next_to_age[bh->b_list] == bh)
			next_to_age[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == B_FREE)
		panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Bootstrap an empty list so the tail insertion below works. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;

	/* splice bh in just before the head, i.e. at the MRU end */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
384
/*
 * Mark bh as free and insert it at the tail of the circular free list
 * for its size class.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = B_FREE;  /* So it is obvious we are on the free list. */

	/* Bootstrap an empty list so the tail insertion below works. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
405
/*
 * Link a buffer into the global structures: free buffers go onto the
 * free list; all others go to the MRU end of their LRU list and, if
 * they belong to a device, onto the hash table as well.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if(bh->b_dev == B_FREE) {
		put_last_free(bh);
		return;
	}
	/* Bootstrap an empty LRU list so the tail insertion works. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	}
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Anonymous buffers (no device) are not hashed. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!(bh->b_dev))
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
436
437 static inline struct buffer_head * find_buffer(kdev_t dev, int block, int size)
438 {
439 struct buffer_head * tmp;
440
441 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
442 if (tmp->b_dev == dev && tmp->b_blocknr == block)
443 if (tmp->b_size == size)
444 return tmp;
445 else {
446 printk("VFS: Wrong blocksize on device %s\n",
447 kdevname(dev));
448 return NULL;
449 }
450 return NULL;
451 }
452
453
454
455
456
457
458
459
/*
 * Look up a cached block and return it with b_count raised, after any
 * pending I/O has completed.  Because wait_on_buffer() may sleep, the
 * buffer's identity is re-verified afterwards and the lookup retried
 * if the buffer was reused while we slept.
 */
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block
					     && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}
475
/*
 * Change the block size used for I/O on a device.  Existing buffers of
 * a different size for that device are synced out, invalidated (clean,
 * not uptodate, no pending request) and unhashed so their pages can be
 * reused.  Only 512/1024/2048/4096 are accepted.
 */
void set_blocksize(kdev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Walk the LRU lists and drop every wrong-sized buffer for dev.
	 * The *2 iteration bound is slack against list mutation while
	 * we sleep in wait_on_buffer(). */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			/* re-check identity: the buffer may have been reused */
			if (bh->b_dev == dev && bh->b_size != size) {
				clear_bit(BH_Dirty, &bh->b_state);
				clear_bit(BH_Uptodate, &bh->b_state);
				clear_bit(BH_Req, &bh->b_state);
				bh->b_flushtime = 0;
			}
			remove_from_hash_queue(bh);
		}
	}
}
522
/* A buffer is "bad" for reclaim if it is dirty or has I/O in flight. */
#define BADNESS(bh) (buffer_dirty(bh) || buffer_locked(bh))

/*
 * Replenish the free list for the given buffer size.  Strategy, in
 * order: (1) grow new buffers from free pages while memory is
 * plentiful; (2) shrink under-used size classes and reuse their pages;
 * (3) steal the least-recently-used clean, unshared buffers of the
 * right size from the LRU lists; (4) as a last resort grow with
 * GFP_ATOMIC and wake bdflush.
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* Nothing to do when this size class already has plenty free. */
	if (nr_free[isize] > 100)
		return;

	/* Bytes of buffer memory we want to obtain. */
	needed =bdf_prm.b_un.nrefill * size;

	/* (1) grow from free pages while memory is comfortably available */
	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* (2) shrink other size classes and grow into the freed pages */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* (3) pick one reclaim candidate from each eligible LRU list,
	 * then repeatedly take the globally oldest candidate. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
			    buffer_dirty(bh)) {
				/* shared or dirty: just move it to the right list */
				refile_buffer(bh);
				continue;
			}

			if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
				continue;

			/* On the locked lists, once we hit a locked buffer
			 * the rest of the list is locked too: give up. */
			if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Select the candidate with the oldest LRU timestamp. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* Turn the winner into a free buffer and refill its slot. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;  /* took the last one */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1)
			panic("Shared buffer in candidate list\n");
		if (buffer_protected(bh))
			panic("Protected buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == B_FREE)
			panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = B_FREE;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Re-scan list i for its next candidate (same criteria
		 * as the initial scan above). */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
				    buffer_dirty(bh)) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
					continue;

				/* locked lists: a locked buffer ends the scan */
				if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Too bad, that was not enough.  Try a little harder to grow some. */
	if (nr_free_pages > min_free_pages + 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* (4) last resort: atomic allocation, and wake bdflush on failure */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}
708
709
710
711
712
713
714
715
716
717
718
/*
 * Return a buffer for (dev, block), creating one from the free list if
 * it is not cached.  The buffer is returned with b_count raised; the
 * caller must brelse() it.  The buffer is NOT guaranteed uptodate --
 * callers that need the data must read it (see bread()).
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* bump the usage counter feeding the per-size load average */
	buffer_usage[isize]++;

	/* If the block is cached, touch it and hand it out. */
repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (!buffer_dirty(bh)) {
			if (buffer_uptodate(bh))
				put_last_lru(bh);
			bh->b_flushtime = 0;
		}
		set_bit(BH_Touched, &bh->b_state);
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist() can sleep: someone may have created the
	 * buffer meanwhile, so check the hash table again. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	 * and it is free (b_count=0), unlocked and clean. */
	bh->b_count=1;
	bh->b_flushtime=0;
	bh->b_state=(1<<BH_Touched);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
760
761 void set_writetime(struct buffer_head * buf, int flag)
762 {
763 int newtime;
764
765 if (buffer_dirty(buf)) {
766
767 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
768 bdf_prm.b_un.age_buffer);
769 if(!buf->b_flushtime || buf->b_flushtime > newtime)
770 buf->b_flushtime = newtime;
771 } else {
772 buf->b_flushtime = 0;
773 }
774 }
775
776
/*
 * Move a buffer to the LRU list matching its current state (dirty,
 * shared, locked, unshared, clean).  Entering BUF_DIRTY may wake
 * bdflush when too large a fraction of the non-shared cache is dirty.
 */
void refile_buffer(struct buffer_head * buf)
{
	int dispose;

	if(buf->b_dev == B_FREE) {
		printk("Attempt to refile free buffer\n");
		return;
	}
	if (buffer_dirty(buf))
		dispose = BUF_DIRTY;
	else if ((mem_map[MAP_NR((unsigned long) buf->b_data)].count > 1) || buffer_protected(buf))
		dispose = BUF_SHARED;
	else if (buffer_locked(buf))
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;
	else
		dispose = BUF_CLEAN;
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		/* split locked buffers into two classes by how soon after
		 * dirtying they were written out */
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		/* too many dirty buffers: kick the flush daemon */
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}
811
812
813
814
/*
 * Release a buffer reference: wait for pending I/O, refresh the flush
 * deadline and list placement, then drop b_count, waking any sleeper
 * on buffer_wait when the count reaches zero.  Complains if the buffer
 * is already free.
 */
void __brelse(struct buffer_head * buf)
{
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (!--buf->b_count)
			wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
830
831
832
833
834
835
/*
 * Forget a buffer entirely: mark it clean, drop the reference and
 * detach it from its device so its contents are never written back
 * (used when the data is known to be stale, e.g. a truncated block).
 */
void __bforget(struct buffer_head * buf)
{
	wait_on_buffer(buf);
	mark_buffer_clean(buf);
	buf->b_count--;
	remove_from_hash_queue(buf);
	buf->b_dev = NODEV;
	refile_buffer(buf);
	wake_up(&buffer_wait);
}
846
847
848
849
850
851 struct buffer_head * bread(kdev_t dev, int block, int size)
852 {
853 struct buffer_head * bh;
854
855 if (!(bh = getblk(dev, block, size))) {
856 printk("VFS: bread: READ error on device %s\n",
857 kdevname(dev));
858 return NULL;
859 }
860 if (buffer_uptodate(bh))
861 return bh;
862 ll_rw_block(READ, 1, &bh);
863 wait_on_buffer(bh);
864 if (buffer_uptodate(bh))
865 return bh;
866 brelse(bh);
867 return NULL;
868 }
869
870
871
872
873
874
875
#define NBUF 16

/*
 * breada() reads the requested block and starts asynchronous reads on
 * up to NBUF following blocks, bounded by the device's read-ahead
 * setting and the computed window.  Only the first block is waited
 * for; the read-ahead buffers are released immediately and complete
 * in the background.  Returns NULL if the requested block is
 * unreadable.
 */
struct buffer_head * breada(kdev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (buffer_uptodate(bh))
		return bh;

	/* NOTE(review): this window mixes file offsets masked to the
	 * buffer size with a 512-byte sector shift -- looks intentional
	 * but verify against the callers before changing. */
	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (buffer_uptodate(bh)) {
			/* hit an already-cached block: stop extending */
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
930
931
932
933
/*
 * Return a buffer head to the unused pool.  The whole structure is
 * wiped except for the wait queue, which is preserved (through a
 * volatile access so the compiler cannot reorder around the memset)
 * because sleepers may still be queued on it.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
944
/*
 * Carve a fresh page into buffer heads and push them onto the unused
 * list.  Failure to allocate is silent -- callers simply observe an
 * empty unused_list afterwards.
 */
static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_KERNEL)))
		return;

	/* account for all heads on the page, then chain them in */
	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
		bh->b_next_free = unused_list;	/* only make link */
		unused_list = bh++;
	}
}
961
962 static struct buffer_head * get_unused_buffer_head(void)
963 {
964 struct buffer_head * bh;
965
966 get_more_buffer_heads();
967 if (!unused_list)
968 return NULL;
969 bh = unused_list;
970 unused_list = bh->b_next_free;
971 bh->b_next_free = NULL;
972 bh->b_data = NULL;
973 bh->b_size = 0;
974 bh->b_state = 0;
975 return bh;
976 }
977
978
979
980
981
982
983
/*
 * Create the buffer-head ring describing one page split into
 * PAGE_SIZE/size buffers.  offset is unsigned, so the loop ends when
 * (offset -= size) wraps below zero to a huge value >= PAGE_SIZE.
 * Returns the head of the b_this_page chain (last buffer links to
 * NULL), or NULL on buffer-head exhaustion, in which case any partial
 * chain is returned to the unused pool.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = B_FREE;  /* Flag as unused */
	}
	return head;

/* In case anything failed, we just free everything we got. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
1014
1015 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1016 {
1017 ll_rw_block(READ, nrbuf, bh);
1018 bh += nrbuf;
1019 do {
1020 nrbuf--;
1021 bh--;
1022 wait_on_buffer(*bh);
1023 } while (nrbuf > 0);
1024 }
1025
/*
 * Read a page's worth of blocks into memory at `address` using
 * temporary buffer heads that never enter the global lists.  For each
 * entry of b[]: block 0 means a hole (zero-filled); a block already in
 * the cache is memcpy'd; the rest are read with the data landing
 * directly in the target page.  Returns 0 or -ENOMEM.
 */
int bread_page(unsigned long address, kdev_t dev, int b[], int size)
{
	struct buffer_head *bh, *next, *arr[MAX_BUF_PER_PAGE];
	int block, nr;

	bh = create_buffers(address, size);
	if (!bh)
		return -ENOMEM;
	nr = 0;
	next = bh;
	do {
		struct buffer_head * tmp;
		block = *(b++);
		if (!block) {
			/* hole: zero-fill in place */
			memset(next->b_data, 0, size);
			continue;
		}
		tmp = get_hash_table(dev, block, size);
		if (tmp) {
			/* already cached: copy instead of re-reading */
			memcpy(next->b_data, tmp->b_data, size);
			brelse(tmp);
			continue;
		}
		arr[nr++] = next;
		next->b_dev = dev;
		next->b_blocknr = block;
		next->b_count = 1;
		next->b_flushtime = 0;
		clear_bit(BH_Dirty, &next->b_state);
		clear_bit(BH_Uptodate, &next->b_state);
		clear_bit(BH_Req, &next->b_state);
		next->b_list = BUF_CLEAN;
	} while ((next = next->b_this_page) != NULL);

	if (nr)
		read_buffers(arr,nr);
	++current->maj_flt;

	/* release the temporary buffer heads back to the unused pool */
	while ((next = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(next);
	}
	return 0;
}
1070
#if 0
/*
 * Currently compiled out: write a page's worth of data to the
 * corresponding blocks via the buffer cache.  Null block numbers in
 * b[] zero the matching slice of the page instead of writing it.
 */
void bwrite_page(unsigned long address, kdev_t dev, int b[], int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	int i, j;

	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, address += size) {
		if (bh[i]) {
			memcpy(bh[i]->b_data, (void *) address, size);
			mark_buffer_uptodate(bh[i], 1);
			mark_buffer_dirty(bh[i], 0);
			brelse(bh[i]);
		} else
			memset((void *) address, 0, size); /* ???!?!! */
	}
}
#endif
1097
1098
1099
1100
1101
/*
 * Grow the buffer cache by one page of buffers of the given size,
 * linking them into the free list of that size class as a group.
 * Returns 1 on success, 0 on bad size or allocation failure.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			/* first buffer on an empty free list: self-link */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	/* close the per-page ring of buffer heads */
	tmp->b_this_page = bh;
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}
1152
1153
1154
1155
1156
1157
1158
1159
/*
 * Try to free the page underlying bh.  Possible only when every buffer
 * on the page is unused, clean, unlocked and unwaited-for (and, when
 * priority is set, untouched).  *bhp is retargeted if bh itself is
 * torn down so callers can continue a circular-list scan; it becomes
 * NULL if the list is emptied.  Returns 1 when the page is now truly
 * free (no other users held a reference).
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp,
		       int priority)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	/* first pass: verify every buffer on the page is reclaimable */
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp) || tmp->b_wait)
			return 0;
		if (priority && buffer_touched(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* second pass: tear the buffers down and release the page */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp)
		{
			*bhp = p->b_prev_free;
			if (p == *bhp) /* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	/* count is zero only when nobody else still maps the page */
	return !mem_map[MAP_NR(page)].count;
}
1201
1202
1203
/*
 * Age the page a buffer sits on.  All buffers sharing a page share the
 * page's age, so the per-page work must happen only once per scan:
 * BH_Has_aged is set on the *other* buffers of the ring so their later
 * visits bail out early (note clear_bit/set_bit here return the
 * previous bit value).  The page counts as "touched" if any buffer on
 * it was touched since the last pass.
 */
static inline void age_buffer(struct buffer_head *bh)
{
	struct buffer_head *tmp = bh;
	int touched = 0;

	/* already handled via another buffer on the same page this scan */
	if (clear_bit(BH_Has_aged, &bh->b_state))
		return;

	do {
		touched |= clear_bit(BH_Touched, &tmp->b_state);
		tmp = tmp->b_this_page;
		/* marks the ring members, ending with bh itself... */
		set_bit(BH_Has_aged, &tmp->b_state);
	} while (tmp != bh);
	/* ...so clear the flag on bh again: only the others stay marked */
	clear_bit(BH_Has_aged, &bh->b_state);

	if (touched)
		touch_page((unsigned long) bh->b_data);
	else
		age_page((unsigned long) bh->b_data);
}
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
/*
 * Using the per-size load averages, find a buffer size class whose
 * share of activity (lav) is low relative to its share of the cache
 * and shrink it so its pages can be reused -- typically to grow
 * buffers of `size` (whose own class is exempted).  Shared buffers are
 * excluded from the accounting.  Returns 1 if something was freed.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Sum activity and (non-shared) population over all size classes. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* Index of the class we are refilling; never victimize it. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	/* Victimize a class whose lav share is below its buffer share. */
	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
/*
 * Memory-pressure entry point for the buffer cache.  At high pressure
 * (priority < 2) everything is synced first; at priority 2 bdflush is
 * woken; then load-average-based shrinking is tried before the generic
 * scan.  Returns nonzero when a page was freed.  (`limit` is accepted
 * for the caller's interface but unused here.)
 */
int shrink_buffers(unsigned int priority, unsigned long limit)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	if(maybe_shrink_lav_buffers(0)) return 1;

	/* No good candidate size found: shrink any size. */
	return shrink_specific_buffers(priority, 0);
}
1306
/*
 * Try to release one page of buffers (optionally restricted to one
 * buffer size).  First look for idle complete pages on the free lists,
 * then scan the LRU lists from the aging cursor; lower priority values
 * scan more of each list and are allowed to sleep on locked buffers.
 * Dirty buffers encountered get an async write-ahead instead of being
 * freed.  Returns 1 as soon as a page is freed, else 0.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists: a complete idle page can be freed
	 * without touching live data. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				continue;
			if (!age_of((unsigned long) bh->b_data) &&
			    try_to_free(bh, &bh, 6))
				return 1;
			/* try_to_free emptied the list through *bhp */
			if(!bh) break;
		}
	}

	/* Not enough on the free lists: scan the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 2 && nlist == BUF_SHARED) continue;
		/* number of entries to examine, scaled by priority */
		i = nr_buffers_type[nlist];
		i = ((BUFFEROUT_WEIGHT * i) >> 10) >> priority;
		for ( ; i > 0; i-- ) {
			bh = next_to_age[nlist];
			if (!bh)
				break;
			next_to_age[nlist] = bh->b_next_free;

			/* First, age the buffer's page. */
			age_buffer(bh);
			/* the buffer may have moved lists while we worked */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			/* NB: the else below binds to "if (priority)" --
			 * only priority 0 sleeps on locked buffers */
			if (buffer_locked(bh))
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (buffer_dirty(bh)) {
				/* start an async write and move on */
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			/* skip pages still too young for this priority */
			if ((age_of((unsigned long) bh->b_data) >>
			     (6-priority)) > 0)
				continue;
			if (try_to_free(bh, &bh, 0))
				return 1;
			if(!bh) break;
		}
	}
	return 0;
}
1384
1385
1386
1387
/*
 * Dump buffer-cache statistics to the console: totals, then per-LRU
 * list usage counts, then a per-size table of free/typed buffer
 * counts with load averages.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int protected = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n",buffermem>>10);
	printk("Buffer heads: %6d\n",nr_buffer_heads);
	printk("Buffer blocks: %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = protected = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		do {
			found++;
			if (buffer_locked(bh))
				locked++;
			if (buffer_protected(bh))
				protected++;
			if (buffer_dirty(bh))
				dirty++;
			/* page mapped by more than the cache counts as shared */
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))].count !=1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, "
		       "%d protected, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, protected, dirty, shared);
	};
	printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared \n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1430
1431
1432
1433
1434
1435
1436
1437
/*
 * Try to re-purpose the whole page that bh lives on as a cluster of
 * buffers for (dev, starting_block..).  Succeeds only when the page is
 * unshared and every buffer on it is completely idle; on success all
 * buffers on the page are rekeyed and re-hashed in block order.
 *
 * Returns 1 on success, 0 if the page cannot be taken over.
 * On return, *bhp is set to bh (callers pass &bh for their cursor).
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  kdev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* The page must not be mapped/shared by anyone else. */
	if(mem_map[MAP_NR(page)].count != 1) return 0;
	tmp = bh;
	/* Walk the circular per-page ring: every buffer must be idle,
	 * unprotected, clean and unlocked, or we give up untouched. */
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Advance to the first buffer of the page (the one whose b_data
	 * is page aligned), so blocks are assigned in ascending order. */
	while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* Rekey each buffer on the page: pull it off the hash/lru queues,
	 * point it at the new device/block, mark not-uptodate and clear
	 * the "request seen" bit, then reinsert. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		mark_buffer_uptodate(p, 0);
		clear_bit(BH_Req, &p->b_state);
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492 static int reassign_cluster(kdev_t dev,
1493 unsigned int starting_block, int size)
1494 {
1495 struct buffer_head *bh;
1496 int isize = BUFSIZE_INDEX(size);
1497 int i;
1498
1499
1500
1501
1502
1503 while(nr_free[isize] < 32) refill_freelist(size);
1504
1505 bh = free_list[isize];
1506 if(bh)
1507 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1508 if (!bh->b_this_page) continue;
1509 if (try_to_reassign(bh, &bh, dev, starting_block))
1510 return 4;
1511 }
1512 return 0;
1513 }
1514
1515
1516
1517
1518
/*
 * Allocate a brand new page, carve it into size-byte buffers, and
 * register them as a cluster of consecutive blocks starting at `block`
 * on `dev`.  Returns 4 on success, 0 on failure (no memory, no buffer
 * heads, or one of the blocks is already present in the cache).
 */
static unsigned long try_to_generate_cluster(kdev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* If any block of the would-be cluster already has a buffer in
	 * the cache, we cannot create an aligned cluster here. */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	/* Initialize each buffer and insert it into the hash/lru queues.
	 * b_count is set to 1 so nobody steals them while we work; they
	 * are released via brelse() below. */
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_flushtime = 0;
		bh->b_state = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	/* Close the per-page ring: last buffer points back to the first. */
	bh->b_this_page = tmp;
	/* Drop our temporary references. */
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* Undo: the chain from create_buffers() is NULL-terminated at
	 * this point, so walk it and return the heads and the page. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1572
1573 unsigned long generate_cluster(kdev_t dev, int b[], int size)
1574 {
1575 int i, offset;
1576
1577 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1578 if(i && b[i]-1 != b[i-1]) return 0;
1579 if(find_buffer(dev, b[i], size)) return 0;
1580 };
1581
1582
1583
1584
1585
1586 if(maybe_shrink_lav_buffers(size))
1587 {
1588 int retval;
1589 retval = try_to_generate_cluster(dev, b[0], size);
1590 if(retval) return retval;
1591 };
1592
1593 if (nr_free_pages > min_free_pages*2)
1594 return try_to_generate_cluster(dev, b[0], size);
1595 else
1596 return reassign_cluster(dev, b[0], size);
1597 }
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609 void buffer_init(void)
1610 {
1611 int i;
1612 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1613 long memsize = MAP_NR(high_memory) << PAGE_SHIFT;
1614
1615 if (memsize >= 4*1024*1024) {
1616 if(memsize >= 16*1024*1024)
1617 nr_hash = 16381;
1618 else
1619 nr_hash = 4093;
1620 } else {
1621 nr_hash = 997;
1622 };
1623
1624 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1625 sizeof(struct buffer_head *));
1626
1627
1628 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1629 sizeof(struct buffer_head *));
1630 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1631 buffer_pages[i] = NULL;
1632
1633 for (i = 0 ; i < nr_hash ; i++)
1634 hash_table[i] = NULL;
1635 lru_list[BUF_CLEAN] = 0;
1636 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1637 if (!free_list[isize])
1638 panic("VFS: Unable to initialize buffer free list!");
1639 return;
1640 }
1641
1642
1643
1644
1645
1646
1647
1648
/* bdflush sleeps on bdflush_wait when idle; callers that request a
 * synchronous flush (wakeup_bdflush(1)) sleep on bdflush_done, which
 * bdflush wakes after each pass. */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;
1651
1652 static void wakeup_bdflush(int wait)
1653 {
1654 wake_up(&bdflush_wait);
1655 if(wait) sleep_on(&bdflush_done);
1656 }
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
/*
 * Write back dirty buffers whose flushtime has expired, after syncing
 * superblocks and inodes.  Also recomputes the per-size buffer load
 * average.  Called periodically (and via sys_bdflush(1)).
 * Always returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
	/* Normally only the dirty list needs scanning; with DEBUG we scan
	 * all lists and count dirty buffers found off the dirty list. */
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* The buffer may have been refiled while we
				 * slept; restart the scan if it moved lists. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean, unlocked buffer on the dirty list:
				 * move it to where it belongs. */
				if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
				{
					refile_buffer(bh);
					continue;
				}

				if (buffer_locked(bh) || !buffer_dirty(bh))
					continue;
				ndirty++;
				/* Not old enough to flush yet. */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				/* Pin the buffer across the write request. */
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Decay the per-size buffer-usage load averages. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1734
1735
1736
1737
1738
1739
1740
1741 asmlinkage int sys_bdflush(int func, long data)
1742 {
1743 int i, error;
1744
1745 if (!suser())
1746 return -EPERM;
1747
1748 if (func == 1)
1749 return sync_old_buffers();
1750
1751
1752 if (func >= 2) {
1753 i = (func-2) >> 1;
1754 if (i < 0 || i >= N_PARAM)
1755 return -EINVAL;
1756 if((func & 1) == 0) {
1757 error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1758 if (error)
1759 return error;
1760 put_user(bdf_prm.data[i], (int*)data);
1761 return 0;
1762 };
1763 if (data < bdflush_min[i] || data > bdflush_max[i])
1764 return -EINVAL;
1765 bdf_prm.data[i] = data;
1766 return 0;
1767 };
1768
1769
1770
1771
1772
1773 return 0;
1774 }
1775
1776
1777
1778
1779
/*
 * The kernel buffer-flushing daemon.  Loops forever: each pass writes
 * out up to bdf_prm.b_un.ndirty dirty buffers, wakes any synchronous
 * waiters on bdflush_done, and goes back to sleep on bdflush_wait once
 * the fraction of dirty buffers drops below bdf_prm.b_un.nfract.
 * Never returns.
 */
int bdflush(void * unused) {

	int i;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sprintf(current->comm, "kernel bdflush");

	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
		/* Normally only the dirty list is scanned; with DEBUG we
		 * scan all lists and count misfiled dirty buffers. */
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				/* Stop after ndirty writes per pass to bound
				 * the amount of I/O queued at once. */
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* Buffer moved lists under us (we may
					 * have slept): restart the scan. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean, unlocked buffer sitting on the
					 * dirty list: refile it instead. */
					if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
					{
						refile_buffer(bh);
						continue;
					}

					if (buffer_locked(bh) || !buffer_dirty(bh))
						continue;

					/* Pin the buffer across the write. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		/* Release anyone waiting for a synchronous flush. */
		wake_up(&bdflush_done);

		/* Sleep only if the dirty fraction (of non-shared buffers)
		 * is back under the configured threshold. */
		if(nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874