This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- __brelse
- __bforget
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- bread_page
- bwrite_page
- grow_buffers
- try_to_free
- age_buffer
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
- bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 #include <linux/sched.h>
22 #include <linux/kernel.h>
23 #include <linux/major.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26 #include <linux/errno.h>
27 #include <linux/malloc.h>
28 #include <linux/swapctl.h>
29
30 #include <asm/system.h>
31 #include <asm/segment.h>
32 #include <asm/io.h>
33
34 #define NR_SIZES 4
35 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
36 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
37
38 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
39 #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
40
41 static int grow_buffers(int pri, int size);
42 static int shrink_specific_buffers(unsigned int priority, int size);
43 static int maybe_shrink_lav_buffers(int);
44
45 static int nr_hash = 0;
46 static struct buffer_head ** hash_table;
47 struct buffer_head ** buffer_pages;
48 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
49
50
51
52 static struct buffer_head * next_to_age[NR_LIST] = {NULL, };
53 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
54 static struct buffer_head * unused_list = NULL;
55 static struct wait_queue * buffer_wait = NULL;
56
57 int nr_buffers = 0;
58 int nr_buffers_type[NR_LIST] = {0,};
59 int nr_buffers_size[NR_SIZES] = {0,};
60 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
61 int buffer_usage[NR_SIZES] = {0,};
62 int buffers_lav[NR_SIZES] = {0,};
63 int nr_free[NR_SIZES] = {0,};
64 int buffermem = 0;
65 int nr_buffer_heads = 0;
66 extern int *blksize_size[];
67
68
69 static void wakeup_bdflush(int);
70
71 #define N_PARAM 9
72 #define LAV
73
/*
 * Tunable parameters for the bdflush daemon, settable via sys_bdflush().
 * The union lets the fields be addressed both by name (b_un) and by
 * index (data[]); the indexed form is range-checked against
 * bdflush_min[]/bdflush_max[] in sys_bdflush().
 */
static union bdflush_param{
	struct {
		int nfract;	/* Percentage of buffer cache dirty to activate bdflush */
		int ndirty;	/* Maximum number of dirty blocks to write out per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain each time we call refill */
		int nref_dirt;	/* Dirty buffer threshold for activating bdflush when refilling */
		int clu_nfract;	/* Percentage of buffer cache to scan for free clusters */
		int age_buffer;	/* Time (jiffies) a normal buffer may age before we flush it */
		int age_super;	/* Time (jiffies) a superblock buffer may age before we flush it */
		int lav_const;	/* Time constant used in the load-average calculation */
		int lav_ratio;	/* How low the lav of a size class may go before it is trimmed */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};
98
99
100
101
102
103
104
105 static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
106 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
107
108
109
110
111
112
113
114
115
116
/*
 * Sleep until a buffer is unlocked.  Called via the wait_on_buffer()
 * wrapper when the buffer is seen locked.  b_count is bumped so the
 * buffer cannot be freed out from under the sleeper.  The task state is
 * set to TASK_UNINTERRUPTIBLE *before* the lock bit is re-tested, so a
 * wakeup arriving between the test and schedule() cannot be lost.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;		/* pin the buffer while we sleep on it */
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (buffer_locked(bh)) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
133
134
135
136
137
138
139
140
141
142
143
/*
 * Write out (and, when "wait" is set, wait for) all dirty buffers of a
 * device; dev == 0 means all devices.  With wait set, up to three passes
 * are made: pass 0 starts writes on everything dirty, pass 1 waits for
 * buffers that were locked (catching those dirtied in the meantime), and
 * pass 2 only waits, to make sure everything started has completed.
 * Returns non-zero if some buffer failed to become uptodate (I/O error).
 */
static int sync_buffers(kdev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* Walk every LRU list: dirty buffers should all be on BUF_DIRTY,
	 * but scan the others too and report any strays found. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		/* The *2 gives slack: the list can change under us whenever
		 * we sleep, so we may traverse more entries than counted. */
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;	/* bh was refiled: restart this list */
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (buffer_locked(bh))
			{
				/* On the first pass just remember that a locked
				 * buffer exists; on later passes wait for it. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				goto repeat2;	/* lists may have changed while asleep */
			}
			/* Requested, clean, but not uptodate: the write failed. */
			if (wait && buffer_req(bh) && !buffer_locked(bh) &&
			    !buffer_dirty(bh) && !buffer_uptodate(bh)) {
				err = 1;
				continue;
			}
			/* Don't start new writes during the final wait-only pass. */
			if (!buffer_dirty(bh) || pass>=2)
				continue;
			/* Don't bother about locked buffers. */
			if (buffer_locked(bh))
				continue;
			bh->b_count++;		/* pin across the write request */
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if(nlist != BUF_DIRTY) {
				/* Dirty buffer found off the dirty list: report it. */
				printk("[%d %s %ld] ", nlist,
				       kdevname(bh->b_dev), bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount)
		printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* Allow at most two more passes when waiting: a buffer redirtied
	 * as fast as we flush it must not make us loop forever. */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
220
/*
 * Start write-out of everything on a device without waiting for
 * completion.  Buffers are synced both before and after the supers and
 * inodes, since syncing those may dirty further buffers.
 */
void sync_dev(kdev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
	sync_dquots(dev, -1);
}
229
/*
 * Like sync_dev(), but the final buffer pass waits for completion and
 * returns non-zero if any buffer failed to write.
 */
int fsync_dev(kdev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_dquots(dev, -1);
	return sync_buffers(dev, 1);
}
238
/* sync(2): flush all devices (dev 0 == everything); always succeeds. */
asmlinkage int sys_sync(void)
{
	fsync_dev(0);
	return 0;
}
244
/*
 * Generic fsync implementation usable as a file_operations->fsync
 * method: syncs the entire device holding the inode.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
249
250 asmlinkage int sys_fsync(unsigned int fd)
251 {
252 struct file * file;
253 struct inode * inode;
254
255 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
256 return -EBADF;
257 if (!file->f_op || !file->f_op->fsync)
258 return -EINVAL;
259 if (file->f_op->fsync(inode,file))
260 return -EIO;
261 return 0;
262 }
263
/*
 * Forget the cached contents of a device (e.g. after a media change):
 * every unused, settled buffer belonging to the device loses its
 * Uptodate, Dirty and Req bits.  Buffers still referenced (b_count != 0)
 * are left alone.
 */
void invalidate_buffers(kdev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			/* Re-check: the buffer may have been reused for
			 * another device while we slept. */
			if (bh->b_dev != dev)
				continue;
			if (bh->b_count)
				continue;
			bh->b_flushtime = 0;
			clear_bit(BH_Uptodate, &bh->b_state);
			clear_bit(BH_Dirty, &bh->b_state);
			clear_bit(BH_Req, &bh->b_state);
		}
	}
}
287
288 #define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))%nr_hash)
289 #define hash(dev,block) hash_table[_hashfn(dev,block)]
290
291 static inline void remove_from_hash_queue(struct buffer_head * bh)
292 {
293 if (bh->b_next)
294 bh->b_next->b_prev = bh->b_prev;
295 if (bh->b_prev)
296 bh->b_prev->b_next = bh->b_next;
297 if (hash(bh->b_dev,bh->b_blocknr) == bh)
298 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
299 bh->b_next = bh->b_prev = NULL;
300 }
301
/*
 * Unlink a buffer from its circular, doubly-linked LRU list, fixing up
 * the list head and the aging cursor (next_to_age) if either pointed at
 * the departing buffer.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == B_FREE)
		panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if (lru_list[bh->b_list] == bh)		/* bh was the only element */
		lru_list[bh->b_list] = NULL;
	if (next_to_age[bh->b_list] == bh)
		next_to_age[bh->b_list] = bh->b_next_free;
	if (next_to_age[bh->b_list] == bh)	/* ditto for the aging cursor */
		next_to_age[bh->b_list] = NULL;

	bh->b_next_free = bh->b_prev_free = NULL;
}
322
/*
 * Unlink a buffer from the circular free list of its size class and
 * decrement the free count.  The buffer must carry b_dev == B_FREE.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != B_FREE)
		panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if(bh->b_next_free == bh)	/* last element on the list */
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
343
344 static inline void remove_from_queues(struct buffer_head * bh)
345 {
346 if(bh->b_dev == B_FREE) {
347 remove_from_free_list(bh);
348
349 return;
350 };
351 nr_buffers_type[bh->b_list]--;
352 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
353 remove_from_hash_queue(bh);
354 remove_from_lru_list(bh);
355 }
356
/*
 * Move a buffer to the tail (most-recently-used end) of its LRU list.
 * If the buffer is currently the list head, simply advancing the head
 * suffices: the list is circular, so the old head becomes the tail.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		if (next_to_age[bh->b_list] == bh)
			next_to_age[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == B_FREE)
		panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Re-initialise the list if removal emptied it. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;

	/* Splice bh in just before the head, i.e. at the tail. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
384
/*
 * Mark a buffer free (b_dev = B_FREE) and append it to the tail of the
 * circular free list for its size class.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = B_FREE;	/* makes it obvious the buffer is on the free list */

	/* Re-initialise the list if it was empty. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	/* Splice in just before the head, i.e. at the tail. */
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
405
/*
 * Put a buffer on all the queues it belongs on: the free list for free
 * buffers; otherwise the tail of its LRU list, the per-type counters,
 * and — for buffers bound to a real device — the hash table.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if(bh->b_dev == B_FREE) {
		put_last_free(bh);
		return;
	}
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	}
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	/* Splice in at the tail of the circular LRU list. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Put the buffer in the new hash queue if it has a device. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!(bh->b_dev))
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
436
437 static inline struct buffer_head * find_buffer(kdev_t dev, int block, int size)
438 {
439 struct buffer_head * tmp;
440
441 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
442 if (tmp->b_dev == dev && tmp->b_blocknr == block)
443 if (tmp->b_size == size)
444 return tmp;
445 else {
446 printk("VFS: Wrong blocksize on device %s\n",
447 kdevname(dev));
448 return NULL;
449 }
450 return NULL;
451 }
452
453
454
455
456
457
458
459
/*
 * Look up a buffer in the hash table and return it with its reference
 * count incremented, or NULL on a miss.  Because wait_on_buffer() can
 * sleep, the buffer is re-validated after waking: if its identity
 * changed while we slept, the reference is dropped and the lookup
 * retried.
 */
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;		/* pin before possibly sleeping */
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block
					     && bh->b_size == size)
			return bh;
		bh->b_count--;		/* identity changed: retry the lookup */
	}
}
475
/*
 * Change the soft block size used for a device.  Validates the size,
 * flushes all pending I/O for the device, then invalidates every cached
 * buffer of the wrong size (clearing its state bits and unhashing it);
 * subsequent getblk() calls will allocate fresh buffers of the new size.
 */
void set_blocksize(kdev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);	/* write out and wait (pass 2 semantics) */
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Walk all LRU lists and forget wrong-sized buffers for this
	 * device.  The *2 bound gives slack since wait_on_buffer() below
	 * can sleep and the lists may change under us. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			/* Re-check after sleeping: bh may have been reused. */
			if (bh->b_dev == dev && bh->b_size != size) {
				clear_bit(BH_Dirty, &bh->b_state);
				clear_bit(BH_Uptodate, &bh->b_state);
				clear_bit(BH_Req, &bh->b_state);
				bh->b_flushtime = 0;
			}
			remove_from_hash_queue(bh);
		}
	}
}
522
523 #define BADNESS(bh) (buffer_dirty(bh) || buffer_locked(bh))
524
/*
 * Refill the free list for buffers of the given size.  Strategy, in
 * order: grow new pages while memory is plentiful; shrink other,
 * under-used buffer size classes; harvest the least-recently-used
 * clean, unshared buffers from the LRU lists via a per-list candidate
 * election; and, as a last resort, grow with GFP_ATOMIC and wake
 * bdflush.  "needed" counts down the bytes still wanted (nrefill
 * buffers' worth).
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* Plenty of free buffers of this size already: nothing to do. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed =bdf_prm.b_un.nrefill * size;

	/* Grow freely while memory is abundant. */
	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* See if there are too many buffers of a different size.
	 * If so, victimize those and grow this class instead. */
	while(maybe_shrink_lav_buffers(size))
	 {
		 if(!grow_buffers(GFP_BUFFER, size)) break;
		 needed -= PAGE_SIZE;
		 if(needed <= 0) return;
	 };

	/* OK, we cannot grow the buffer cache.  Try to reclaim from the
	 * LRU lists instead.  First pick a candidate (oldest usable
	 * buffer) from each eligible list. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		 {
			 if(buffers[i] < 0) panic("Here is the problem");
			 tmp = bh->b_next_free;
			 if (!bh) break;

			 if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
			     buffer_dirty(bh)) {
				 refile_buffer(bh);	/* belongs on another list now */
				 continue;
			 }

			 if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
				  continue;

			 /* Buffers are written in the order they are placed
			  * on the locked list: hitting a locked buffer here
			  * means the rest of the list is locked too. */
			 if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				 buffers[i] = 0;
				 break;
			 }

			 if (BADNESS(bh)) continue;
			 break;
		 };
		if(!buffers[i]) candidate[i] = NULL; /* nothing usable on this list */
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Elect the candidate with the oldest LRU time. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, free it and refresh that list's candidate. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;	/* took the last one */
		if (bh->b_count || bh->b_size != size)
			 panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1)
			 panic("Shared buffer in candidate list\n");
		if (buffer_protected(bh))
			panic("Protected buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == B_FREE)
			panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = B_FREE;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Find a new candidate on this list, same criteria as above. */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
				    buffer_dirty(bh)) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
					 continue;

				/* Locked-list ordering argument as above. */
				if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL; /* nothing left here */
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				 panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Too bad, that was not enough.  Try a little harder to grow. */
	if (nr_free_pages > min_free_pages + 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* Last resort: atomic allocation, and wake bdflush on failure. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}
708
709
710
711
712
713
714
715
716
717
718
/*
 * Return the buffer for (dev, block), allocating a free buffer if it is
 * not cached.  The returned buffer has its reference count incremented
 * and its Touched bit set, but its contents are NOT guaranteed uptodate:
 * callers needing valid data must read it in (see bread()).
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size lav. */
	buffer_usage[isize]++;

	/* Fast path: already in the cache. */
repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (!buffer_dirty(bh)) {
			if (buffer_uptodate(bh))
				 put_last_lru(bh);	/* freshen its LRU position */
			bh->b_flushtime = 0;
		}
		set_bit(BH_Touched, &bh->b_state);
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist() may have slept: someone else may have read the
	 * block in meanwhile, so check again before claiming it. */
	if (find_buffer(dev,block,size))
		 goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	 * and that it's unused (b_count=0), unlocked, and clean. */
	bh->b_count=1;
	bh->b_flushtime=0;
	bh->b_state=(1<<BH_Touched);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
760
761 void set_writetime(struct buffer_head * buf, int flag)
762 {
763 int newtime;
764
765 if (buffer_dirty(buf)) {
766
767 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
768 bdf_prm.b_un.age_buffer);
769 if(!buf->b_flushtime || buf->b_flushtime > newtime)
770 buf->b_flushtime = newtime;
771 } else {
772 buf->b_flushtime = 0;
773 }
774 }
775
776
/*
 * A buffer may need to move to a different LRU list as its state
 * changes (dirty, shared, locked, clean...).  Work out where it now
 * belongs and move it there, waking bdflush when the dirty list has
 * grown past the configured nfract percentage of the cache.
 */
void refile_buffer(struct buffer_head * buf)
{
	int dispose;

	if(buf->b_dev == B_FREE) {
		printk("Attempt to refile free buffer\n");
		return;
	}
	if (buffer_dirty(buf))
		dispose = BUF_DIRTY;
	else if ((mem_map[MAP_NR((unsigned long) buf->b_data)].count > 1) || buffer_protected(buf))
		dispose = BUF_SHARED;
	else if (buffer_locked(buf))
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;		/* was shared, not any more */
	else
		dispose = BUF_CLEAN;
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list)  {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			 buf->b_lru_time = jiffies;
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			 dispose = BUF_LOCKED1;	/* locked buffers written back quickly */
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			 wakeup_bdflush(0);
	}
}
811
812
813
814
/*
 * Release a buffer head: refile it, stamp its write-back time, and drop
 * one reference.  Waking buffer_wait when the count hits zero lets
 * sleepers waiting for free buffers retry.
 */
void __brelse(struct buffer_head * buf)
{
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (!--buf->b_count)
			wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
830
831
832
833
834
835
/*
 * bforget() is like brelse(), only the data is thrown away: the buffer
 * is marked clean, unhashed, and disassociated from its device so it
 * will be neither written back nor found by future lookups.
 */
void __bforget(struct buffer_head * buf)
{
	wait_on_buffer(buf);
	mark_buffer_clean(buf);
	buf->b_count--;
	remove_from_hash_queue(buf);
	buf->b_dev = NODEV;
	refile_buffer(buf);
	wake_up(&buffer_wait);
}
846
847
848
849
850
851 struct buffer_head * bread(kdev_t dev, int block, int size)
852 {
853 struct buffer_head * bh;
854
855 if (!(bh = getblk(dev, block, size))) {
856 printk("VFS: bread: READ error on device %s\n",
857 kdevname(dev));
858 return NULL;
859 }
860 if (buffer_uptodate(bh))
861 return bh;
862 ll_rw_block(READ, 1, &bh);
863 wait_on_buffer(bh);
864 if (buffer_uptodate(bh))
865 return bh;
866 brelse(bh);
867 return NULL;
868 }
869
870
871
872
873
874
875
876 #define NBUF 16
877
/*
 * Read-ahead variant of bread(): reads the requested block and starts
 * asynchronous reads of up to NBUF following blocks, bounded by the
 * device's read_ahead setting and by the file size (pos/filesize stop
 * read-ahead near EOF).  Only the requested block is waited for.
 */
struct buffer_head * breada(kdev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (buffer_uptodate(bh))
		return bh;

	/* NOTE(review): this bound mixes "filesize & (bufsize-1)" with a
	 * shift by (9+index); it limits read-ahead near EOF but the exact
	 * arithmetic looks suspicious — verify before changing. */
	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);	/* already cached: stop read-ahead here */
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the reads for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
930
931
932
933
/*
 * Return a buffer head to the unused pool.  Everything is wiped except
 * b_wait: a waiter may still be queued on the head, so the wait queue
 * pointer must survive the memset (hence the volatile accesses, which
 * keep the compiler from eliding the save/restore).
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
944
945 static void get_more_buffer_heads(void)
946 {
947 int i;
948 struct buffer_head * bh;
949
950 if (unused_list)
951 return;
952
953 if (!(bh = (struct buffer_head*) get_free_page(GFP_KERNEL)))
954 return;
955
956 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
957 bh->b_next_free = unused_list;
958 unused_list = bh++;
959 }
960 }
961
/*
 * Pop a buffer head off the unused pool, replenishing it first if
 * needed.  Returns NULL only when no memory could be obtained.  The
 * returned head has its data/size/state fields cleared.
 */
static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_state = 0;
	return bh;
}
977
978
979
980
981
982
983
/*
 * Create the buffer heads covering one page of memory: PAGE_SIZE/size
 * heads chained through b_this_page, each pointing at its slice of the
 * page.  Built back-to-front so the returned head covers offset 0.  The
 * loop condition relies on the unsigned offset wrapping around when it
 * drops below zero.  On head-allocation failure everything obtained so
 * far is returned to the pool and NULL is returned.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = B_FREE;	/* flag as unused */
	}
	return head;

/* In case anything failed, free everything we got. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
1014
1015 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1016 {
1017 ll_rw_block(READ, nrbuf, bh);
1018 bh += nrbuf;
1019 do {
1020 nrbuf--;
1021 bh--;
1022 wait_on_buffer(*bh);
1023 } while (nrbuf > 0);
1024 }
1025
/*
 * Read up to MAX_BUF_PER_PAGE blocks into one page of memory in a
 * single pass.  b[] lists the block numbers; 0 marks a hole, which is
 * zero-filled.  Blocks already in the cache are copied rather than
 * re-read.  Temporary buffer heads carry the I/O directly into the page
 * at `address` and are released afterwards.  Returns 0 or -ENOMEM.
 */
int bread_page(unsigned long address, kdev_t dev, int b[], int size)
{
	struct buffer_head *bh, *next, *arr[MAX_BUF_PER_PAGE];
	int block, nr;

	bh = create_buffers(address, size);
	if (!bh)
		return -ENOMEM;
	nr = 0;
	next = bh;
	do {
		struct buffer_head * tmp;
		block = *(b++);
		if (!block) {
			/* Hole in the file: supply zeroes. */
			memset(next->b_data, 0, size);
			continue;
		}
		tmp = get_hash_table(dev, block, size);
		if (tmp) {
			/* Cached: make sure it is uptodate, then copy. */
			if (!buffer_uptodate(tmp)) {
				ll_rw_block(READ, 1, &tmp);
				wait_on_buffer(tmp);
			}
			memcpy(next->b_data, tmp->b_data, size);
			brelse(tmp);
			continue;
		}
		/* Set the temporary head up for a real device read. */
		arr[nr++] = next;
		next->b_dev = dev;
		next->b_blocknr = block;
		next->b_count = 1;
		next->b_flushtime = 0;
		clear_bit(BH_Dirty, &next->b_state);
		clear_bit(BH_Uptodate, &next->b_state);
		clear_bit(BH_Req, &next->b_state);
		next->b_list = BUF_CLEAN;
	} while ((next = next->b_this_page) != NULL);

	if (nr)
		read_buffers(arr,nr);
	++current->maj_flt;	/* account the page-in as a major fault */

	/* Release the temporary heads. */
	while ((next = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(next);
	}
	return 0;
}
1074
1075 #if 0
1076
1077
1078
1079
/*
 * Write a page of memory out to the blocks listed in b[] (0 = hole).
 * Compiled out via #if 0 — kept for reference only.
 */
void bwrite_page(unsigned long address, kdev_t dev, int b[], int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	int i, j;

	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, address += size) {
		if (bh[i]) {
			memcpy(bh[i]->b_data, (void *) address, size);
			mark_buffer_uptodate(bh[i], 1);
			mark_buffer_dirty(bh[i], 0);
			brelse(bh[i]);
		} else
			/* NOTE(review): this zeroes the *source* page slice
			 * for holes, not the device — confirm intent. */
			memset((void *) address, 0, size);
	}
}
1100 #endif
1101
1102
1103
1104
1105
/*
 * Try to add one page's worth of buffers of the given size to the free
 * list.  The new heads are linked into the circular free list, the page
 * is registered in buffer_pages[], and the per-page b_this_page chain is
 * closed into a ring.  Returns 1 on success, 0 on bad size or no memory.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			/* Splice after the current insertion point. */
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			/* First element of a new circular list. */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;	/* close the per-page ring */
	wake_up(&buffer_wait);	/* free buffers available: wake sleepers */
	buffermem += PAGE_SIZE;
	return 1;
}
1156
1157
1158
1159
1160
1161
1162
1163
/*
 * Try to free the page a buffer lives on.  Succeeds only when every
 * buffer sharing the page is unused, clean, unlocked, unwaited-for and
 * (when priority is set) untouched.  *bhp is kept valid for the
 * caller's list walk: if it pointed into the freed page it is backed up
 * to the previous free-list entry, or NULL when none remain.  Returns 1
 * when the page was actually handed back to the page allocator.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp,
		       int priority)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	/* First pass: verify every buffer on the page is freeable. */
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp) || tmp->b_wait)
			return 0;
		if (priority && buffer_touched(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* Second pass: dismantle the page's buffers. */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp)
		  {
		    *bhp = p->b_prev_free;
		    if (p == *bhp)	/* was this the last in the list? */
		      *bhp = NULL;
		  }
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)].count;
}
1205
1206
1207
/*
 * Age the page a buffer lives on: if any buffer of the page was touched
 * since the last pass the page is marked young (touch_page), otherwise
 * it is aged towards reclaim (age_page).  BH_Has_aged is set on the
 * page's sibling buffers so each page is processed only once per scan,
 * whichever of its buffers is reached first.
 */
static inline void age_buffer(struct buffer_head *bh)
{
	struct buffer_head *tmp = bh;
	int touched = 0;

	/* A sibling already aged this page during the current scan:
	 * consume the flag and do nothing. */
	if (clear_bit(BH_Has_aged, &bh->b_state))
		return;

	do {
		touched |= clear_bit(BH_Touched, &tmp->b_state);
		tmp = tmp->b_this_page;
		set_bit(BH_Has_aged, &tmp->b_state);
	} while (tmp != bh);
	clear_bit(BH_Has_aged, &bh->b_state);	/* bh itself must not stay flagged */

	if (touched)
		touch_page((unsigned long) bh->b_data);
	else
		age_page((unsigned long) bh->b_data);
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
/*
 * Check whether some buffer size class sees so little use (by its load
 * average, buffers_lav) relative to its share of the cache that it
 * should give memory back.  `size` names the class we are trying to
 * grow (excluded from shrinking); 0 means no preference.  Returns 1
 * when a class was actually shrunk.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Sum the usage statistics, discounting shared buffers since they
	 * cannot be freed anyway. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	 {
		 total_lav += buffers_lav[nlist];
		 if(nr_buffers_size[nlist]) n_sizes++;
		 total_n_buffers += nr_buffers_size[nlist];
		 total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	 }

	/* See if we have an excessive number of buffers of a particular
	 * size: a class whose share of the cache exceeds its share of the
	 * load (scaled by lav_const) gets victimized. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		 for(nlist = 0; nlist < NR_SIZES; nlist++)
		  {
			  if(nlist == isize) continue;
			  if(nr_buffers_size[nlist] &&
			     bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			     total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				   if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					    return 1;
		  }
	return 0;
}
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
/*
 * Entry point for memory-pressure shrinking of the buffer cache.  At
 * high urgency (priority < 2) everything is synced first; at priority 2
 * bdflush is woken instead.  An over-represented size class is shrunk
 * if one exists; otherwise a general shrink is attempted.  Returns 1
 * when a page was freed.
 */
int shrink_buffers(unsigned int priority, unsigned long limit)
{
	if (priority < 2)
		sync_buffers(0, 0);
	else if (priority == 2)
		wakeup_bdflush(1);

	if (maybe_shrink_lav_buffers(0))
		return 1;

	return shrink_specific_buffers(priority, 0);
}
1310
/*
 * Try to release one page of buffer memory, preferring buffers of the
 * given size (0 = any).  First the free lists are scanned for pages
 * that are already idle; then the LRU lists are walked via the
 * next_to_age cursor, aging pages and starting asynchronous write-back
 * of dirty buffers met along the way.  Returns 1 when a page was freed.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	 * from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				 continue;
			if (!age_of((unsigned long) bh->b_data) &&
			    try_to_free(bh, &bh, 6))
				 return 1;
			if(!bh) break;	/* try_to_free emptied the list */
		}
	}

	/* Not enough in the free lists: now walk the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 2 && nlist == BUF_SHARED) continue;
		/* Scan a priority-scaled fraction of the list. */
		i = nr_buffers_type[nlist];
		i = ((BUFFEROUT_WEIGHT * i) >> 10) >> priority;
		for ( ; i > 0; i-- ) {
			bh = next_to_age[nlist];
			if (!bh)
				break;
			next_to_age[nlist] = bh->b_next_free;

			/* First, age the page this buffer is on. */
			age_buffer(bh);
			/* We may have stalled while waiting for I/O
			 * to complete: the buffer may have moved lists. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				 continue;
			if(size && bh->b_size != size) continue;
			if (buffer_locked(bh))
				 if (priority)	/* else binds here: wait only at priority 0 */
					  continue;
				 else
					  wait_on_buffer(bh);
			if (buffer_dirty(bh)) {
				/* Start best-effort write-back and move on. */
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			/* At low urgency only the oldest pages qualify. */
			if ((age_of((unsigned long) bh->b_data) >>
			     (6-priority)) > 0)
				continue;
			if (try_to_free(bh, &bh, 0))
				 return 1;
			if(!bh) break;
		}
	}
	return 0;
}
1388
1389
1390
1391
/*
 * Dump buffer-cache statistics to the console: overall totals, then
 * per-LRU-list usage counts, then a per-size table of free counts and
 * per-state buffer counts.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int protected = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n",buffermem>>10);
	printk("Buffer heads: %6d\n",nr_buffer_heads);
	printk("Buffer blocks: %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
	  shared = found = locked = dirty = used = lastused = protected = 0;
	  bh = lru_list[nlist];
	  if(!bh) continue;
	  do {
		found++;
		if (buffer_locked(bh))
			locked++;
		if (buffer_protected(bh))
			protected++;
		if (buffer_dirty(bh))
			dirty++;
		/* Page reference count > 1 means the data page is shared. */
		if(mem_map[MAP_NR(((unsigned long) bh->b_data))].count !=1) shared++;
		if (bh->b_count)
			used++, lastused = found;
		bh = bh->b_next_free;
	  } while (bh != lru_list[nlist]);
	  printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, "
		 "%d protected, %d dirty %d shrd\n",
		 nlist, found, used, lastused, locked, protected, dirty, shared);
	};
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared \n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			 printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1434
1435
1436
1437
1438
1439
1440
1441
/*
 * Try to re-key every buffer sharing bh's page to device dev and a new
 * run of consecutive block numbers starting at starting_block, so the
 * page can serve as a ready-made cluster without a fresh allocation.
 *
 * Returns 1 on success (all buffers rehashed for the new device/blocks),
 * 0 if the page is shared or any buffer on it is busy, protected, dirty
 * or locked.  *bhp is set to bh so the caller's list cursor stays valid.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  kdev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* Refuse if the page is mapped anywhere else. */
	if(mem_map[MAP_NR(page)].count != 1) return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;	/* b_this_page ring is broken -- bail out */

		/* Every buffer on the page must be completely idle. */
		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Rewind to the buffer at the start of the page so block numbers
	   get assigned in on-page order. */
	while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* Re-key each buffer: pull it off the hash/lru/free queues, point
	   it at the new device and block, then re-insert. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		mark_buffer_uptodate(p, 0);
		clear_bit(BH_Req, &p->b_state);
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
/*
 * Build a cluster for <dev, starting_block> by re-keying an existing
 * page-aligned group of idle free buffers (via try_to_reassign) rather
 * than allocating a new page.  Returns 4 on success, 0 on failure.
 */
static int reassign_cluster(kdev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* Keep at least 32 free buffers of this size around, so the scan
	   below has a reasonable chance of finding a fully idle page. */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		/* One full lap of the circular free list; "!i" lets the
		   first iteration run before the wrap-around test applies.
		   try_to_reassign updates bh through &bh. */
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page)  continue;	/* not part of a page ring */
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}
1518
1519
1520
1521
1522
/*
 * Allocate a fresh page and carve it into buffers for the consecutive
 * blocks <dev, block..> to form a new cluster.
 *
 * Returns 4 on success (the magic value callers treat as "cluster
 * built" -- presumably page-sized in clicks, TODO confirm), 0 if no
 * page was available or one of the target blocks is already cached.
 */
static unsigned long try_to_generate_cluster(kdev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* None of the target blocks may already have a buffer, or we
	   would end up with two cache entries for the same block. */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			 goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	/* Initialize each buffer on the page and hash it in.  b_count is
	   set to 1 here and dropped again via brelse() below. */
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_flushtime = 0;
		bh->b_state = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	/* bh is now the last buffer on the page: record it as the page's
	   representative head and close the b_this_page ring. */
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* A target block was already cached: give back the buffer heads
	   and the page. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1576
1577 unsigned long generate_cluster(kdev_t dev, int b[], int size)
1578 {
1579 int i, offset;
1580
1581 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1582 if(i && b[i]-1 != b[i-1]) return 0;
1583 if(find_buffer(dev, b[i], size)) return 0;
1584 };
1585
1586
1587
1588
1589
1590 if(maybe_shrink_lav_buffers(size))
1591 {
1592 int retval;
1593 retval = try_to_generate_cluster(dev, b[0], size);
1594 if(retval) return retval;
1595 };
1596
1597 if (nr_free_pages > min_free_pages*2)
1598 return try_to_generate_cluster(dev, b[0], size);
1599 else
1600 return reassign_cluster(dev, b[0], size);
1601 }
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613 void buffer_init(void)
1614 {
1615 int i;
1616 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1617 long memsize = MAP_NR(high_memory) << PAGE_SHIFT;
1618
1619 if (memsize >= 4*1024*1024) {
1620 if(memsize >= 16*1024*1024)
1621 nr_hash = 16381;
1622 else
1623 nr_hash = 4093;
1624 } else {
1625 nr_hash = 997;
1626 };
1627
1628 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1629 sizeof(struct buffer_head *));
1630
1631
1632 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1633 sizeof(struct buffer_head *));
1634 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1635 buffer_pages[i] = NULL;
1636
1637 for (i = 0 ; i < nr_hash ; i++)
1638 hash_table[i] = NULL;
1639 lru_list[BUF_CLEAN] = 0;
1640 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1641 if (!free_list[isize])
1642 panic("VFS: Unable to initialize buffer free list!");
1643 return;
1644 }
1645
1646
1647
1648
1649
1650
1651
1652
/* Rendezvous points between wakeup_bdflush() and the bdflush daemon:
   the daemon sleeps on bdflush_wait; callers that asked to wait sleep
   on bdflush_done until the daemon finishes a flush pass. */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

/* Kick the bdflush daemon awake; if wait is non-zero, block until it
   has completed a pass over the dirty list. */
static void wakeup_bdflush(int wait)
{
	wake_up(&bdflush_wait);
	if(wait) sleep_on(&bdflush_done);
}
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
/*
 * Write out dirty buffers whose flush deadline has passed.  Syncs the
 * superblocks and inodes first, then walks the dirty LRU list issuing
 * writes for every unlocked dirty buffer with b_flushtime <= jiffies.
 * Finally decays the per-size buffer load averages.  Returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	/* Production build: only the dirty list needs scanning. */
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* The buffer may have been refiled to
				   another list while we blocked in I/O;
				   restart the scan if so. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean, unlocked buffer sitting on the
				   dirty list: move it where it belongs. */
				if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
				{
					refile_buffer(bh);
					continue;
				}

				if (buffer_locked(bh) || !buffer_dirty(bh))
					continue;
				ndirty++;
				/* Not due for flushing yet. */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				/* Pin the buffer across the write request. */
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Exponentially decay the per-size usage load averages and reset
	   the usage counters for the next interval. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1738
1739
1740
1741
1742
1743
1744
1745 asmlinkage int sys_bdflush(int func, long data)
1746 {
1747 int i, error;
1748
1749 if (!suser())
1750 return -EPERM;
1751
1752 if (func == 1)
1753 return sync_old_buffers();
1754
1755
1756 if (func >= 2) {
1757 i = (func-2) >> 1;
1758 if (i < 0 || i >= N_PARAM)
1759 return -EINVAL;
1760 if((func & 1) == 0) {
1761 error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
1762 if (error)
1763 return error;
1764 put_user(bdf_prm.data[i], (int*)data);
1765 return 0;
1766 };
1767 if (data < bdflush_min[i] || data > bdflush_max[i])
1768 return -EINVAL;
1769 bdf_prm.data[i] = data;
1770 return 0;
1771 };
1772
1773
1774
1775
1776
1777 return 0;
1778 }
1779
1780
1781
1782
1783
/*
 * The bdflush kernel daemon: loops forever writing out dirty buffers.
 * Each pass writes at most bdf_prm.b_un.ndirty buffers from the dirty
 * LRU list, wakes anyone blocked in wakeup_bdflush(1), then goes back
 * to sleep once the dirty fraction of (unshared) buffers has dropped
 * below bdf_prm.b_un.nfract percent.  Never returns.
 */
int bdflush(void * unused) {

	int i;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	/* Detach from the invoking context: the daemon gets its own
	   session and process group, and a recognizable name. */
	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "kernel bdflush");

	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		/* Production build: only the dirty list needs scanning. */
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* Restart the scan if the buffer was
					   refiled while we blocked in I/O. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean, unlocked buffer on the dirty
					   list: move it where it belongs. */
					if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
					{
						refile_buffer(bh);
						continue;
					}

					if (buffer_locked(bh) || !buffer_dirty(bh))
						continue;

					/* Pin the buffer while the write is
					   queued, then drop our reference. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		/* Release anyone waiting in wakeup_bdflush(1). */
		wake_up(&bdflush_done);

		/* Sleep unless too large a fraction of the unshared
		   buffers is still dirty; clear pending signals first so
		   the interruptible sleep isn't cut short immediately. */
		if(nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880