This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for
 * the data, of course), but instead letting the caller do it.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>

#define NR_SIZES 4
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;			/* size of hash table */
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};	/* usage counts feeding the load averages */
int buffers_lav[NR_SIZES] = {0,};	/* load average of buffer usage, per size */
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
extern int *blksize_size[];

static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV

static union bdflush_param{
	struct {
		int nfract;	/* Percentage of the buffer cache that may be
				   dirty before bdflush is woken up */
		int ndirty;	/* Maximum number of dirty blocks to write out
				   per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain
				   each time refill_freelist() is called */
		int nref_dirt;	/* Dirty buffer threshold for activating
				   bdflush when trying to refill buffers */
		int clu_nfract;	/* Percentage of the buffer cache to scan when
				   searching for free clusters */
		int age_buffer;	/* Time (in jiffies) a normal buffer may age
				   before we flush it */
		int age_super;	/* Time a superblock buffer may age before we
				   flush it */
		int lav_const;	/* Time constant used for the load average */
		int lav_ratio;	/* How low the load average for a particular
				   size may go before we start trimming back
				   the buffers of that size */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};

/* These are the min and max parameter values that we will allow to be assigned */
static int bdflush_min[N_PARAM] = {  0,   10,    5,   25,   0,   100,   100,    1, 1};
static int bdflush_max[N_PARAM] = {100, 5000, 2000, 2000, 100, 60000, 60000, 2047, 5};

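/*
 * Illustrative sketch (not part of the original file): sys_bdflush() below
 * maps a "func" code >= 2 onto these parameters as i = (func - 2) >> 1,
 * reading when the low bit of func is clear and writing when it is set.
 * So parameter 1 (ndirty) would hypothetically be read with
 *
 *	int ndirty;
 *	bdflush(4, (long) &ndirty);	(func = 2*1 + 2, even => get)
 *
 * and set with
 *
 *	bdflush(5, 600);		(func = 2*1 + 3, odd => set)
 *
 * subject to the bdflush_min/bdflush_max bounds above.
 */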
/*
 * Wait for a buffer to become unlocked.  The extra reference count makes
 * sure the buffer head cannot be reused while we sleep; the task re-checks
 * b_lock after every wakeup, since it may have been woken for another reason.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
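/*
 * For reference (an assumption about the companion header, not code from
 * this file): the wait_on_buffer() used throughout is expected to be a
 * small inline in <linux/locks.h> that only enters the slow path above
 * when the buffer is actually locked, roughly:
 *
 *	extern inline void wait_on_buffer(struct buffer_head * bh)
 *	{
 *		if (bh->b_lock)
 *			__wait_on_buffer(bh);
 *	}
 */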
/*
 * Write out and wait upon the dirty buffers of a device (dev == 0 means
 * all devices).  One pass for no-wait, three for wait:
 *  0) write out all dirty, unlocked buffers;
 *  1) write out all dirty buffers, waiting if locked;
 *  2) wait for completion by waiting for all buffers to unlock.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* We search all lists as a failsafe mechanism, not because we expect
	   there to be dirty buffers on any of the other lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Buffer is locked; skip it unless all buffers
				   were to be flushed anyway, or this pass is
				   for waiting. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				goto repeat2;
			}
			/* If an unlocked buffer is not uptodate, there has
			   been an IO error. Don't count the buffer as dirty. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}
			/* Don't write clean buffers.  Don't write ANY buffers
			   on the third pass. */
			if (!bh->b_dirt || pass >= 2)
				continue;
			/* Don't bother about locked buffers. */
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If we are supposed to wait and there were locked or freshly
	   written buffers, make another pass (at most three in total). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
	return err;
}

void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}

int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}

asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}

/*
 * Forget all cached state for a device: used e.g. when the media in a
 * removable-media drive may have changed.
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
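/*
 * Worked example (illustrative, assuming nr_hash == 997): block 2 on
 * device 0x0301 hashes to bucket (0x0301 ^ 2) % 997 = 771 % 997 = 771.
 * All buffers that collide on a bucket are kept on the doubly linked
 * b_next/b_prev chain headed there.
 */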

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");	/* 0xffff marks a free buffer */
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if (lru_list[bh->b_list] == bh)	/* still bh: it was the only entry */
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		/* Free buffers live only on the free list, not in the
		   hash table or on an LRU list. */
		remove_from_free_list(bh);
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Add to the back of the LRU list. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* so it is obvious we are on the free list */

	/* Add to the back of the free list. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	/* A free buffer goes to the end of the free list. */
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	/* Put at the end of the LRU list. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Put the buffer in a new hash queue if it has a device. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(dev_t dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev == dev && tmp->b_blocknr == block) {
			if (tmp->b_size == size)
				return tmp;
			printk("VFS: Wrong blocksize on device %d/%d\n",
			       MAJOR(dev), MINOR(dev));
			return NULL;
		}
	return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}

void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* All cached blocks of the old size are now invalid: write back
	   what we can, then forget the rest by marking the buffers clean
	   and unhashing them. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}

/* How badly do we need this buffer: clean and unlocked scores 0, locked 1,
   dirty 2, dirty and locked 3.  Candidates for reuse must score 0. */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)

void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* If there are plenty of free buffers of the right size left,
	   there is nothing to do. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed = bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* See if there are too many buffers of a different size.
	   If so, victimize those first. */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* OK, we cannot grow the buffer cache, now try to get some from
	   the LRU lists.  First set the candidate pointers to usable
	   buffers.  This should be quick nearly all of the time. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			if (!bh) break;		/* check before dereferencing */
			tmp = bh->b_next_free;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Buffers are written in the order they are placed
			   on the locked list.  If we encounter a locked
			   buffer here, this means that the rest of them
			   are also locked. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;	/* nothing on this list */
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Now see which candidate wins the election: the oldest
	   (smallest b_lru_time) usable buffer across all lists. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, use it, then get a new candidate from that list. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;	/* got the last one */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Now all we need to do is advance the candidate pointer
		   from the winner list to the next usable buffer. */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				if (!bh) break;	/* check before dereferencing */
				tmp = bh->b_next_free;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				/* Buffers are written in the order they are
				   placed on the locked list.  If we encounter
				   a locked buffer here, this means that the
				   rest of them are also locked. */
				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;	/* nothing here */
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Too bad, that was not enough.  Try a little harder to grow some. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* and repeat until we find something good */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}

/*
 * getblk() is the main cache entry point: return a usable buffer head for
 * the given block, either straight from the hash table or by taking a
 * buffer from the free list (refilling the free list first if necessary).
 * Note the care about races: after any point where we may have slept, the
 * hash table is re-checked before a new buffer is committed.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size load average. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	   and that it's unused (b_count=0), unlocked and clean. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
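/*
 * Usage sketch (illustrative only): getblk() never reads the block, so a
 * caller that needs valid data must issue the read itself - which is
 * exactly what bread() below does:
 *
 *	struct buffer_head *bh = getblk(dev, block, size);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 *	... use bh->b_data ...
 *	brelse(bh);
 */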

void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buf->b_dirt){
		/* Record when this buffer should be written back; a nonzero
		   flag selects the (shorter) superblock aging time. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if(!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

/* Disposition of a buffer, indexed by
   (shared ? 1 : 0) | (locked ? 2 : 0) | (dirty ? 4 : 0). */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
				    BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

/* Move a buffer to the proper list, and wake up bdflush if the fraction
   of dirty buffers has grown too large. */
void refile_buffer(struct buffer_head * buf)
{
	int i, dispose;
	i = 0;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
	if(buf->b_lock) i |= 2;
	if(buf->b_dirt) i |= 4;
	dispose = buffer_disposition[i];
	if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
		dispose = BUF_UNSHARED;
	if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}

/*
 * Release a buffer head: update its write-back time, put it back on the
 * appropriate list, and drop the reference count.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
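/*
 * Usage sketch (illustrative only): the canonical read-modify-write of a
 * single cached block with the routines above and below would be
 *
 *	struct buffer_head *bh = bread(dev, block, 1024);
 *	if (bh) {
 *		... modify bh->b_data ...
 *		bh->b_dirt = 1;		(bdflush writes it back later)
 *		brelse(bh);
 *	}
 */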

/*
 * bread() reads a specified block and returns the buffer head that
 * contains it.  It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
		       MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * breada() is like bread(), but additionally starts reads ahead on up to
 * NBUF further blocks, bounded by the device's read_ahead setting and by
 * the end of the file.  Only the first block is waited for.
 */
#define NBUF 16

struct buffer_head * breada(dev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
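/*
 * Note (not from the original): a filesystem read path would typically
 * call breada(dev, block, blocksize, file_offset, inode_size) for
 * sequential reads and plain bread() for random ones; the read-ahead
 * buffers are released immediately and simply left warming the cache.
 */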

/*
 * Buffer-head bookkeeping: spare buffer heads are kept on unused_list and
 * handed out by get_unused_buffer_head(); more are carved out of a fresh
 * page when the list runs dry.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}

static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	i = PAGE_SIZE / sizeof(*bh);
	nr_buffer_heads += i;
	for ( ; i > 0; i--) {
		bh->b_next_free = unused_list;
		unused_list = bh++;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to follow
 * the buffers created.  Return NULL if unable to create more buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* offset is unsigned, so it wraps past zero when the page is full */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* flag as unused */
	}
	return head;

/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
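/*
 * Layout note (not in the original): for a 4096-byte page and 1024-byte
 * buffers the routine above yields four heads whose b_data point at
 * offsets 0, 1024, 2048 and 3072, linked head -> 0 -> 1024 -> 2048 ->
 * 3072 via b_this_page.  The callers below close this chain into a ring,
 * so the whole page can be walked starting from any of its buffers.
 */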

static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[8];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
		}
	}
}

/*
 * try_to_align() would shuffle already-cached buffers around so that they
 * line up in one page; for now it just gives up: it releases the buffers
 * and reports failure.
 */
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
				  unsigned long address)
{
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

/*
 * check_aligned() sees whether all the blocks already live contiguously
 * in one page; if so it maps that page in for the caller and frees the
 * page the caller supplied.
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				   dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;
	int aligned = 1;

	bh[0] = first;
	nrbuf = 1;
	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK)
		aligned = 0;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			aligned = 0;
	}
	if (!aligned)
		return try_to_align(bh, nrbuf, address);
	mem_map[MAP_NR(page)]++;
	read_buffers(bh,nrbuf);		/* make sure they are actually read correctly */
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

/*
 * try_to_load_aligned() builds a fresh page worth of buffers for the
 * given blocks, inserts them into the cache and reads them in one go.
 * It fails (and undoes everything) if any of the blocks is already
 * cached, since the two copies would get out of sync.
 */
static unsigned long try_to_load_aligned(unsigned long address,
					 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;
	/* do any of the buffers already exist? punt if so.. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;	/* close the b_this_page ring */
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}

/*
 * try_to_share_buffers() tries to minimize memory use by keeping both
 * code pages and the buffer area in the same page.  This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (ie >90% sharing of code pages on
 * demand-loadable executables).
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
						 dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}

/*
 * bread_page reads a page worth of buffers into memory at the desired
 * address.  It's a function of its own, as there is some speed to be got
 * by reading them all at the same time, not waiting for one to be read
 * and then another, etc.  This also allows us to optimize memory usage by
 * sharing code pages and filesystem buffers.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				memcpy((void *) where, bh[i]->b_data, size);
			brelse(bh[i]);
		}
	}
	return address;
}
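/*
 * Usage sketch (illustrative only): to fault in one page of a file whose
 * 1k blocks have already been mapped to disk blocks blk[0..3], a paging
 * routine could do
 *
 *	int blk[4] = { ... four block numbers, 0 for a hole ... };
 *	bread_page(page_address, inode->i_dev, blk, 1024, no_share);
 *
 * with no_share = 0 when the page may be shared with the buffer cache.
 */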

/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;	/* close the b_this_page ring */
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}
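/*
 * Sizing note (not in the original): each successful call adds exactly
 * one page, i.e. PAGE_SIZE/size buffers - four 1k buffers or a single 4k
 * buffer on the usual 4096-byte page - which is why callers such as
 * refill_freelist() count their progress in PAGE_SIZE steps.
 */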

/* =========== Reduce the buffer memory ============= */

/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp)
		{
			*bhp = p->b_prev_free;
			if (p == *bhp)	/* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}

/*
 * Consult the load averages of the different buffer sizes and decide
 * whether we should shrink the buffers of one size.  If so, do it and
 * return 1; else return 0.  The size given (if any) is exempt, since
 * that is the size we are trying to grow.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Do not count shared buffers: they cannot be freed anyway. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* See if we have an excessive number of buffers of a particular
	   size - if so, victimize that bunch. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}

/*
 * Try to free up some pages by shrinking the buffer-cache.
 *
 * Priority tells the routine how hard to try to shrink the
 * buffers: 6 means "don't bother too much", while a value
 * of 0 means "we'd better get some free pages now".
 */
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	if(maybe_shrink_lav_buffers(0)) return 1;

	/* No good candidate size - take any size we can find. */
	return shrink_specific_buffers(priority, 0);
}

static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	   from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;	/* the list became empty under us */
		}
	}

	/* Not enough in the free lists, now try the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* Buffer was refiled while we slept - restart this list. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			if (bh->b_lock) {
				if (priority)
					continue;
				wait_on_buffer(bh);
			}
			if (bh->b_dirt) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;
		}
	}
	return 0;
}

/* ================== Debugging =================== */

void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory:   %6dkB\n",buffermem>>10);
	printk("Buffer heads:    %6d\n",nr_buffer_heads);
	printk("Buffer blocks:   %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
				shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size    [LAV]     Free   Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}

/* ====================== Cluster patches for ext2 ==================== */

/*
 * try_to_reassign() checks if all the buffers on this particular page
 * are unused, and reassigns them to a new cluster if so.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if(mem_map[MAP_NR(page)] != 1) return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}

/*
 * Try to build a free cluster by taking over a page whose buffers are all
 * unused, re-labelling them with the new device and block numbers.  The
 * free list is topped up first so that there are complete pages to pick
 * from.
 */
static int reassign_cluster(dev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* Make sure the free list holds a reasonable number of buffers first. */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page) continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}

/*
 * This function tries to generate a new cluster of buffers from a new
 * page in memory.  We should only do this if we have not expanded the
 * buffer cache to the point where we have run out of memory.
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;	/* close the b_this_page ring */
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}

unsigned long generate_cluster(dev_t dev, int b[], int size)
{
	int i, offset;

	for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
		if(i && b[i]-1 != b[i-1]) return 0;	/* blocks must be consecutive */
		if(find_buffer(dev, b[i], size)) return 0;
	};

	/* OK, we have a candidate for a new cluster.  If one buffer size is
	   over-represented in the cache, shrink it first to make room. */
	if(maybe_shrink_lav_buffers(size))
	{
		int retval;
		retval = try_to_generate_cluster(dev, b[0], size);
		if(retval) return retval;
	};

	if (nr_free_pages > min_free_pages*2)
		return try_to_generate_cluster(dev, b[0], size);
	else
		return reassign_cluster(dev, b[0], size);
}

/* ===================== Init ======================= */

/*
 * buffer_init() allocates the hash table (sized by the amount of physical
 * memory) and the per-page buffer map, and primes the free list with
 * BLOCK_SIZE buffers.
 */
void buffer_init(void)
{
	int i;
	int isize = BUFSIZE_INDEX(BLOCK_SIZE);

	if (high_memory >= 4*1024*1024) {
		if(high_memory >= 16*1024*1024)
			nr_hash = 16381;
		else
			nr_hash = 4093;
	} else {
		nr_hash = 997;
	};

	hash_table = (struct buffer_head **) vmalloc(nr_hash *
						     sizeof(struct buffer_head *));

	buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
						       sizeof(struct buffer_head *));
	for (i = 0 ; i < MAP_NR(high_memory) ; i++)
		buffer_pages[i] = NULL;

	for (i = 0 ; i < nr_hash ; i++)
		hash_table[i] = NULL;
	lru_list[BUF_CLEAN] = 0;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list[isize])
		panic("VFS: Unable to initialize buffer free list!");
	return;
}
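/*
 * Note (interpretation, not from the original): 997, 4093 and 16381 are
 * primes close to 1024, 4096 and 16384; a prime table size keeps the
 * simple (dev ^ block) % nr_hash function reasonably well distributed.
 */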

/* ====================== bdflush support =================== */

struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static int bdflush_running = 0;

/*
 * Kick the bdflush daemon; if it is not running, fall back to a
 * synchronous flush.  With wait != 0 we also sleep until the daemon
 * signals that a pass has completed.
 */
static void wakeup_bdflush(int wait)
{
	if(!bdflush_running){
		printk("Warning - bdflush not running\n");
		sync_buffers(0,0);
		return;
	};
	wake_up(&bdflush_wait);
	if(wait) sleep_on(&bdflush_done);
}

/*
 * Here we attempt to write back old buffers.  We also try to flush inodes
 * and supers as well, since this function is essentially "update", and
 * otherwise there would be no way of ensuring that these quantities ever
 * get written back.  Ideally, we would have a timestamp on the inodes
 * and superblocks so that we could write back only the old ones.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* We may have stalled while waiting for I/O to complete. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on dirty list?  Refile it. */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* We assume that we only come through here on a regular schedule,
	   like every 5 seconds.  Now update the load averages, and reset
	   the usage counts to prevent overflow. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}

/*
 * This is the interface to bdflush.  As we get more sophisticated, we can
 * pass tuning parameters to this "process", to adjust how it behaves.  Each
 * parameter is verified against its bounds before it is accepted.
 */
asmlinkage int sys_bdflush(int func, long data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* func 0 turns the caller into the flush daemon below; func 1 above
	   does one synchronous ageing pass; func >= 2 gets (even) or sets
	   (odd) tuning parameter (func-2)>>1. */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if((func & 1) == 0) {
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	if (bdflush_running)
		return -EBUSY;	/* Only one copy of this running at one time */
	bdflush_running++;

	/* OK, from here on is the daemon. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* We may have stalled while waiting for I/O to complete. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on dirty list?  Refile it. */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;
					/* Should we write back buffers that are shared or not?
					   Currently dirty buffers are not shared, so it does not
					   matter. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers around, skip the
		   sleep and flush some more. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
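/*
 * Userspace sketch (illustrative, not from this file): an "update"-style
 * daemon could drive this syscall either by requesting one ageing pass
 * every few seconds:
 *
 *	for (;;) {
 *		bdflush(1, 0);	(write back old buffers, supers, inodes)
 *		sleep(5);
 *	}
 *
 * or by calling bdflush(0, 0) once to become the flush daemon itself,
 * which only returns on SIGKILL.
 */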