This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/major.h>
22 #include <linux/string.h>
23 #include <linux/locks.h>
24 #include <linux/errno.h>
25 #include <linux/malloc.h>
26
27 #include <asm/system.h>
28 #include <asm/segment.h>
29 #include <asm/io.h>
30
31 #define NR_SIZES 4
32 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
33 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
34
35 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
36
37 static int grow_buffers(int pri, int size);
38 static int shrink_specific_buffers(unsigned int priority, int size);
39 static int maybe_shrink_lav_buffers(int);
40
41 static int nr_hash = 0;
42 static struct buffer_head ** hash_table;
43 struct buffer_head ** buffer_pages;
44 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
45 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
46 static struct buffer_head * unused_list = NULL;
47 static struct wait_queue * buffer_wait = NULL;
48
49 int nr_buffers = 0;
50 int nr_buffers_type[NR_LIST] = {0,};
51 int nr_buffers_size[NR_SIZES] = {0,};
52 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
53 int buffer_usage[NR_SIZES] = {0,};
54 int buffers_lav[NR_SIZES] = {0,};
55 int nr_free[NR_SIZES] = {0,};
56 int buffermem = 0;
57 int nr_buffer_heads = 0;
58 extern int *blksize_size[];
59
60
61 static void wakeup_bdflush(int);
62
63 #define N_PARAM 9
64 #define LAV
65
/*
 * Tunable parameters for the bdflush daemon.  The union lets them be
 * accessed both by name (b_un.*) and as a flat array (data[]) so that
 * sys_bdflush can index them generically.
 */
static union bdflush_param{
	struct {
		int nfract;	/* Percentage of (non-shared) buffers dirty
				   before bdflush is woken (see refile_buffer) */
		int ndirty;	/* presumably max buffers written per bdflush
				   run - not used in this view, confirm */
		int nrefill;	/* Number of buffers to obtain per
				   refill_freelist call */
		int nref_dirt;	/* NOTE(review): meaning not evident from
				   visible code - confirm against bdflush */
		int clu_nfract;	/* presumably cluster-related threshold -
				   not used in visible code, confirm */
		int age_buffer;	/* Jiffies a dirty data buffer may age before
				   its flush deadline (set_writetime) */
		int age_super;	/* Jiffies a dirty superblock buffer may age;
				   also reused as a lock-age bound */
		int lav_const;	/* Load-average weighting constant
				   (maybe_shrink_lav_buffers) */
		int lav_ratio;	/* presumably LAV comparison ratio - not
				   used in visible code, confirm */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
90
91
92
93
94
95
96
97 static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
98 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
99
100
101
102
103
104
105
106
107
108
/*
 * Sleep until the buffer's I/O lock (b_lock) is released.  The b_count
 * bump pins the buffer so it cannot be reclaimed while we sleep.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* Set the task state *before* re-testing b_lock, so a wakeup that
	   arrives between the test and schedule() is not lost. */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
125
126
127
128
129
130
131
132
133
134
135
/*
 * Write out dirty buffers for a device (all devices when dev == 0).
 * wait == 0: start writes asynchronously and return immediately.
 * wait != 0: make up to three passes, waiting on locked buffers in the
 * later passes, so data is actually on disk when we return.
 * Returns nonzero if a buffer in an inconsistent state was seen.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;

	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Bounded scan (twice the nominal list length); restart the
		   list if a buffer was refiled out from under us. */
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Only block on later passes; early passes
				   just note there is work outstanding. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				/* wait_on_buffer slept: lists may have
				   changed, rescan from scratch. */
				goto repeat2;
			}
			/* Unlocked, clean, I/O was requested, yet not
			   uptodate: should be impossible - report it. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}
			/* Nothing to write, or we are in the final
			   wait-only pass. */
			if (!bh->b_dirt || pass>=2)
				continue;
			/* Re-test the lock before issuing the write. */
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			/* Dirty buffers belong on the BUF_DIRTY list;
			   count and report strays. */
			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If synchronous and buffers were busy, go around again (pass 1
	   waits on locks, pass 2 only waits - no new writes). */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
211
/*
 * Asynchronously flush a device: buffers, superblocks, inodes.  The
 * second sync_buffers() picks up buffers dirtied by sync_supers() and
 * sync_inodes().  Does not wait for the writes to complete.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}
219
/*
 * Synchronously flush a device.  Like sync_dev(), but the final
 * sync_buffers() call waits for completion and returns its error
 * status.
 */
int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}
227
/*
 * sync(2): schedule writeback for all devices (dev == 0 means "all").
 * Always returns 0; completion is not awaited.
 */
asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
233
/*
 * Generic fsync file operation: flush the whole device backing the
 * inode.  Coarser than necessary, but always correct.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
238
/*
 * fsync(2): validate the file descriptor and delegate to the file's
 * fsync operation.
 * Returns 0 on success, -EBADF for a bad descriptor, -EINVAL if the
 * file type has no fsync operation, -EIO if the flush failed.
 */
asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}
252
/*
 * Forget the cached contents of every buffer on a device (e.g. after a
 * media change).  Waits for in-flight I/O; b_dev is re-checked after
 * the wait because the buffer may have been reused while we slept.
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		/* Bounded scan: at most twice the nominal list length. */
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}
272
273 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
274 #define hash(dev,block) hash_table[_hashfn(dev,block)]
275
276 static inline void remove_from_hash_queue(struct buffer_head * bh)
277 {
278 if (bh->b_next)
279 bh->b_next->b_prev = bh->b_prev;
280 if (bh->b_prev)
281 bh->b_prev->b_next = bh->b_next;
282 if (hash(bh->b_dev,bh->b_blocknr) == bh)
283 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
284 bh->b_next = bh->b_prev = NULL;
285 }
286
/*
 * Unlink a buffer from the circular LRU list of its b_list class.
 * Panics on a corrupt list or if the buffer is marked free
 * (b_dev == 0xffff is the free-buffer sentinel throughout this file).
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	/* Still bh after advancing: it was the only element, so the
	   list is now empty. */
	if(lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}
301
/*
 * Unlink a buffer from the circular free list for its size class and
 * decrement nr_free.  Only free buffers (b_dev == 0xffff) may be here.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	/* Self-linked means it was the sole element. */
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
321
/*
 * Remove a buffer from whatever queues it is on: free buffers come off
 * the free list; in-use buffers come off the hash chain and the LRU
 * list, with the per-type counters kept in step.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		remove_from_free_list(bh);
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}
334
/*
 * Move a buffer to the most-recently-used end of its LRU list.  Since
 * the list is circular, a buffer that is currently the head can be
 * "moved to the tail" simply by advancing the head pointer.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		/* Head of a circular list: rotate instead of relinking. */
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Re-insert at the tail of the (possibly now empty) list. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
357
/*
 * Append a buffer to the tail of the free list for its size class.
 * Marks the buffer free (b_dev = 0xffff) and bumps nr_free.  The
 * caller must already have taken it off the other queues.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* free-buffer sentinel */

	/* Empty list: seed it with bh self-linked via the head fixup. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
379
/*
 * Add a buffer to the queues appropriate for its state: free buffers
 * to the free list; otherwise to the tail of its LRU list and (unless
 * anonymous, b_dev == 0) to the front of its hash chain.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	/* Empty LRU list: seed it so the tail-insert below works. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Anonymous buffers (no device) are not hashed. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
409
410 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
411 {
412 struct buffer_head * tmp;
413
414 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
415 if (tmp->b_dev==dev && tmp->b_blocknr==block)
416 if (tmp->b_size == size)
417 return tmp;
418 else {
419 printk("VFS: Wrong blocksize on device %d/%d\n",
420 MAJOR(dev), MINOR(dev));
421 return NULL;
422 }
423 return NULL;
424 }
425
426
427
428
429
430
431
432
/*
 * Hashed lookup that returns the buffer with b_count elevated.  After
 * waiting for pending I/O we re-check identity: the buffer may have
 * been reused for a different block while we slept, in which case the
 * reference is dropped and the lookup retried.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}
447
/*
 * Change the soft blocksize of a device.  Flushes everything out
 * first, then invalidates and unhashes every cached buffer of the old
 * size so stale data cannot be found at the new size.
 */
void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	/* No blksize table registered for this major: nothing to do. */
	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	/* First-time default to BLOCK_SIZE needs no invalidation. */
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Kill any wrong-size buffers still cached for this device. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			/* Re-check after sleeping: still ours and wrong? */
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}
492
493 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
494
/*
 * Replenish the free list for the given buffer size.  Strategy, in
 * order of preference:
 *   1. grow the cache with new pages while memory is plentiful;
 *   2. shrink another size class whose load average says it is
 *      under-used;
 *   3. harvest the least-recently-used clean, unshared, idle buffers
 *      of this size from the LRU lists (candidate[] holds one cursor
 *      per list);
 *   4. as a last resort, grab pages even under memory pressure and
 *      wake bdflush.
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* Plenty of free buffers of this size already. */
	if (nr_free[isize] > 100)
		return;

	/* Bytes worth of buffers we would like to obtain. */
	needed =bdf_prm.b_un.nrefill * size;

	/* Easy path: allocate fresh pages while memory is plentiful. */
	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* Trade space away from under-used size classes. */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* Build a per-list cursor: the first reclaimable buffer on each
	   LRU list (dirty and shared lists are never harvested). */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			/* Shared or dirty: misfiled - refile and skip. */
			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Locked lists are time-ordered: once we hit a
			   locked buffer, the rest are locked too. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Pick the globally least-recently-used candidate. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Advance this list's cursor to its next reclaimable
		   buffer (same scan as above). */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* No candidates left: try growing again before getting drastic. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* Last resort: atomic allocation, and wake bdflush to make
	   progress if even that fails. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}
675
676
677
678
679
680
681
682
683
684
685
/*
 * Get a buffer for (dev, block), creating one if it is not cached.
 * The returned buffer has b_count elevated; contents are NOT read from
 * disk (b_uptodate may be 0) - that is bread()'s job.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Per-size usage counter feeds the load-average balancing. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist may have slept: someone else could have
	   created this block meanwhile - start over if so. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* Initialize the new (not-uptodate) buffer and hash it. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
727
728 void set_writetime(struct buffer_head * buf, int flag)
729 {
730 int newtime;
731
732 if (buf->b_dirt){
733
734 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
735 bdf_prm.b_un.age_buffer);
736 if(!buf->b_flushtime || buf->b_flushtime > newtime)
737 buf->b_flushtime = newtime;
738 } else {
739 buf->b_flushtime = 0;
740 }
741 }
742
743
/*
 * Move a buffer to the LRU list matching its current state (dirty,
 * shared, locked, freshly-unshared, or clean), updating its LRU
 * timestamp where appropriate and waking bdflush when the dirty list
 * grows past the configured fraction of the cache.
 */
void refile_buffer(struct buffer_head * buf){
	int dispose;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if (buf->b_dirt)
		dispose = BUF_DIRTY;
	else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
		dispose = BUF_SHARED;
	else if (buf->b_lock)
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;
	else
		dispose = BUF_CLEAN;
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list)  {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		/* Recently-dirtied locked buffers go to the secondary
		   locked list (BUF_LOCKED1). */
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		/* Too many dirty buffers relative to the unshared cache:
		   kick bdflush (nfract is a percentage). */
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}
773
/*
 * Release a reference on a buffer.  Waits for pending I/O, refreshes
 * the flush deadline, refiles the buffer onto its proper list, then
 * drops b_count, waking any waiters when it hits zero.  Complains if
 * the buffer was already free.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		/* Last reference gone: someone may be waiting for a
		   free buffer. */
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
792
793
794
795
796
/*
 * Read a block: get the buffer via getblk(), issue the read if it is
 * not already uptodate, and wait for completion.  Returns the buffer
 * (with b_count held) or NULL on I/O failure.
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	/* Still not uptodate after the I/O: the read failed. */
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
815
816
817
818
819
820
821
822 #define NBUF 16
823
/*
 * Read a block with read-ahead: like bread(), but also queues reads
 * for up to NBUF following blocks (bounded by the device's read_ahead
 * setting).  Only the first buffer is waited on and returned; the
 * read-ahead buffers are released immediately after the I/O is queued.
 */
struct buffer_head * breada(dev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	/* NOTE(review): masking filesize/pos with (bufsize-1) keeps only
	   the low bits, so this looks like it limits read-ahead within a
	   bufsize-aligned window rather than to true end-of-file -
	   confirm the intended semantics. */
	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		/* Already cached: read-ahead has caught up, stop. */
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Queue all reads in one request batch. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Only the originally requested block is waited on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
876
877
878
879
/*
 * Return a buffer head to the unused pool.  The whole structure is
 * zeroed except b_wait, which is preserved so any tasks still queued
 * on it are not lost.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
890
/*
 * Top up the unused buffer-head pool by carving one fresh page into
 * buffer heads.  Silently does nothing if the pool is non-empty or no
 * page is available (the caller copes with an empty pool).
 */
static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	/* One page yields PAGE_SIZE/sizeof(*bh) heads; chain them all. */
	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
		bh->b_next_free = unused_list;
		unused_list = bh++;
	}
}
907
/*
 * Pop a buffer head from the unused pool (refilling it first if
 * needed).  Returns NULL if no head could be obtained.  The returned
 * head has its data/size/req fields cleared.
 */
static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}
923
924
925
926
927
928
929
/*
 * Carve one page into buffer heads of the given size, linked through
 * b_this_page.  Returns the head covering the lowest offset, or NULL
 * (with all partial allocations undone) if heads ran out.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* offset is unsigned: the loop ends when the subtraction wraps
	   past zero and the result is no longer < PAGE_SIZE. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* not yet assigned to a device */
	}
	return head;

	/* Out of buffer heads: give back what we took. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
960
/*
 * Read every not-yet-uptodate buffer in the array as one batched
 * request, then wait for all of them.  NULL entries are skipped.
 * bhr[] holds at most 8 entries - assumes nrbuf <= 8, which the
 * page-sized callers satisfy (8 * 512 == PAGE_SIZE); confirm for any
 * new caller.
 */
static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[8];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
		}
	}
}
979
980
981
982
983
984
985
986
987
/*
 * Fallback when a page's buffers turn out not to be physically
 * contiguous: release every reference and report failure (0).  No
 * realignment is actually attempted; `address` is unused.
 */
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
				  unsigned long address)
{
	int i;

	for (i = nrbuf - 1; i >= 0; i--)
		brelse(bh[i]);
	return 0;
}
995
/*
 * Given the cached first buffer of a page-sized run, check whether all
 * the run's buffers are cached AND laid out contiguously in one page.
 * If so, share that page directly with the caller: bump its refcount,
 * ensure the data is read in, free the caller's scratch page, and
 * return the shared page.  Returns 0 if sharing is not possible.
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;
	int aligned = 1;

	bh[0] = first;
	nrbuf = 1;
	/* The first buffer must itself start on a page boundary. */
	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK)
		aligned = 0;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		/* Each successive buffer must follow the previous one
		   in the same physical page. */
		if (page+offset != (unsigned long) first->b_data)
			aligned = 0;
	}
	if (!aligned)
		return try_to_align(bh, nrbuf, address);
	mem_map[MAP_NR(page)]++;	/* share the page */
	read_buffers(bh,nrbuf);
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);		/* caller's page is no longer needed */
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}
1036
/*
 * Build an aligned buffer run in-place over the caller's page: if none
 * of the blocks are already cached, turn the page itself into cache
 * buffers, read them in, and return the page address so it can be
 * shared.  Returns 0 (undoing everything) if any block is already
 * cached or a block number is 0.
 */
static unsigned long try_to_load_aligned(unsigned long address,
	dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;

	/* All blocks must be absent from the cache, or we would create
	   duplicate buffers for the same block. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	/* Adopt each buffer into the cache proper. */
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;	/* close the per-page ring */
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
/*
 * Try to satisfy a page read by sharing with the buffer cache: if the
 * first block is cached, attempt to share its (possibly aligned) page;
 * otherwise try to turn the caller's page into cache buffers.  Returns
 * the usable page address, or 0 if sharing is not possible.
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}
1121
1122
1123
1124
1125
1126
1127
1128
/*
 * Read a whole page's worth of blocks (block numbers in b[], 0 meaning
 * a hole) into the page at `address`.  Unless no_share is set, first
 * try to share/steal a buffer-cache page; otherwise fall back to
 * reading each block through the cache and copying.  Returns the page
 * address actually holding the data.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	/* Copy each block into place; holes (NULL) are left untouched. */
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				memcpy((void *) where, bh[i]->b_data, size);
			brelse(bh[i]);
		}
	}
	return address;
}
1157
1158
1159
1160
1161
/*
 * Grow the buffer cache by one page's worth of buffers of the given
 * size (must be a multiple of 512 and at most PAGE_SIZE), putting them
 * on the free list.  Returns 1 on success, 0 on failure.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	/* Splice the page's buffers into the circular free list (or
	   self-link them if the list was empty). */
	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;	/* close the per-page ring */
	wake_up(&buffer_wait);	/* free buffers now available */
	buffermem += PAGE_SIZE;
	return 1;
}
1212
1213
1214
1215
1216
1217
1218
1219
/*
 * Try to free the page containing bh by releasing every buffer on it.
 * Fails (returns 0) if any buffer on the page is busy, dirty, locked,
 * or waited on.  *bhp is kept valid: if the freed page contained the
 * caller's list cursor, it is backed up (or NULLed if the list dies).
 * Returns 1 only if the page itself became free.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* First pass: every buffer on the page must be idle. */
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* Second pass: tear them all down. */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		/* Keep the caller's cursor off the buffers we destroy. */
		if (p == *bhp)
		  {
		    *bhp = p->b_prev_free;
		    if (p == *bhp) /* Was this the last in the list? */
		      *bhp = NULL;
		  }
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
/*
 * Load-average based rebalancing between buffer size classes: if some
 * other size class is getting less use (buffers_lav) than its share of
 * the cache would justify, shrink it to make room.  `size` is the
 * class we are growing (0 = none in particular; its index is excluded
 * from shrinking).  Returns 1 if something was shrunk.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Sum usage and population over all size classes; shared
	   buffers are excluded from the population. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	isize = (size ? BUFSIZE_INDEX(size) : -1);

	/* Only meaningful when more than one size class is populated.
	   Cross-multiplied comparison: class's usage share < its
	   population share (weighted by lav_const) => shrink it. */
	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}
1308
1309
1310
1311
1312
1313
1314
/*
 * Memory-pressure entry point: try to release one page of buffer
 * memory.  Lower priority = more aggressive (priority < 2 syncs dirty
 * buffers first; priority == 2 wakes bdflush).  Returns 1 if a page
 * was freed.
 */
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	/* Prefer shrinking an under-used size class. */
	if(maybe_shrink_lav_buffers(0)) return 1;

	return shrink_specific_buffers(priority, 0);
}
1328
/*
 * Try to free one page of buffers, optionally restricted to one buffer
 * size (size == 0 means any size).  Free-list pages are tried first,
 * then the LRU lists, scanning a priority-dependent fraction of each.
 * Returns 1 if a page was freed.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	/* Pass 1: pages whose buffers are already all on a free list. */
	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				 continue;
			if (try_to_free(bh, &bh))
				 return 1;
			/* try_to_free may have emptied the list. */
			if(!bh) break;
		}
	}

	/* Pass 2: walk the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Higher priority => scan a smaller fraction of the list. */
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* Buffer refiled under us: restart this list. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				 continue;
			if(size && bh->b_size != size) continue;
			/* else binds to "if (priority)": at priority 0 we
			   wait for locked buffers, otherwise skip them. */
			if (bh->b_lock)
				 if (priority)
					  continue;
				 else
					  wait_on_buffer(bh);
			if (bh->b_dirt) {
				/* Start an async write and move on; the
				   buffer can be freed on a later pass. */
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				 return 1;
			if(!bh) break;
		}
	}
	return 0;
}
1389
1390
1391
1392
/*
 * Dump buffer-cache statistics to the kernel log: totals, a per-LRU
 * breakdown (used/locked/dirty/shared counts), and a per-size table of
 * list populations.  Diagnostic only; touches no state.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory:   %6dkB\n",buffermem>>10);
	printk("Buffer heads:    %6d\n",nr_buffer_heads);
	printk("Buffer blocks:   %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			/* Page refcount > 1 means the page is shared. */
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
			nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			 printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1431
1432
1433
1434
1435
1436
1437
1438
/*
 * Try to re-purpose the page containing bh as a cluster of buffers for
 * consecutive blocks of `dev` starting at starting_block.  Fails
 * (returns 0) if the page is shared or any of its buffers are busy,
 * dirty or locked.  On success the page's buffers are rehashed for the
 * new device/block range, marked not-uptodate.  Returns 1 on success.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if(mem_map[MAP_NR(page)] != 1) return 0;	/* page is shared */
	/* All buffers on the page must be idle. */
	tmp = bh;
	do {
		if (!tmp)
			 return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			 return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Rewind to the buffer at the start of the page so the block
	   numbers are assigned in address order. */
	while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		 tmp = tmp->b_this_page;

	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492 static int reassign_cluster(dev_t dev,
1493 unsigned int starting_block, int size)
1494 {
1495 struct buffer_head *bh;
1496 int isize = BUFSIZE_INDEX(size);
1497 int i;
1498
1499
1500
1501
1502
1503 while(nr_free[isize] < 32) refill_freelist(size);
1504
1505 bh = free_list[isize];
1506 if(bh)
1507 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1508 if (!bh->b_this_page) continue;
1509 if (try_to_reassign(bh, &bh, dev, starting_block))
1510 return 4;
1511 }
1512 return 0;
1513 }
1514
1515
1516
1517
1518
/*
 * Build a cluster for <dev> from a brand-new page: the blocks
 * <block> .. <block + PAGE_SIZE/size - 1> end up contiguous in one
 * freshly allocated page. Returns 4 on success, 0 on failure (no
 * memory, no buffer heads, or a target block is already cached).
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* Refuse if any target block is already in the cache: we would
	   otherwise create a second alias for the same disk block. */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	/* Initialize every buffer on the page, hash it in, and remember
	   it in arr[] so we can drop our references afterwards.
	   arr[8] suffices: PAGE_SIZE / minimum size (512) == 8 entries max. */
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	/* Close the per-page circular b_this_page ring. */
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* Unwind: give back the buffer heads and the page. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1575
1576 unsigned long generate_cluster(dev_t dev, int b[], int size)
1577 {
1578 int i, offset;
1579
1580 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1581 if(i && b[i]-1 != b[i-1]) return 0;
1582 if(find_buffer(dev, b[i], size)) return 0;
1583 };
1584
1585
1586
1587
1588
1589 if(maybe_shrink_lav_buffers(size))
1590 {
1591 int retval;
1592 retval = try_to_generate_cluster(dev, b[0], size);
1593 if(retval) return retval;
1594 };
1595
1596 if (nr_free_pages > min_free_pages*2)
1597 return try_to_generate_cluster(dev, b[0], size);
1598 else
1599 return reassign_cluster(dev, b[0], size);
1600 }
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612 void buffer_init(void)
1613 {
1614 int i;
1615 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1616
1617 if (high_memory >= 4*1024*1024) {
1618 if(high_memory >= 16*1024*1024)
1619 nr_hash = 16381;
1620 else
1621 nr_hash = 4093;
1622 } else {
1623 nr_hash = 997;
1624 };
1625
1626 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1627 sizeof(struct buffer_head *));
1628
1629
1630 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1631 sizeof(struct buffer_head *));
1632 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1633 buffer_pages[i] = NULL;
1634
1635 for (i = 0 ; i < nr_hash ; i++)
1636 hash_table[i] = NULL;
1637 lru_list[BUF_CLEAN] = 0;
1638 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1639 if (!free_list[isize])
1640 panic("VFS: Unable to initialize buffer free list!");
1641 return;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
/* bdflush daemon rendezvous: the daemon sleeps on bdflush_wait until a
 * producer wakes it, and wakes sleepers on bdflush_done after each
 * flushing pass (see wakeup_bdflush() and sys_bdflush()). */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

/* Nonzero once a process has entered the daemon loop in sys_bdflush(). */
static int bdflush_running = 0;
1658
1659 static void wakeup_bdflush(int wait)
1660 {
1661 if(!bdflush_running){
1662 printk("Warning - bdflush not running\n");
1663 sync_buffers(0,0);
1664 return;
1665 };
1666 wake_up(&bdflush_wait);
1667 if(wait) sleep_on(&bdflush_done);
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
/*
 * Periodic writeback pass (bdflush func==1 / update): sync superblocks
 * and inodes, write out dirty buffers whose flush deadline has passed,
 * and age the per-size buffer load averages. Returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* Buffer moved to another list while we were
				   busy: restart the scan from the list head. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean, unlocked buffer sitting on the dirty
				   list: refile it instead of writing it. */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				/* Flush deadline not reached yet.
				   NOTE(review): raw jiffies comparison —
				   misbehaves across jiffies wraparound. */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				/* Pin the buffer across the write request. */
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Age the per-size usage load averages and reset the counters. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1747
1748
1749
1750
1751
1752
1753
1754
/*
 * The bdflush system call (superuser only), three personalities:
 *   func == 1 : run one synchronous sync_old_buffers() pass.
 *   func >= 2 : read (even func) or write (odd func) tuning parameter
 *               number (func-2)>>1 via <data>.
 *   func == 0 : turn the calling process into the bdflush daemon; it
 *               loops writing out dirty buffers and returns only on
 *               SIGKILL.
 */
asmlinkage int sys_bdflush(int func, long data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* Tuning-parameter get/set interface. */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if((func & 1) == 0) {
			/* Even func: copy the current value to user space. */
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		/* Odd func: range-check and store the new value. */
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	/* Only one daemon instance may run at a time. */
	if (bdflush_running)
		return -EBUSY;
	bdflush_running++;

	/* Daemon main loop: flush, notify waiters, maybe sleep, repeat. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				/* Write at most bdf_prm.b_un.ndirty buffers
				   per pass. */
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* Buffer changed lists under us:
					   restart from the list head. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean, unlocked buffer on the dirty
					   list: refile instead of writing. */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;

					/* Pin the buffer and start an
					   asynchronous write. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		/* Release anyone blocked in wakeup_bdflush(1). */
		wake_up(&bdflush_done);

		/* Sleep only when the dirty fraction of (non-shared)
		   buffers is below the configured threshold; otherwise
		   start another pass immediately. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			/* Discard other pending signals and wait. */
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877