This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it.
 */

/*
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk-caches.
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>

#define NR_SIZES 4
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;  /* Size of hash table */
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};	/* Usage counts used to determine load average */
int buffers_lav[NR_SIZES] = {0,};	/* Load average of buffer usage */
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
static int min_free_pages = 20;	/* nr free pages needed before buffer grows */
extern int *blksize_size[];

static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV

static union bdflush_param {
	struct {
		int nfract;	/* Percentage of the buffer cache that may be
				   dirty before bdflush is activated */
		int ndirty;	/* Maximum number of dirty blocks to write out
				   per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain
				   each time we call refill_freelist() */
		int nref_dirt;	/* Dirty buffer threshold for activating bdflush
				   when trying to refill buffers */
		int clu_nfract;	/* Percentage of the buffer cache to scan when
				   searching for free clusters */
		int age_buffer;	/* Time (in jiffies) a normal buffer may age
				   before we flush it */
		int age_super;	/* Time a superblock buffer may age before we
				   flush it */
		int lav_const;	/* Time constant used for the load-average
				   calculation */
		int lav_ratio;	/* How low the lav for a particular size may
				   go before we start to trim those buffers
				   back */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};

/* The minimum and maximum parameter values that we will allow to be assigned. */
static int bdflush_min[N_PARAM] = {  0,   10,    5,   25,   0,   100,   100,    1, 1};
static int bdflush_max[N_PARAM] = {100, 5000, 2000, 2000, 100, 60000, 60000, 2047, 5};

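/*
 * Wait for a buffer to become unlocked. The buffer's reference count is
 * raised around the sleep so that the buffer cannot be freed or reused
 * while we wait, and b_lock is re-checked after every wakeup because
 * another sleeper may have locked the buffer again first.
 */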
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}

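/*
 * Write out and optionally wait on all dirty buffers, or only those of
 * one device if dev is non-zero. With wait != 0 this makes up to three
 * passes: pass 0 starts writes on dirty unlocked buffers, pass 1 writes
 * dirty buffers even if that means waiting for a lock, and pass 2 only
 * waits for the outstanding writes to complete.
 */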
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
	ncount = 0;

	/* We search all lists as a failsafe mechanism, not because we expect
	   there to be dirty buffers on any of the other lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock) {
				/* Buffer is locked; skip it unless wait is
				   requested AND pass > 0. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
			}

			/* If an unlocked buffer is not uptodate, there has
			   been an IO error. Skip it. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}

			/* Don't write clean buffers. Don't write ANY buffers
			   on the third pass. */
			if (!bh->b_dirt || pass >= 2)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If we are waiting for the sync to succeed, and if any dirty
	   blocks were written, then repeat; on the second pass, only
	   wait for buffers being written (do not generate more dirty
	   buffers). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
	return err;
}

void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}

int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}

asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}

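/*
 * Forget all buffered state for a device: clear the uptodate, dirty and
 * req bits of every buffer belonging to dev, so stale data is never
 * handed out (used, for example, when a removable medium may have been
 * changed).
 */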
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}

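/*
 * Buffers are looked up through a simple hash: device number XORed with
 * block number, modulo the hash table size. Each bucket is a doubly
 * linked list threaded through b_next/b_prev.
 */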
#define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	/* b_dev == 0xffff marks an unattached (free) buffer. */
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if (lru_list[bh->b_list] == bh)	/* still true only if it was the last one */
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		remove_from_free_list(bh);
		/* Free buffers live only on the free list, never hashed. */
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Add to back of the LRU list. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* So it is obvious we are on the free list. */

	/* Add to back of free list. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	/* Put at end of free list. */
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Put the buffer in its new hash queue if it has a device. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(dev_t dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev == dev && tmp->b_blocknr == block) {
			if (tmp->b_size == size)
				return tmp;
			printk("VFS: Wrong blocksize on device %d/%d\n",
			       MAJOR(dev), MINOR(dev));
			return NULL;
		}
	return NULL;
}

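/*
 * Why like this, I hear you say... the reason is race-conditions. As we
 * don't lock buffers (unless we are reading them, that is), something
 * might happen to the buffer while we sleep in wait_on_buffer(), so the
 * hash fields are re-checked afterwards and the lookup retried if they
 * changed.
 */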
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}

void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Got a new blocksize: invalidate all buffers of the wrong size for
	   this device, and unhash them so they can never be found again. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}

#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)

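/*
 * Refill the free list with clean buffers of the given size. BADNESS()
 * ranks a buffer for reclaim: 0 for clean and unlocked, 1 for locked,
 * 2 or 3 for dirty; only buffers with BADNESS() == 0 are taken. We first
 * try to grow the cache from free memory, then steal the least recently
 * used clean buffers from the LRU lists, and only then fall back to an
 * atomic allocation and waking up bdflush.
 */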
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* If the free list already holds plenty of buffers of this size,
	   there is nothing to do. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed = bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* See if there are too many buffers of a different size.
	   If so, victimize those first. */
	while(maybe_shrink_lav_buffers(size)) {
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* OK, we cannot grow the buffer cache, now try to get some from
	   the lru lists. First set a candidate pointer to a usable buffer
	   on each list; this should be quick nearly all of the time. */
repeat0:
	for(i=0; i<NR_LIST; i++) {
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
			if(buffers[i] < 0) panic("Here is the problem");
			if (!bh) break;
			tmp = bh->b_next_free;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Buffers are written in the order they are placed
			   on the locked list. If we encounter a locked
			   buffer here, this means that the rest of them
			   are also locked. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Now see which candidate wins the election: the oldest one. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++) {
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time) {
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, use it, then get a new candidate from that list. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;	/* Got the last one */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Advance the candidate pointer on the winning list to the
		   next usable buffer. */
		if(candidate[i] && buffers[i] > 0) {
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				if (!bh) break;
				tmp = bh->b_next_free;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				/* Buffers are written in the order they are
				   placed on the locked list. If we encounter
				   a locked buffer here, this means that the
				   rest of them are also locked. */
				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Too bad, that was not enough. Try a little harder to grow some. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* If we cannot even grow atomically, get bdflush to write back
	   some dirty buffers, and repeat until we find something good. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}

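/*
 * getblk() is the main buffer-cache entry point: it returns the buffer
 * for the given device/block pair, either by finding it in the hash
 * table or by taking a buffer from the free list and (re)initializing
 * it. The buffer is returned with b_count raised, so it cannot be
 * reclaimed until the caller releases it with brelse().
 */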
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size lav. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* Someone may have created the buffer while we slept in
	   refill_freelist() - start over if so. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	   and that it's unused (b_count=0), unlocked (b_lock=0), and clean. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}

void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buf->b_dirt) {
		/* flag selects the superblock age; only shorten an
		   already-set deadline, never extend it. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if(!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

/* Indexed by (shared) | (locked << 1) | (dirty << 2), as computed in
   refile_buffer() below. */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
				    BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

/* A buffer may need to be moved from one buffer list to another
   (e.g. in case it is not shared any more). Handle this. */
void refile_buffer(struct buffer_head * buf)
{
	int i, dispose;
	i = 0;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
	if(buf->b_lock) i |= 2;
	if(buf->b_dirt) i |= 4;
	dispose = buffer_disposition[i];
	if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
		dispose = BUF_UNSHARED;
	if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}

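/*
 * Release a buffer: update its write deadline, move it to the list it
 * now belongs on, and drop the reference taken by getblk()/bread().
 */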
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}

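/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */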
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
		       MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
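
/*
 * Typical use by a filesystem (an illustrative sketch only, not code
 * from this file; `data' and `blocknr' stand in for the caller's
 * destination buffer and block number):
 *
 *	struct buffer_head * bh;
 *
 *	if (!(bh = bread(inode->i_dev, blocknr, BLOCK_SIZE)))
 *		return -EIO;
 *	memcpy(data, bh->b_data, BLOCK_SIZE);
 *	brelse(bh);
 *
 * Every successful bread() must be paired with a brelse(), or the
 * buffer stays pinned in the cache forever.
 */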

#define NBUF 16

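/*
 * breada() is like bread(), but it additionally starts asynchronous
 * reads on up to NBUF following blocks, limited by the device's
 * read_ahead setting and by the end of the file, so that sequential
 * readers find their next blocks already in the cache.
 */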
struct buffer_head * breada(dev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}

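/*
 * Buffer heads themselves are recycled through a simple singly linked
 * unused_list; when it runs dry, a whole page is carved up into new
 * buffer_head structures.
 */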
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	/* Clear the buffer head, but preserve its wait queue. */
	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}

static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	i = PAGE_SIZE / sizeof *bh;
	nr_buffer_heads += i;
	for ( ; i > 0; i--) {
		bh->b_next_free = unused_list;
		unused_list = bh++;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}

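/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer. Use the bh->b_this_page linked list to follow
 * the buffers created. Return NULL if unable to create more buffers.
 */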
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* offset is unsigned, so the loop ends when it wraps past zero. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* Flag as unused */
	}
	return head;

	/* In case anything failed, we just free everything we got. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}

static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[8];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
		}
	}
}

static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				   dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;

	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK) {
		brelse(first);
		return 0;
	}
	mem_map[MAP_NR(page)]++;
	bh[0] = first;
	nrbuf = 1;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			goto no_go;
	}
	read_buffers(bh,nrbuf);
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(page);
	return 0;
}

static unsigned long try_to_load_aligned(unsigned long address,
					 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;

	/* Do any of the blocks already exist in the cache? If so, give up. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}

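/*
 * Try-to-share-buffers tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page. This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (ie >90% sharing of code pages on
 * demand-loadable executables).
 */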
static inline unsigned long try_to_share_buffers(unsigned long address,
						 dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}

/* Copy `size' bytes (a multiple of 4) from `from' to `to' with a single
   rep/movsl; i386 only. */
#define COPYBLK(size,from,to) \
__asm__ __volatile__("rep ; movsl": \
	:"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
	:"cx","di","si")

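/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another,
 * etc. This also allows us to optimize memory usage by sharing code pages
 * and filesystem buffers.
 */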
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				COPYBLK(size, (unsigned long) bh[i]->b_data, where);
			brelse(bh[i]);
		}
	}
	return address;
}

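/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */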
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}

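/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.
 */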
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp) {
			*bhp = p->b_prev_free;
			if (p == *bhp)	/* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}

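/*
 * Consult the load average for buffers and decide whether or not
 * we should shrink the buffers of one size or not. If we decide yes,
 * do it and return 1. Else return 0. Do not attempt to shrink the
 * size that is specified.
 */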
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Do not consider the shared buffers - they are not freeable. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++) {
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* See if we have an excessive number of buffers of a particular
	   size - if so, victimize that bunch. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++) {
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}

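/*
 * Try to free up some pages by shrinking the buffer-cache.
 *
 * Priority tells the routine how hard to try to shrink the buffers:
 * 6 means "don't bother too much", while a value of 0 means "we'd
 * better get some free pages now".
 */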
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	if(maybe_shrink_lav_buffers(0)) return 1;

	/* No size is dominating the cache - take any buffers we can get. */
	return shrink_specific_buffers(priority, 0);
}

static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	   from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;	/* The page was freed out from under us -
					   no big deal, just stop looking here. */
		}
	}

	/* Not enough in the free lists - now try the lru lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* We may have stalled while waiting for I/O to complete. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			if (bh->b_lock) {
				if (priority)
					continue;
				wait_on_buffer(bh);
			}
			if (bh->b_dirt) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;
		}
	}
	return 0;
}

void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n",buffermem>>10);
	printk("Buffer heads: %6d\n",nr_buffer_heads);
	printk("Buffer blocks: %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))] != 1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}

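/*
 * The next few routines manage "clusters": page-sized, page-aligned runs
 * of consecutive blocks. try_to_reassign() takes a page whose buffers are
 * all unused and clean and re-labels them for dev, starting at
 * starting_block.
 */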
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if(mem_map[MAP_NR(page)] != 1) return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		/* Give up if any buffer on the page is in use, dirty or locked. */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Walk to the buffer that starts the page. */
	while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}

static int reassign_cluster(dev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* We want to give ourselves a really good shot at generating a
	   cluster, so make sure the free list is fairly full to begin with. */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page) continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}

static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}

unsigned long generate_cluster(dev_t dev, int b[], int size)
{
	int i, offset;

	for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
		if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
		if(find_buffer(dev, b[i], size)) return 0;
	};

	/* OK, we have a candidate for a new cluster. See if one buffer size
	   is dominating the cache; if so, shrink it and generate the cluster
	   from the page that frees up. */
	if(maybe_shrink_lav_buffers(size)) {
		int retval;
		retval = try_to_generate_cluster(dev, b[0], size);
		if(retval) return retval;
	};

	if (nr_free_pages > min_free_pages)
		return try_to_generate_cluster(dev, b[0], size);
	else
		return reassign_cluster(dev, b[0], size);
}

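/*
 * Initialize the buffer cache: size the hash table according to the
 * amount of physical memory, allocate the hash and page maps, and grow
 * an initial set of BLOCK_SIZE buffers.
 */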
void buffer_init(void)
{
	int i;
	int isize = BUFSIZE_INDEX(BLOCK_SIZE);

	if (high_memory >= 4*1024*1024) {
		min_free_pages = 200;
		if(high_memory >= 16*1024*1024)
			nr_hash = 16381;
		else
			nr_hash = 4093;
	} else {
		min_free_pages = 20;
		nr_hash = 997;
	};

	hash_table = (struct buffer_head **) vmalloc(nr_hash *
						     sizeof(struct buffer_head *));

	buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
						       sizeof(struct buffer_head *));
	for (i = 0 ; i < MAP_NR(high_memory) ; i++)
		buffer_pages[i] = NULL;

	for (i = 0 ; i < nr_hash ; i++)
		hash_table[i] = NULL;
	lru_list[BUF_CLEAN] = 0;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list[isize])
		panic("VFS: Unable to initialize buffer free list!");
	return;
}

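/*
 * Support for the bdflush daemon: a simple kernel process that provides
 * a dynamic response to dirty buffers. Once activated (via sys_bdflush()
 * with func == 0), it writes back a limited number of dirty buffers each
 * cycle and then goes back to sleep on bdflush_wait.
 */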
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static int bdflush_running = 0;

static void wakeup_bdflush(int wait)
{
	if(!bdflush_running) {
		printk("Warning - bdflush not running\n");
		sync_buffers(0,0);
		return;
	};
	wake_up(&bdflush_wait);
	if(wait) sleep_on(&bdflush_done);
}

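/*
 * Here we attempt to write back old buffers. We also try to flush inodes
 * and supers as well, since this function is essentially "update", and
 * otherwise there would be no way of ensuring that these quantities ever
 * get written back. Ideally, we would have a timestamp on the inodes
 * and superblocks so that we could write back only the old ones as well.
 */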
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* We may have stalled while waiting for I/O to complete. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on dirty list? Refile it. */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock) {
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* We assume that we only come through here on a regular schedule
	   of some sort, so update the load averages for the buffer sizes. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}

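/*
 * This is the interface to bdflush. As we get more sophisticated, we can
 * pass tuning parameters to this "process", to adjust how it behaves.
 * Invoked with func == 1 it simply flushes old buffers out (i.e. it
 * behaves like update). With func >= 2, parameter number (func - 2) >> 1
 * is read (even func) or set to `data' (odd func). With func == 0, the
 * caller becomes the bdflush daemon itself.
 */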
asmlinkage int sys_bdflush(int func, int data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if(!suser()) return -EPERM;

	if(func == 1)
		return sync_old_buffers();

	/* Basically func 1 means flush old buffers, and then 2*n and
	   2*n+1 mean read and set parameter n. */
	if(func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM) return -EINVAL;
		if((func & 1) == 0) {
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if(error) return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	if(bdflush_running++) return -EBUSY;	/* Only one copy of this running at a time */

	/* OK, from here on is the daemon. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* We may have stalled while waiting for I/O to complete. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on dirty list? Refile it. */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock) {
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;

					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers around, skip the
		   sleep and flush some more. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}