This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/config.h>
20 #include <linux/sched.h>
21 #include <linux/kernel.h>
22 #include <linux/major.h>
23 #include <linux/string.h>
24 #include <linux/locks.h>
25 #include <linux/errno.h>
26 #include <linux/malloc.h>
27
28 #include <asm/system.h>
29 #include <asm/segment.h>
30 #include <asm/io.h>
31
32 #define NR_SIZES 4
33 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
34 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
35
36 #define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
37
38 static int grow_buffers(int pri, int size);
39 static int shrink_specific_buffers(unsigned int priority, int size);
40 static int maybe_shrink_lav_buffers(int);
41
42 static int nr_hash = 0;
43 static struct buffer_head ** hash_table;
44 struct buffer_head ** buffer_pages;
45 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
46 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
47 static struct buffer_head * unused_list = NULL;
48 static struct wait_queue * buffer_wait = NULL;
49
50 int nr_buffers = 0;
51 int nr_buffers_type[NR_LIST] = {0,};
52 int nr_buffers_size[NR_SIZES] = {0,};
53 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
54 int buffer_usage[NR_SIZES] = {0,};
55 int buffers_lav[NR_SIZES] = {0,};
56 int nr_free[NR_SIZES] = {0,};
57 int buffermem = 0;
58 int nr_buffer_heads = 0;
59 static int min_free_pages = 20;
60 extern int *blksize_size[];
61
62
63 static void wakeup_bdflush(int);
64
65 #define N_PARAM 9
66 #define LAV
67
/*
 * Tunable parameters for the bdflush write-back mechanism.  The union
 * lets the parameters be accessed both by name (b_un) and as a flat
 * array (data[]) so sys_bdflush can range-check updates against
 * bdflush_min[]/bdflush_max[] below.
 */
static union bdflush_param{
	struct {
		int nfract;	/* percentage of dirty (non-shared) buffers that
				   triggers a bdflush wakeup (see refile_buffer) */

		int ndirty;	/* NOTE(review): presumably max buffers written per
				   bdflush cycle - used outside this view, confirm */

		int nrefill;	/* number of buffers' worth of space (nrefill*size)
				   that refill_freelist tries to obtain */

		int nref_dirt;	/* NOTE(review): dirty-buffer threshold - used
				   outside this view, confirm */

		int clu_nfract;	/* NOTE(review): presumably %% of buffers reserved
				   for clustering - used outside this view, confirm */

		int age_buffer;	/* jiffies a dirty data buffer may age before its
				   flush deadline (set_writetime, flag == 0) */

		int age_super;	/* jiffies a dirty superblock-class buffer may age
				   (set_writetime, flag != 0); also the LOCKED vs
				   LOCKED1 split in refile_buffer */

		int lav_const;	/* constant in the per-size load-average comparison
				   (maybe_shrink_lav_buffers) */

		int lav_ratio;	/* NOTE(review): load-average ratio - used outside
				   this view, confirm */

	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};

/* Legal range for each parameter, indexed like bdf_prm.data[]. */
static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
101
102
103
104
105
106
107
108
109
110
/*
 * Sleep until the buffer is unlocked.  b_count is raised around the
 * wait so the buffer cannot be reclaimed while we sleep on it.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* Set the task state BEFORE testing b_lock, so a wakeup that
	   arrives between the test and schedule() is not lost. */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
127
128
129
130
131
132
133
134
135
136
137
/*
 * Write out (and optionally wait for) all dirty buffers of a device.
 * dev == 0 means all devices.
 *
 * With wait == 0 a single asynchronous pass is made.  With wait != 0
 * up to three passes run:
 *   pass 0 - start writes, skip locked buffers;
 *   pass 1 - wait on locked buffers, start remaining writes;
 *   pass 2 - only wait for completion, start nothing new.
 * Returns nonzero if a buffer was found unlocked, clean but not
 * uptodate after I/O, i.e. a write apparently failed.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* Walk every LRU list; a list is restarted from its head whenever
	   a buffer is found to have migrated to another list. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Bound the scan at twice the list length so refiling under
		   our feet cannot make the walk endless. */
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Buffer under I/O: skip it on early passes,
				   sleep on it once we are in a waiting pass. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				/* Sleeping may have reshuffled the lists. */
				goto repeat2;
			}
			/* A completed request that left the buffer clean but
			   not uptodate signals an I/O error. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}
			/* Final pass only waits; don't start new writes. */
			if (!bh->b_dirt || pass>=2)
				continue;
			/* Don't write locked buffers. */
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			/* Dirty buffers should live on BUF_DIRTY; report strays. */
			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If asked to wait and anything was skipped or newly started,
	   go around again - at most passes 1 and 2. */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
213
/*
 * Asynchronously flush buffers, superblocks and inodes of a device
 * (dev == 0 means all devices).  The second sync_buffers pass picks up
 * buffers dirtied by sync_supers/sync_inodes; nothing is waited for.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}
221
/*
 * Like sync_dev(), but the final sync_buffers call waits for I/O
 * completion and its error status is returned to the caller.
 */
int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}
229
/*
 * sync(2): start write-out for every device; does not wait, and
 * always reports success.
 */
asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
235
/*
 * Generic f_op->fsync implementation: synchronizes the whole device
 * the file lives on, not just the file's own blocks.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
240
/*
 * fsync(2): validate the descriptor and dispatch to the file's fsync
 * operation.  -EBADF for a bad fd, -EINVAL if the filesystem provides
 * no fsync, -EIO if the operation reports failure.
 */
asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}
254
/*
 * Throw away the cached contents of every buffer belonging to dev:
 * clear uptodate/dirty/req state so the data will be re-read.  Used
 * e.g. after media change.
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		/* Scan twice the list length to bound the walk. */
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			/* wait_on_buffer may sleep; the buffer may have been
			   reused for another device meanwhile - re-check. */
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}
274
275 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
276 #define hash(dev,block) hash_table[_hashfn(dev,block)]
277
/*
 * Unlink a buffer from its (doubly linked, NULL-terminated) hash
 * chain, updating the chain head if necessary.
 */
static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	/* If bh was the chain head, advance the head. */
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}
288
/*
 * Unlink a buffer from its circular LRU list.  b_dev == 0xffff marks
 * a free-list buffer (see put_last_free), which must never be here.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	/* Head still points at bh: it was the only element. */
	if(lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}
303
/*
 * Unlink a buffer from the circular per-size free list and maintain
 * nr_free[].  Free-list buffers are marked with b_dev == 0xffff.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	/* Self-linked next pointer means bh is the only element. */
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
323
/*
 * Remove a buffer from whichever queues it is on: the free list for
 * unattached buffers (b_dev == 0xffff), otherwise the hash chain and
 * LRU list, with the per-type counters kept in step.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		remove_from_free_list(bh);

		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}
336
/*
 * Move a buffer to the tail of its (circular) LRU list.  If bh is the
 * head, advancing the head is enough: the old head then becomes the
 * element just before the new head, i.e. the tail.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* List may have become empty after the removal. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	/* Splice bh in just before the head == at the tail. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
359
/*
 * Append a buffer to the tail of the per-size free list, marking it
 * free by setting b_dev = 0xffff and bumping nr_free[].
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* free-list marker tested throughout */

	/* Empty list: seed it with bh self-linked via b_prev_free so the
	   generic splice below works. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
381
/*
 * Insert a buffer into the queues appropriate for its state: the free
 * list for unattached buffers, otherwise the tail of its LRU list and
 * (for buffers with a real device) the hash chain head.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	/* Empty LRU list: seed with a self-linked element first. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Buffers with b_dev == 0 are not hashed (no device to look
	   them up by). */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
411
412 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
413 {
414 struct buffer_head * tmp;
415
416 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
417 if (tmp->b_dev==dev && tmp->b_blocknr==block)
418 if (tmp->b_size == size)
419 return tmp;
420 else {
421 printk("VFS: Wrong blocksize on device %d/%d\n",
422 MAJOR(dev), MINOR(dev));
423 return NULL;
424 }
425 return NULL;
426 }
427
428
429
430
431
432
433
434
/*
 * Like find_buffer(), but takes a reference and waits for any pending
 * I/O.  Since wait_on_buffer may sleep, the identity of the buffer is
 * re-checked afterwards and the lookup retried if it changed.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		/* Buffer was repurposed while we slept: drop and retry. */
		bh->b_count--;
	}
}
449
/*
 * Change the soft blocksize of a device.  Flushes the device first,
 * then invalidates and unhashes every cached buffer of the old size so
 * stale data cannot be found under the new geometry.
 */
void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	/* First-time setup to the default size needs no flushing. */
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Invalidate everything cached at the old blocksize. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			/* Re-check after sleeping. */
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					 bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}
494
/* Reclaim cost of a buffer: dirty buffers cost more than locked ones;
   0 means it can be taken immediately. */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)

/*
 * Top up the free list for the given buffer size.  Strategy, in order:
 *   1. grow new buffers from free pages while memory is plentiful;
 *   2. shrink buffers of other, less-loaded sizes and regrow;
 *   3. reclaim the least-recently-used clean, unshared, idle buffers
 *      of this size from the LRU lists;
 *   4. as a last resort grow with GFP_ATOMIC and kick bdflush.
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* Plenty already free for this size: nothing to do. */
	if (nr_free[isize] > 100)
		return;

	/* Aim for nrefill buffers' worth of new space. */
	needed =bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* Steal pages from over-provisioned other sizes, then regrow. */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* Build, per LRU list, a "candidate": the first reclaimable
	   buffer (right size, unshared, clean, idle).  DIRTY and SHARED
	   lists are never reclaimed from directly. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			/* Shared or dirty buffers belong on other lists. */
			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* LOCKED lists are time-ordered: the first locked
			   buffer means everything after is locked too. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Pick the globally least-recently-used candidate. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* Reclaim the winner onto the free list, then advance that
	   list's candidate and loop for more. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Re-scan list i for its next reclaimable buffer (same
		   filter as the candidate pass above). */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* No candidates left: try to grow from remaining free pages. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* Last resort: atomic allocation, and wake bdflush to clean
	   dirty buffers if even that fails. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}
677
678
679
680
681
682
683
684
685
686
687
/*
 * Return a buffer for (dev,block) with b_count raised, either found in
 * the cache or freshly taken from the free list.  The buffer is NOT
 * guaranteed uptodate; callers needing the data must read it (bread).
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Per-size usage counter feeds the load-average accounting. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* Only clean, uptodate buffers are aged to the LRU tail. */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist may have slept: someone else may have created
	   the buffer meanwhile, so look the block up again. */
	if (find_buffer(dev,block,size))
		 goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* Fresh buffer: one reference, not uptodate, not dirty. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
729
730 void set_writetime(struct buffer_head * buf, int flag)
731 {
732 int newtime;
733
734 if (buf->b_dirt){
735
736 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
737 bdf_prm.b_un.age_buffer);
738 if(!buf->b_flushtime || buf->b_flushtime > newtime)
739 buf->b_flushtime = newtime;
740 } else {
741 buf->b_flushtime = 0;
742 }
743 }
744
745
/* Target list indexed by the state bitmask built in refile_buffer:
   bit 0 = shared page, bit 1 = locked, bit 2 = dirty. */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
				      BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

/*
 * Move a buffer onto the LRU list matching its current state, and wake
 * bdflush when the dirty-buffer ratio exceeds the nfract threshold.
 */
void refile_buffer(struct buffer_head * buf){
	int i, dispose;
	i = 0;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	/* Page refcount > 1 means the page is shared with user mappings. */
	if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
	if(buf->b_lock) i |= 2;
	if(buf->b_dirt) i |= 4;
	dispose = buffer_disposition[i];
	/* A clean, formerly shared buffer goes to UNSHARED first. */
	if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
		 dispose = BUF_UNSHARED;
	if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list)  {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			 buf->b_lru_time = jiffies;
		/* Recently-dirtied locked buffers go to LOCKED1 so the
		   LOCKED lists stay roughly time-ordered. */
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			 dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			 wakeup_bdflush(0);
	}
}
776
/*
 * Release a buffer obtained via getblk/bread: wait out pending I/O,
 * refresh its write deadline, refile it, and drop the reference.
 * Waking buffer_wait lets tasks sleeping for free buffers retry.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* Deadline/refile while we still hold the reference. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
795
796
797
798
799
/*
 * Read a block into the cache and return its buffer uptodate, or NULL
 * on I/O failure (the reference is dropped on failure).
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
						MAJOR(dev), MINOR(dev));
		return NULL;
	}
	/* Cache hit with valid data: no I/O needed. */
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	/* Read failed: release the buffer and report failure. */
	brelse(bh);
	return NULL;
}
818
819
820
821
822
823
824
/* Maximum number of buffers submitted in one read-ahead batch. */
#define NBUF 16

/*
 * Like bread(), but additionally starts read-ahead on the following
 * blocks, bounded by the device's read_ahead setting, NBUF, and the
 * file size.  Only the first buffer is waited for and returned.
 */
struct buffer_head * breada(dev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	/* NOTE(review): this masks both operands with (bufsize-1) before
	   subtracting, rather than using filesize - pos; looks suspect
	   as a remaining-blocks computation - confirm intent. */
	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		/* Stop read-ahead at the first already-cached block. */
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Submit the whole batch in one request. */
	ll_rw_block(READ, j, bhlist);

	/* Release the read-ahead buffers; only bhlist[0] is kept. */
	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
879
880
881
882
/*
 * Return a buffer head to the unused_list.  The wait queue pointer is
 * preserved across the memset because sleepers may still reference it.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
893
/*
 * Replenish unused_list by carving a whole fresh page into buffer
 * heads.  Silently does nothing if heads are available or no page can
 * be allocated (the caller tolerates an empty unused_list).
 */
static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	/* i = heads per page; nr_buffer_heads grows by the same amount. */
	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
		bh->b_next_free = unused_list;	/* only make link */
		unused_list = bh++;
	}
}
910
/*
 * Pop a buffer head off unused_list (replenishing it first), with the
 * fields callers rely on reset.  Returns NULL if none can be had.
 */
static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}
926
927
928
929
930
931
932
/*
 * Carve a page into buffer heads of the given size, linked through
 * b_this_page (head corresponds to the lowest offset; the chain is
 * NOT yet circular - the caller closes it).  Returns NULL, releasing
 * everything, if a head cannot be allocated.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* offset is unsigned: the loop ends when the subtraction wraps
	   past zero and the result is no longer < PAGE_SIZE. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* not attached to any device yet */
	}
	return head;

/* Allocation failed partway: give back every head obtained so far. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
963
964 static void read_buffers(struct buffer_head * bh[], int nrbuf)
965 {
966 int i;
967 int bhnum = 0;
968 struct buffer_head * bhr[8];
969
970 for (i = 0 ; i < nrbuf ; i++) {
971 if (bh[i] && !bh[i]->b_uptodate)
972 bhr[bhnum++] = bh[i];
973 }
974 if (bhnum)
975 ll_rw_block(READ, bhnum, bhr);
976 for (i = 0 ; i < nrbuf ; i++) {
977 if (bh[i]) {
978 wait_on_buffer(bh[i]);
979 }
980 }
981 }
982
/*
 * Given the first buffer of a would-be page, check that every block in
 * b[] is cached contiguously on one page-aligned page.  On success the
 * caller's page at 'address' is freed and the shared buffer page is
 * returned (with its refcount raised); on failure 0 is returned and
 * all references are dropped.
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				    dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;

	page = (unsigned long) first->b_data;
	/* The first buffer must start exactly on a page boundary. */
	if (page & ~PAGE_MASK) {
		brelse(first);
		return 0;
	}
	/* Raise the page refcount: it is about to be shared. */
	mem_map[MAP_NR(page)]++;
	bh[0] = first;
	nrbuf = 1;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		/* Each buffer's data must sit at its exact page offset. */
		if (page+offset != (unsigned long) first->b_data)
			goto no_go;
	}
	read_buffers(bh,nrbuf);
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	/* Use the buffer page instead of the caller's page. */
	free_page(address);
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	/* Drops the refcount taken above. */
	free_page(page);
	return 0;
}
1023
/*
 * Turn the caller's page at 'address' directly into an aligned cluster
 * of buffers for blocks b[], provided none of those blocks is already
 * cached.  Returns 'address' (now doubling as buffer memory, refcount
 * raised) on success, 0 on failure.
 */
static unsigned long try_to_load_aligned(unsigned long address,
				 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;

	/* All blocks must exist and none may already be cached. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	/* Initialize each buffer and enter it into the cache queues. */
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	/* Close the b_this_page chain into a ring and register the page. */
	bh->b_this_page = tmp;
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	/* Give the buffer heads back; the page stays with the caller. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
/*
 * Try to satisfy a page-in by sharing buffer-cache memory: if the
 * first block is cached, verify page alignment (check_aligned);
 * otherwise build a fresh aligned cluster (try_to_load_aligned).
 * Returns the page to map, or 0 if sharing is not possible.
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}
1108
/* Copy 'size' bytes (a multiple of 4) with rep movsl; x86-specific. */
#define COPYBLK(size,from,to) \
__asm__ __volatile__("rep ; movsl": \
	:"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
	:"cx","di","si")

/*
 * Read a full page worth of blocks b[] into the page at 'address'.
 * Unless no_share is set, first try to share buffer-cache pages
 * directly; otherwise read through the cache and copy.  Holes
 * (b[i] == 0) are left untouched.  Returns the page to use.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	/* Gather the buffers covering the page. */
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	/* Copy each successfully read block into its slot in the page. */
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				COPYBLK(size, (unsigned long) bh[i]->b_data, where);
			brelse(bh[i]);
		}
	}
	return address;
}
1149
1150
1151
1152
1153
/*
 * Allocate one page, carve it into buffers of 'size' bytes and insert
 * them into the per-size free list.  Returns 1 on success, 0 if the
 * size is invalid or memory could not be obtained.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	/* Size must be a multiple of 512 and fit in a page. */
	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	/* Link the new buffers into the circular free list (or start a
	   new one if the list was empty). */
	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	/* Close the page ring, register the page, and wake any task
	   sleeping for free buffers. */
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}
1204
1205
1206
1207
1208
/*
 * Try to free the whole page bh lives on.  Succeeds only if every
 * buffer on the page is idle (no count, not dirty, not locked, no
 * sleepers); then all heads are released and the page freed.  *bhp is
 * adjusted so the caller's list walk can continue (NULL if the walked
 * list became empty).  Returns nonzero iff the page's refcount
 * dropped to zero.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* First pass: verify every buffer on the page is reclaimable. */
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			 return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* Second pass: detach and release every buffer head. */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		/* Keep the caller's cursor valid while we unlink. */
		if (p == *bhp)
		  {
		    *bhp = p->b_prev_free;
		    if (p == *bhp) /* Was this the last in the list? */
		      *bhp = NULL;
		  }
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
/*
 * Load-average based rebalancing: if some buffer size other than
 * 'size' holds more buffers than its share of recent usage justifies,
 * shrink it to release a page.  size == 0 exempts no size.  Returns 1
 * if a page was freed, else 0.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Totals over all sizes; shared buffers don't count towards a
	   size's holdings. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	 {
		 total_lav += buffers_lav[nlist];
		 if(nr_buffers_size[nlist]) n_sizes++;
		 total_n_buffers += nr_buffers_size[nlist];
		 total_n_buffers -=  nr_buffers_st[nlist][BUF_SHARED];
	 }

	/* The size being refilled is never shrunk. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		 for(nlist = 0; nlist < NR_SIZES; nlist++)
		  {
			  if(nlist == isize) continue;
			  /* Shrink a size whose holdings exceed its
			     usage-proportional share (scaled by lav_const). */
			  if(nr_buffers_size[nlist] &&
			     bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			     total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				   if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					    return 1;
		  }
	return 0;
}
1297
1298
1299
1300
1301
1302
1303
/*
 * Memory-pressure entry point: try to free one page of buffer memory.
 * Lower priority means more aggressive (priority < 2 syncs first).
 * Returns nonzero if a page was released.
 */
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	/* Prefer rebalancing an over-provisioned size first. */
	if(maybe_shrink_lav_buffers(0)) return 1;

	return shrink_specific_buffers(priority, 0);
}
1317
/*
 * Free one page of buffers, restricted to a given buffer size
 * (size == 0 means any size).  Free lists are tried first, then the
 * LRU lists, with priority controlling how much of each list is
 * scanned and how insistently locked buffers are handled.  Returns 1
 * if a page was freed.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* -1 means "all sizes". */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	/* First try whole pages sitting on the free lists. */
	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				 continue;
			if (try_to_free(bh, &bh))
				 return 1;
			if(!bh) break; /* Some interrupt must have used it after we
					  freed the page.  No big deal - keep looking. */
		}
	}

	/* Then scan the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		/* At high priorities leave shared buffers alone. */
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Higher priority scans a smaller fraction of the list. */
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* Buffer migrated: restart this list. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				 continue;
			if(size && bh->b_size != size) continue;
			if (bh->b_lock)
				 if (priority)
					  continue;
				 else
					  wait_on_buffer(bh);
			/* Start an async write for dirty buffers rather
			   than freeing them. */
			if (bh->b_dirt) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				 return 1;
			if(!bh) break;
		}
	}
	return 0;
}
1378
1379
/*
 * Dump buffer-cache statistics to the kernel log: totals, a summary
 * per LRU list and a per-size breakdown across the lists.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory:   %6dkB\n",buffermem>>10);
	printk("Buffer heads:    %6d\n",nr_buffer_heads);
	printk("Buffer blocks:   %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
	  shared = found = locked = dirty = used = lastused = 0;
	  bh = lru_list[nlist];
	  if(!bh) continue;
	  do {
		found++;
		if (bh->b_lock)
			locked++;
		if (bh->b_dirt)
			dirty++;
		/* Page refcount > 1 means the buffer page is shared. */
		if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
		if (bh->b_count)
			used++, lastused = found;
		bh = bh->b_next_free;
	  } while (bh != lru_list[nlist]);
	  printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		 nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size    [LAV]     Free  Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			 printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1418
1419
1420
1421
1422
/*
 * Try to hand an entire page worth of buffers over to a new device and
 * consecutive block range.  bh is one buffer on the candidate page.
 * The page is taken only if it is unshared and every buffer on it is
 * idle (unused, clean and unlocked).  Returns 1 on success, 0 if the
 * page cannot be reassigned.  *bhp is set to the buffer examined so
 * the caller's free-list scan can continue from it.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
			   dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* Page mapped more than once - cannot steal it. */
	if(mem_map[MAP_NR(page)] != 1) return 0;
	/* First pass: any busy, dirty or locked buffer vetoes the page. */
	tmp = bh;
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Advance to the buffer whose data sits at the page boundary so
	   block numbers are assigned in ascending order from there. */
	while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* Second pass: re-key every buffer on the page to the new
	   device/block numbers, rehashing each one. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;	/* contents no longer valid for new block */
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476 static int reassign_cluster(dev_t dev,
1477 unsigned int starting_block, int size)
1478 {
1479 struct buffer_head *bh;
1480 int isize = BUFSIZE_INDEX(size);
1481 int i;
1482
1483
1484
1485
1486
1487 while(nr_free[isize] < 32) refill_freelist(size);
1488
1489 bh = free_list[isize];
1490 if(bh)
1491 for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
1492 if (!bh->b_this_page) continue;
1493 if (try_to_reassign(bh, &bh, dev, starting_block))
1494 return 4;
1495 }
1496 return 0;
1497 }
1498
1499
1500
1501
1502
/*
 * Allocate one fresh page and carve it into "size"-byte buffers for
 * the consecutive blocks starting at "block" on dev.  Fails (returns
 * 0) if no page is available, buffer heads run out, or any covered
 * block is already in the cache.  Returns 4 on success
 * (NOTE(review): callers appear to treat any nonzero return as
 * success - confirm the significance of the value 4).
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	/* If any target block is already cached we must not create a
	   duplicate - bail out and undo. */
	nblock = block;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	/* Initialise and hash in every buffer on the page, remembering
	   them in arr[] so the references can be dropped afterwards. */
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;	/* hold while the page is set up */
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	/* Close the circular b_this_page ring, record the page's buffers
	   and account the memory; then drop our temporary references. */
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* Undo: return all buffer heads and free the page. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1559
1560 unsigned long generate_cluster(dev_t dev, int b[], int size)
1561 {
1562 int i, offset;
1563
1564 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1565 if(i && b[i]-1 != b[i-1]) return 0;
1566 if(find_buffer(dev, b[i], size)) return 0;
1567 };
1568
1569
1570
1571
1572
1573 if(maybe_shrink_lav_buffers(size))
1574 {
1575 int retval;
1576 retval = try_to_generate_cluster(dev, b[0], size);
1577 if(retval) return retval;
1578 };
1579
1580 if (nr_free_pages > min_free_pages)
1581 return try_to_generate_cluster(dev, b[0], size);
1582 else
1583 return reassign_cluster(dev, b[0], size);
1584 }
1585
1586
1587
1588
1589
1590
1591
1592
1593 void buffer_init(void)
1594 {
1595 int i;
1596 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1597
1598 if (high_memory >= 4*1024*1024) {
1599 min_free_pages = 200;
1600 if(high_memory >= 16*1024*1024)
1601 nr_hash = 16381;
1602 else
1603 nr_hash = 4093;
1604 } else {
1605 min_free_pages = 20;
1606 nr_hash = 997;
1607 };
1608
1609 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1610 sizeof(struct buffer_head *));
1611
1612
1613 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1614 sizeof(struct buffer_head *));
1615 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1616 buffer_pages[i] = NULL;
1617
1618 for (i = 0 ; i < nr_hash ; i++)
1619 hash_table[i] = NULL;
1620 lru_list[BUF_CLEAN] = 0;
1621 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1622 if (!free_list[isize])
1623 panic("VFS: Unable to initialize buffer free list!");
1624 return;
1625 }
1626
1627
1628
1629
1630
1631
1632
1633
/* bdflush daemon synchronisation: the daemon sleeps on bdflush_wait
   until woken by wakeup_bdflush(); callers that asked to wait sleep on
   bdflush_done until a flush pass completes. */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

/* Nonzero once a process has installed itself as the bdflush daemon
   via sys_bdflush(0, ...). */
static int bdflush_running = 0;
1638
1639 static void wakeup_bdflush(int wait)
1640 {
1641 if(!bdflush_running){
1642 printk("Warning - bdflush not running\n");
1643 sync_buffers(0,0);
1644 return;
1645 };
1646 wake_up(&bdflush_wait);
1647 if(wait) sleep_on(&bdflush_done);
1648 }
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
/*
 * One periodic flush pass (bdflush "func 1"): sync superblocks and
 * inodes, then write out every dirty buffer whose flush time has
 * expired.  Finally age the per-size buffer load averages.
 * Always returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	/* Normally only the dirty list needs scanning. */
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* The list can change under us while a buffer
				   is written; restart if this one moved away. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean, unlocked buffer on the dirty list:
				   move it to the right list instead. */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				/* Not old enough to be flushed yet. */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;	/* pin across the write */
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Decay the per-size usage load averages and reset the counters. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1727
1728
1729
1730
1731
1732
1733
1734
/*
 * The bdflush system call (superuser only).
 *   func == 1 : perform one sync_old_buffers() pass.
 *   func >= 2 : get (even func) or set (odd func) tuning parameter
 *               number (func-2)>>1; "data" is a user pointer for get,
 *               the new value for set.
 *   func == 0 : turn the calling process into the bdflush daemon;
 *               returns -EBUSY if one already exists, otherwise loops
 *               forever writing dirty buffers until killed by SIGKILL.
 */
asmlinkage int sys_bdflush(int func, int data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* Parameter get/set interface. */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if((func & 1) == 0) {
			/* get: "data" is a user-space pointer to an int. */
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		/* set: range-check the new value first. */
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	/* Become the daemon; only one instance is allowed. */
	if (bdflush_running)
		return -EBUSY;
	bdflush_running++;

	/* Daemon main loop: flush dirty buffers, then sleep until woken
	   or until the dirty fraction grows too large. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		 {
			 ndirty = 0;
		 repeat:
			 bh = lru_list[nlist];
			 if(bh)
				  for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				       bh = next) {
					  /* Restart the scan if the buffer left this list. */
					  if(bh->b_list != nlist) goto repeat;
					  next = bh->b_next_free;
					  if(!lru_list[nlist]) {
						  printk("Dirty list empty %d\n", i);
						  break;
					  }

					  /* Clean, unlocked buffer on the dirty list:
					     refile it instead of writing. */
					  if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					   {
						   refile_buffer(bh);
						   continue;
					   }

					  if (bh->b_lock || !bh->b_dirt)
						   continue;

					  /* Pin the buffer across the write. */
					  bh->b_count++;
					  ndirty++;
					  bh->b_flushtime = 0;
					  ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					  if(nlist != BUF_DIRTY) ncount++;
#endif
					  bh->b_count--;
				  }
		 }
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		/* Release anyone blocked in wakeup_bdflush(1). */
		wake_up(&bdflush_done);

		/* Sleep only if the dirty fraction is below threshold;
		   otherwise loop immediately for another pass. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			/* NOTE(review): this discards ALL pending signals,
			   not just SIGKILL - confirm that is intended. */
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857