This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it.
 */

/*
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk-caches.
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>

#define NR_SIZES 4
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
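/*
 * Illustrative sketch (not part of the original file): BUFSIZE_INDEX maps a
 * buffer size to its slot in the per-size arrays by dividing by 512 and
 * indexing buffersize_index, so 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3,
 * and any unsupported size yields -1. A hypothetical self-check could read:
 *
 *	static void check_bufsize_index(void)
 *	{
 *		if (BUFSIZE_INDEX(512) != 0 || BUFSIZE_INDEX(1024) != 1 ||
 *		    BUFSIZE_INDEX(2048) != 2 || BUFSIZE_INDEX(4096) != 3)
 *			panic("buffersize_index table is inconsistent");
 *	}
 */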

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;	/* size of hash table */
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};	/* usage counts, feed the load average */
int buffers_lav[NR_SIZES] = {0,};	/* load average of buffer usage */
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
extern int *blksize_size[];

/* Here is the parameter block for the bdflush process. */
static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV

static union bdflush_param {
	struct {
		int nfract;	/* Percentage of buffer cache dirty to
				   activate bdflush */
		int ndirty;	/* Maximum number of dirty blocks to write out
				   per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain
				   each time we call refill */
		int nref_dirt;	/* Dirty buffer threshold for activating
				   bdflush when trying to refill buffers. */
		int clu_nfract;	/* Percentage of buffer cache to scan to
				   search for free clusters */
		int age_buffer;	/* Time for normal buffer to age before
				   we flush it */
		int age_super;	/* Time for superblock to age before we
				   flush it */
		int lav_const;	/* Constant used for load average (time
				   constant) */
		int lav_ratio;	/* Used to determine how low a lav for a
				   particular size can go before we start to
				   trim back the buffers */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};

/* The lav constant is set for 1 minute, as long as the update process runs
   every 5 seconds.  If you change the frequency of update, the time
   constant will also change. */

/* These are the min and max parameter values that we will allow to be
   assigned. */
static int bdflush_min[N_PARAM] = {  0,   10,    5,   25,   0,   100,   100,    1, 1};
static int bdflush_max[N_PARAM] = {100, 5000, 2000, 2000, 100, 60000, 60000, 2047, 5};
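
/*
 * Illustrative sketch (not part of the original file): sys_bdflush() below
 * exposes these nine tunables. For parameter index i, func = 2*i + 2 copies
 * bdf_prm.data[i] out to the address passed in "data", and func = 2*i + 3
 * stores "data" into slot i after checking it against bdflush_min[] and
 * bdflush_max[]. A hypothetical user-level wrapper (the bdflush() syscall
 * stub name is assumed) might read:
 *
 *	int get_bdflush_param(int i, int *value)
 *	{
 *		return bdflush(2*i + 2, (int) value);
 *	}
 *
 *	int set_bdflush_param(int i, int value)
 *	{
 *		return bdflush(2*i + 3, value);
 *	}
 */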

/*
 * Rewrote the wait-routines to use the "new" wait-queue functionality,
 * and getting rid of the cli-sti pairs. The wait-queue routines still
 * need cli-sti, but now it's just a couple of 386 instructions or so.
 *
 * Note that the real wait_on_buffer() is an inline function that checks
 * if 'b_wait' is set before calling this, so that the queues aren't set
 * up unnecessarily.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}

/*
 * Call sync_buffers() with wait != 0 to ensure that the call does not
 * return until all buffer writes have completed.  Sync() may return
 * before the writes have finished; fsync() may not.
 */

/*
 * Some buffers (bitmaps for filesystems) spontaneously dirty themselves
 * without ever brelse being called.  We will ultimately want to put these
 * in a separate list, but for now we search all of the lists for dirty
 * buffers.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

	/* One pass for no-wait, three for wait:
	   0) write out all dirty, unlocked buffers;
	   1) write out all dirty buffers, waiting if locked;
	   2) wait for completion by waiting for all buffers to unlock. */
repeat:
	retry = 0;
repeat2:
	ncount = 0;

	/* We search all lists as a failsafe mechanism, not because we expect
	   there to be dirty buffers on any of the other lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Buffer is locked; skip it unless all
				   buffers are to be flushed, and we are not
				   on the first pass. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				goto repeat2;
			}

			/* If an unlocked buffer is not uptodate, there has
			   been an IO error. Skip it. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}

			/* Don't write clean buffers.  Don't write ANY
			   buffers on the third pass. */
			if (!bh->b_dirt || pass >= 2)
				continue;
			/* Don't bother about locked buffers. */
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If we are waiting for the sync to succeed, and if any dirty
	   blocks were written, then repeat; on the second pass, only wait
	   for buffers being written (do not pass to write any more
	   buffers). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
	return err;
}

void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}

int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}

asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}
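
/*
 * Illustrative sketch (not part of the original file): from user space the
 * above is the usual open/write/fsync sequence; sys_fsync() returns -EBADF
 * for a bad descriptor, -EINVAL if the filesystem supplies no fsync
 * operation, and -EIO if the flush fails. Names below are hypothetical:
 *
 *	int save(const char *path, const char *buf, int len)
 *	{
 *		int fd = open(path, O_WRONLY | O_CREAT, 0644);
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, buf, len) != len || fsync(fd) != 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */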

void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
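
/*
 * Illustrative sketch (not part of the original file): the hash chain for a
 * (dev, block) pair is picked by XOR-ing the two values modulo nr_hash, so
 * with nr_hash = 997, block 274 of a hypothetical device 0x0301 lands in
 * bucket (0x0301 ^ 274) % 997. A lookup then walks that one chain only,
 * exactly as find_buffer() does below:
 *
 *	struct buffer_head *tmp;
 *	for (tmp = hash(dev, block); tmp != NULL; tmp = tmp->b_next)
 *		if (tmp->b_dev == dev && tmp->b_blocknr == block)
 *			break;
 */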

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if(lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		remove_from_free_list(bh); /* Free list entries should not be
					      in the hash queue */
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Add to back of LRU list. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;  /* So it is obvious we are on the free list. */

	/* Add to back of free list. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};

	/* put at end of lru list */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* put the buffer in a new hash-queue if it has a device */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(dev_t dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev == dev && tmp->b_blocknr == block)
			if (tmp->b_size == size)
				return tmp;
			else {
				printk("VFS: Wrong blocksize on device %d/%d\n",
				       MAJOR(dev), MINOR(dev));
				return NULL;
			}
	return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}
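
/*
 * Illustrative sketch (not part of the original file): get_hash_table() is
 * the lookup-only entry point; unlike getblk() it never allocates a new
 * buffer. A caller that just wants to peek at an already-cached block
 * (block number and size hypothetical) could do:
 *
 *	struct buffer_head *bh = get_hash_table(dev, block, 1024);
 *	if (bh) {
 *		... inspect bh->b_data, bh->b_uptodate, bh->b_dirt ...
 *		brelse(bh);
 *	}
 */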

void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* We need to be quite careful how we do this - we are moving entries
	   around on the free list, and we can get in a loop if we are not
	   careful. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}

#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
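
/*
 * Illustrative note (not part of the original file): BADNESS ranks a
 * buffer's reclaim cost from its dirty and lock bits:
 *
 *	clean, unlocked:  (0<<1)+0 = 0
 *	clean, locked:    (0<<1)+1 = 1
 *	dirty, unlocked:  (1<<1)+0 = 2
 *	dirty, locked:    (1<<1)+1 = 3
 *
 * refill_freelist() below only steals buffers with BADNESS(bh) == 0.
 */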

void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* First see if we even need this.  Sometimes it is advantageous
	   to request some blocks in a filesystem that we know we will
	   be needing ahead of time. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed = bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* See if there are too many buffers of a different size.
	   If so, victimize those. */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* OK, we cannot grow the buffer cache, now try to get some
	   from the lru list.

	   First set the candidate pointers to usable buffers.  This
	   should be quick nearly to the point of being unnoticeable. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Buffers are written in the order they are placed
			   on the locked list. If we encounter a locked
			   buffer here, this means that the rest of them
			   are also locked. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL; /* Nothing on this list */
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Now see which candidate wins the election. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, use it, then get a new candidate from that list. */
	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;  /* Got the last one */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Now all we need to do is advance the candidate pointer
		   from the winner list to the next usable buffer. */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				/* Buffers are written in the order they are
				   placed on the locked list. If we encounter
				   a locked buffer here, this means that the
				   rest of them are also locked. */
				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL; /* Nothing here */
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Too bad, that was not enough. Try a little harder to grow some. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* and repeat until we find something good */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size lav. */
	buffer_usage[isize]++;

	/* If there are too many dirty buffers, we wake up the update process
	   now so as to ensure that there are still clean buffers available
	   for user processes to use (and dirty). */
repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}

void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buf->b_dirt){
		/* Move buffer to dirty list if jiffies is clear. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if(!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

/* A buffer may need to be moved from one buffer list to another
   (e.g. in case it is not shared any more). Handle this. */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
				    BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

void refile_buffer(struct buffer_head * buf){
	int i, dispose;
	i = 0;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
	if(buf->b_lock) i |= 2;
	if(buf->b_dirt) i |= 4;
	dispose = buffer_disposition[i];
	if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
		dispose = BUF_UNSHARED;
	if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}

void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
		       MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
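
/*
 * Illustrative sketch (not part of the original file): the canonical bread()
 * pattern used by the filesystems. The block number and size below are
 * hypothetical; every successful bread() must be paired with a brelse():
 *
 *	struct buffer_head *bh = bread(dev, 1, 1024);
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data, which is now guaranteed uptodate ...
 *	brelse(bh);
 */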

/*
 * breada() is like bread(), but also starts asynchronous read-ahead on up
 * to NBUF following blocks, bounded by the device's read_ahead setting and
 * by the file size.
 */
#define NBUF 16

struct buffer_head * breada(dev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * Return an unused buffer head to the pool, clearing everything except the
 * wait queue, which may still have sleepers on it.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}

static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
		bh->b_next_free = unused_list;	/* only make link */
		unused_list = bh++;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* Flag as unused */
	}
	return head;

/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}

static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[8];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
		}
	}
}

/*
 * try_to_align() is currently a stub: we make no attempt to realign
 * misaligned buffers within the page, we just release them and report
 * failure so that the caller falls back to copying.
 */
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
				  unsigned long address)
{
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				   dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;
	int aligned = 1;

	bh[0] = first;
	nrbuf = 1;
	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK)
		aligned = 0;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			aligned = 0;
	}
	if (!aligned)
		return try_to_align(bh, nrbuf, address);
	mem_map[MAP_NR(page)]++;
	read_buffers(bh,nrbuf);		/* make sure they are actually read correctly */
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

static unsigned long try_to_load_aligned(unsigned long address,
					 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;

	/* do any of the buffers already exist? punt if so.. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}

/*
 * Try-to-share-buffers tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page. This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (ie >90% sharing of code pages on
 * demand-loadable executables).
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
						 dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}

/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc. This also allows us to optimize memory usage by sharing code pages
 * and filesystem buffers..
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				memcpy((void *) where, bh[i]->b_data, size);
			brelse(bh[i]);
		}
	}
	return address;
}

/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}

/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and free's the page if so.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp) {
			*bhp = p->b_prev_free;
			if (p == *bhp) /* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}

/*
 * Consult the load average for buffers of each size and decide whether
 * some size is over-represented in the cache.  If so, shrink buffers of
 * that size (but never the size that was asked for) and return 1;
 * otherwise return 0.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Do not consider the shared buffers since they would not tend
	   to have getblk called very often, and this would throw off
	   the lav.  They are not easily reclaimable anyway. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* See if we have an excessive number of buffers of a particular
	   size - if so, victimize that bunch. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}

/*
 * Try to free up some pages by shrinking the buffer-cache
 *
 * Priority tells the routine how hard to try to shrink the
 * buffers: 3 means "don't bother too much", while a value
 * of 0 means "we'd better get some free pages now".
 */
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	if(maybe_shrink_lav_buffers(0)) return 1;

	/* No good candidate size - take any size we can find. */
	return shrink_specific_buffers(priority, 0);
}

static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	   from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break; /* The page was freed out from under us;
					  no big deal, keep looking. */
		}
	}

	/* Not enough in the free lists, now try the lru list. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* We may have stalled while waiting for I/O
			   to complete. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			if (bh->b_lock)
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (bh->b_dirt) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;
		}
	}
	return 0;
}

void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory:   %6dkB\n",buffermem>>10);
	printk("Buffer heads:    %6d\n",nr_buffer_heads);
	printk("Buffer blocks:   %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}

/*
 * Try to reassign all of the buffers on one page to a contiguous run of
 * blocks (a cluster) on the given device, starting at starting_block.
 * Only succeeds if the page is unshared and every buffer on it is idle.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if(mem_map[MAP_NR(page)] != 1) return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}

/*
 * Try to build a free cluster of the requested size by finding a page on
 * the free list whose buffers can all be reassigned to the requested run
 * of device blocks.
 */
static int reassign_cluster(dev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* We want to give ourselves a really good shot at generating
	   this cluster, so replenish the free list first if it has
	   fallen to a low point. */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page) continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}

/*
 * This function tries to generate a new cluster of buffers
 * from a new page in memory.  We should only do this if we have
 * not expanded the buffer cache to the maximum size that we allow.
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}

unsigned long generate_cluster(dev_t dev, int b[], int size)
{
	int i, offset;

	for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
		if(i && b[i]-1 != b[i-1]) return 0;  /* No need to cluster */
		if(find_buffer(dev, b[i], size)) return 0;
	};

	/* OK, we have a candidate for a new cluster. */

	/* See if one size of buffer is over-represented in the buffer
	   cache; if so, reduce the numbers of buffers of that size. */
	if(maybe_shrink_lav_buffers(size))
	{
		int retval;
		retval = try_to_generate_cluster(dev, b[0], size);
		if(retval) return retval;
	};

	if (nr_free_pages > min_free_pages*2)
		return try_to_generate_cluster(dev, b[0], size);
	else
		return reassign_cluster(dev, b[0], size);
}

/*
 * This initializes the initial buffer free list.  nr_buffers_type is set
 * to one less the actual number of buffers, as a sop to backwards
 * compatibility --- the old code did this (I think unintentionally,
 * but I'm not sure), and programs in the ps package expect it.
 *                                           - TYT 8/30/92
 */
void buffer_init(void)
{
	int i;
	int isize = BUFSIZE_INDEX(BLOCK_SIZE);

	if (high_memory >= 4*1024*1024) {
		if(high_memory >= 16*1024*1024)
			nr_hash = 16381;
		else
			nr_hash = 4093;
	} else {
		nr_hash = 997;
	};

	hash_table = (struct buffer_head **) vmalloc(nr_hash *
						     sizeof(struct buffer_head *));

	buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
						       sizeof(struct buffer_head *));
	for (i = 0 ; i < MAP_NR(high_memory) ; i++)
		buffer_pages[i] = NULL;

	for (i = 0 ; i < nr_hash ; i++)
		hash_table[i] = NULL;
	lru_list[BUF_CLEAN] = 0;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list[isize])
		panic("VFS: Unable to initialize buffer free list!");
	return;
}
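
/*
 * Illustrative note (not part of the original file): the hash table is
 * sized by total memory using prime bucket counts, e.g. an 8 MB machine
 * (high_memory >= 4 MB but < 16 MB) gets nr_hash = 4093 buckets, a 32 MB
 * machine gets 16381, and anything under 4 MB gets 997.
 */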

/*
 * This is a simple kernel daemon, whose job it is to provide a dynamic
 * response to dirty buffers.  Once this process is activated, we write back
 * a limited number of buffers to the disks and then go back to sleep again.
 * In effect this is a process which never leaves kernel mode, and does not
 * have any user memory associated with it except for the stack.  There is
 * also a kernel stack page, which obviously must be separate from the user
 * stack.
 */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static int bdflush_running = 0;

static void wakeup_bdflush(int wait)
{
	if(!bdflush_running){
		printk("Warning - bdflush not running\n");
		sync_buffers(0,0);
		return;
	};
	wake_up(&bdflush_wait);
	if(wait) sleep_on(&bdflush_done);
}

/*
 * Here we attempt to write back old buffers.  We also try to flush inodes
 * and supers as well, since this function is essentially "update", and
 * otherwise there would be no way of ensuring that these quantities ever
 * get written back.  Ideally, we would have a timestamp on the inodes
 * and superblocks so that we could write back only the old ones as well.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* We may have stalled while waiting for
				   I/O to complete. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on dirty list?  Refile it */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* We assume that we only come through here on a regular
	   schedule, like every 5 seconds.  Now update load averages.
	   Shift usage counts to prevent overflow. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}

/*
 * This is the interface to bdflush.  As we get more sophisticated, we can
 * pass tuning parameters to this "process", to adjust how it behaves.  If
 * you invoke this again after you have done this once, you would simply
 * modify the tuning parameters.  We would want to verify each parameter,
 * however, to make sure that it is reasonable.
 */
asmlinkage int sys_bdflush(int func, int data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* func values of 2 and above get or set a tuning parameter. */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if((func & 1) == 0) {
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	if (bdflush_running)
		return -EBUSY; /* Only one copy of this running at one time */
	bdflush_running++;

	/* OK, from here on is the daemon. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* We may have stalled while waiting
					   for I/O to complete. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on dirty list?  Refile it */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;

					/* Should we write back buffers that
					   are shared or not?  Currently dirty
					   buffers are not shared, so it does
					   not matter. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers around, skip the
		   sleep and flush some more. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
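
/*
 * Illustrative sketch (not part of the original file): any func value other
 * than 1 or the tuning range enters the flush loop above and does not
 * return (except on SIGKILL), so user space is expected to dedicate a
 * process to it, roughly as follows (the bdflush() syscall stub name is
 * assumed):
 *
 *	if (fork() == 0) {
 *		bdflush(0, 0);
 *		_exit(0);
 *	}
 *	for (;;) {
 *		bdflush(1, 0);
 *		sleep(5);
 *	}
 */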