This source file includes the following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- bwrite_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/major.h>
22 #include <linux/string.h>
23 #include <linux/locks.h>
24 #include <linux/errno.h>
25 #include <linux/malloc.h>
26
27 #include <asm/system.h>
28 #include <asm/segment.h>
29 #include <asm/io.h>
30
/* Supported buffer sizes, indexed via BUFSIZE_INDEX: 512->0, 1024->1,
 * 2048->2, 4096->3.  -1 entries mark sizes that are not supported. */
#define NR_SIZES 4
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;				/* number of hash chains */
static struct buffer_head ** hash_table;	/* (dev,block) lookup chains */
struct buffer_head ** buffer_pages;		/* per-page ring of buffer heads */
static struct buffer_head * lru_list[NR_LIST] = {NULL, };	/* per-state circular LRU rings */
static struct buffer_head * free_list[NR_SIZES] = {NULL, };	/* per-size circular free rings */
static struct buffer_head * unused_list = NULL;	/* spare buffer heads */
static struct wait_queue * buffer_wait = NULL;	/* sleepers waiting for free buffers */

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};		/* buffers per LRU list */
int nr_buffers_size[NR_SIZES] = {0,};		/* buffers per size */
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};	/* buffers per (size, list) */
int buffer_usage[NR_SIZES] = {0,};	/* usage counter, feeds the load averages */
int buffers_lav[NR_SIZES] = {0,};	/* load average per buffer size */
int nr_free[NR_SIZES] = {0,};		/* free buffers per size */
int buffermem = 0;			/* bytes of memory held in buffer pages */
int nr_buffer_heads = 0;
extern int *blksize_size[];

static void wakeup_bdflush(int);
63
#define N_PARAM 9
#define LAV

/*
 * Tunable parameters for the bdflush mechanism, settable via
 * sys_bdflush().  The union lets the fields be addressed both by name
 * and as an array (for range checking against bdflush_min/bdflush_max).
 */
static union bdflush_param{
	struct {
		int nfract;	/* dirty-buffer percentage that triggers a bdflush wakeup
				 * (used in refile_buffer()) */
		int ndirty;	/* NOTE(review): consumed by the bdflush loop outside
				 * this chunk — confirm semantics there */
		int nrefill;	/* number of buffers refill_freelist() tries to obtain */
		int nref_dirt;	/* NOTE(review): dirty threshold used elsewhere — confirm */
		int clu_nfract;	/* NOTE(review): cluster-code fraction — used outside this view */
		int age_buffer;	/* jiffies a dirty data buffer may age (set_writetime()) */
		int age_super;	/* jiffies a dirty superblock-type buffer may age */
		int lav_const;	/* constant in maybe_shrink_lav_buffers() heuristic */
		int lav_ratio;	/* NOTE(review): load-average ratio — used outside this view */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};

/* Allowed [min, max] range for each parameter, indexed like bdf_prm.data. */
static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
100
101
102
103
104
105
106
107
108
109
/*
 * Sleep until the buffer is unlocked.  The reference count is raised
 * around the wait so the buffer cannot be recycled while we sleep.
 * Callers normally reach this through the wait_on_buffer() fast path
 * when b_lock is set.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;			/* pin the buffer while sleeping */
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;		/* wakeups may be spurious: re-check */
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
126
127
128
129
130
131
132
133
134
135
136
/*
 * sync_buffers() - write out dirty buffers for a device (all devices when
 * dev == 0).  With wait == 0 writes are merely queued; with wait != 0 up
 * to three passes are made:
 *   0) write out all dirty, unlocked buffers;
 *   1) write out all dirty buffers, waiting if locked;
 *   2) wait for completion by waiting for all buffers to unlock.
 * Returns nonzero if a buffer in a weird state (unlocked, clean, yet not
 * uptodate) was seen, i.e. an I/O error happened.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* All lists are searched as a failsafe; dirty buffers are expected
	 * only on BUF_DIRTY, and strays are reported below. */
	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Scan at most twice the list length: the ring can change
		 * under us every time we sleep. */
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if(bh->b_list != nlist) goto repeat1;	/* buffer was refiled while we slept */
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Locked buffers are skipped on the first pass;
				 * later waiting passes sleep on them and restart. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
				goto repeat2;
			}
			/* An unlocked, clean buffer that is not uptodate
			 * indicates an I/O error somewhere. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}
			/* Don't write clean buffers, and write nothing at all
			 * on the final (wait-only) pass. */
			if (!bh->b_dirt || pass>=2)
				continue;
			/* don't bother with locked buffers here */
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if(nlist != BUF_DIRTY) {
				/* dirty buffer found off the dirty list: report it */
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If waiting and anything was written or skipped, make another
	 * pass (at most three in total). */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
212
/*
 * sync_dev() - queue all dirty buffers, superblocks and inodes of a
 * device (every device when dev == 0) for writing, without waiting.
 * Buffers are synced again afterwards so metadata dirtied by
 * sync_supers()/sync_inodes() is also queued.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}
220
/*
 * fsync_dev() - like sync_dev(), but the final buffer pass waits for
 * the I/O to complete and propagates sync_buffers()' error indication.
 */
int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}
228
/*
 * sync(2) system call: flush everything for every device.  The return
 * value of fsync_dev() is deliberately ignored; sync() always succeeds.
 */
asmlinkage int sys_sync(void)
{
	fsync_dev(0);
	return 0;
}
234
/*
 * file_fsync() - generic f_op->fsync implementation for filesystems with
 * no finer-grained sync: flush the whole device the file lives on.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
239
240 asmlinkage int sys_fsync(unsigned int fd)
241 {
242 struct file * file;
243 struct inode * inode;
244
245 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
246 return -EBADF;
247 if (!file->f_op || !file->f_op->fsync)
248 return -EINVAL;
249 if (file->f_op->fsync(inode,file))
250 return -EIO;
251 return 0;
252 }
253
/*
 * invalidate_buffers() - forget all cached state for a device: every
 * matching, unreferenced buffer has its uptodate/dirty/req flags cleared
 * so stale data is never handed out (e.g. after a media change).
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			/* We may have slept: re-check the device. */
			if (bh->b_dev != dev)
				continue;
			if (bh->b_count)
				continue;	/* still referenced: leave it */
			bh->b_flushtime = bh->b_uptodate =
				bh->b_dirt = bh->b_req = 0;
		}
	}
}
275
276 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
277 #define hash(dev,block) hash_table[_hashfn(dev,block)]
278
279 static inline void remove_from_hash_queue(struct buffer_head * bh)
280 {
281 if (bh->b_next)
282 bh->b_next->b_prev = bh->b_prev;
283 if (bh->b_prev)
284 bh->b_prev->b_next = bh->b_next;
285 if (hash(bh->b_dev,bh->b_blocknr) == bh)
286 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
287 bh->b_next = bh->b_prev = NULL;
288 }
289
/*
 * Unlink a buffer from its circular LRU ring, fixing the list head when
 * the buffer was first — or the only — element.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");	/* free buffers never sit on LRU */
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	/* Head still points at bh: it was the ring's only element. */
	if(lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}
304
/*
 * Unlink a buffer from its per-size circular free ring, keeping the
 * nr_free accounting and the list head consistent.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");	/* only free-marked buffers allowed */
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;	/* bh was the only element */
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
324
325 static inline void remove_from_queues(struct buffer_head * bh)
326 {
327 if(bh->b_dev == 0xffff) {
328 remove_from_free_list(bh);
329
330 return;
331 };
332 nr_buffers_type[bh->b_list]--;
333 nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
334 remove_from_hash_queue(bh);
335 remove_from_lru_list(bh);
336 }
337
/*
 * Move a buffer to the most-recently-used end of its LRU ring.  Because
 * the ring is circular, when bh is already the head it suffices to
 * rotate the head forward — bh then becomes the tail.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		/* head rotation shortcut: bh becomes last */
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Re-seed an empty ring as a one-element circle. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	/* Splice bh in just before the head, i.e. at the tail. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
360
/*
 * Append a buffer to the tail of the free ring for its size, marking it
 * free by setting the sentinel device number 0xffff.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* So it is obvious we are on the free list. */

	/* Re-seed an empty ring as a one-element circle. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	/* Splice bh in just before the head, i.e. at the tail. */
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
382
/*
 * Insert a buffer into the appropriate queues: the free ring for buffers
 * carrying the free marker (b_dev == 0xffff), otherwise the tail of its
 * LRU ring plus the hash table.  Anonymous buffers (b_dev == 0) are kept
 * off the hash table since they cannot be looked up by (dev, block).
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	/* Re-seed an empty LRU ring as a one-element circle. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	/* Splice in just before the head, i.e. at the tail. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
	/* Put the buffer in its new hash queue if it has a device. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
412
413 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
414 {
415 struct buffer_head * tmp;
416
417 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
418 if (tmp->b_dev==dev && tmp->b_blocknr==block)
419 if (tmp->b_size == size)
420 return tmp;
421 else {
422 printk("VFS: Wrong blocksize on device %d/%d\n",
423 MAJOR(dev), MINOR(dev));
424 return NULL;
425 }
426 return NULL;
427 }
428
429
430
431
432
433
434
435
436 struct buffer_head * get_hash_table(dev_t dev, int block, int size)
437 {
438 struct buffer_head * bh;
439
440 for (;;) {
441 if (!(bh=find_buffer(dev,block,size)))
442 return NULL;
443 bh->b_reuse=0;
444 bh->b_count++;
445 wait_on_buffer(bh);
446 if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
447 return bh;
448 bh->b_count--;
449 }
450 }
451
/*
 * set_blocksize() - change the block size used for a device.  All dirty
 * data is synced out first; buffers of the old size are then invalidated
 * and removed from the hash table so they can never satisfy a lookup
 * again.  Panics on a size that is not 512/1024/2048/4096.
 */
void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		/* first-time setup to the default size needs no flushing */
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;		/* nothing to change */
	sync_buffers(dev, 2);	/* flush and wait before invalidating */
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Walk every LRU list; buffers of the wrong size for this device
	 * are invalidated and unhashed.  The 2x bound guards against the
	 * list changing while we sleep in wait_on_buffer(). */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;	/* already the new size */

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				/* contents are meaningless under the new size */
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}
496
497 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
498
499 void refill_freelist(int size)
500 {
501 struct buffer_head * bh, * tmp;
502 struct buffer_head * candidate[NR_LIST];
503 unsigned int best_time, winner;
504 int isize = BUFSIZE_INDEX(size);
505 int buffers[NR_LIST];
506 int i;
507 int needed;
508
509
510
511
512
513 if (nr_free[isize] > 100)
514 return;
515
516
517
518
519
520
521 needed =bdf_prm.b_un.nrefill * size;
522
523 while (nr_free_pages > min_free_pages*2 && needed > 0 &&
524 grow_buffers(GFP_BUFFER, size)) {
525 needed -= PAGE_SIZE;
526 }
527
528 if(needed <= 0) return;
529
530
531
532
533 while(maybe_shrink_lav_buffers(size))
534 {
535 if(!grow_buffers(GFP_BUFFER, size)) break;
536 needed -= PAGE_SIZE;
537 if(needed <= 0) return;
538 };
539
540
541
542
543
544
545
546 repeat0:
547 for(i=0; i<NR_LIST; i++){
548 if(i == BUF_DIRTY || i == BUF_SHARED ||
549 nr_buffers_type[i] == 0) {
550 candidate[i] = NULL;
551 buffers[i] = 0;
552 continue;
553 }
554 buffers[i] = nr_buffers_type[i];
555 for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
556 {
557 if(buffers[i] < 0) panic("Here is the problem");
558 tmp = bh->b_next_free;
559 if (!bh) break;
560
561 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
562 bh->b_dirt) {
563 refile_buffer(bh);
564 continue;
565 };
566
567 if (bh->b_count || bh->b_size != size)
568 continue;
569
570
571
572
573
574 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
575 buffers[i] = 0;
576 break;
577 }
578
579 if (BADNESS(bh)) continue;
580 break;
581 };
582 if(!buffers[i]) candidate[i] = NULL;
583 else candidate[i] = bh;
584 if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
585 }
586
587 repeat:
588 if(needed <= 0) return;
589
590
591
592 winner = best_time = UINT_MAX;
593 for(i=0; i<NR_LIST; i++){
594 if(!candidate[i]) continue;
595 if(candidate[i]->b_lru_time < best_time){
596 best_time = candidate[i]->b_lru_time;
597 winner = i;
598 }
599 }
600
601
602 if(winner != UINT_MAX) {
603 i = winner;
604 bh = candidate[i];
605 candidate[i] = bh->b_next_free;
606 if(candidate[i] == bh) candidate[i] = NULL;
607 if (bh->b_count || bh->b_size != size)
608 panic("Busy buffer in candidate list\n");
609 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
610 panic("Shared buffer in candidate list\n");
611 if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");
612
613 if(bh->b_dev == 0xffff) panic("Wrong list");
614 remove_from_queues(bh);
615 bh->b_dev = 0xffff;
616 put_last_free(bh);
617 needed -= bh->b_size;
618 buffers[i]--;
619 if(buffers[i] < 0) panic("Here is the problem");
620
621 if(buffers[i] == 0) candidate[i] = NULL;
622
623
624
625 if(candidate[i] && buffers[i] > 0){
626 if(buffers[i] <= 0) panic("Here is another problem");
627 for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
628 if(buffers[i] < 0) panic("Here is the problem");
629 tmp = bh->b_next_free;
630 if (!bh) break;
631
632 if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
633 bh->b_dirt) {
634 refile_buffer(bh);
635 continue;
636 };
637
638 if (bh->b_count || bh->b_size != size)
639 continue;
640
641
642
643
644
645 if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
646 buffers[i] = 0;
647 break;
648 }
649
650 if (BADNESS(bh)) continue;
651 break;
652 };
653 if(!buffers[i]) candidate[i] = NULL;
654 else candidate[i] = bh;
655 if(candidate[i] && candidate[i]->b_count)
656 panic("Here is the problem");
657 }
658
659 goto repeat;
660 }
661
662 if(needed <= 0) return;
663
664
665
666 if (nr_free_pages > 5) {
667 if (grow_buffers(GFP_BUFFER, size)) {
668 needed -= PAGE_SIZE;
669 goto repeat0;
670 };
671 }
672
673
674 if (!grow_buffers(GFP_ATOMIC, size))
675 wakeup_bdflush(1);
676 needed -= PAGE_SIZE;
677 goto repeat0;
678 }
679
680
681
682
683
684
685
686
687
688
689
/*
 * getblk() - find or create a buffer for (dev, block) of the given size.
 * Returns the buffer with b_count raised; it loops in refill_freelist()
 * until a free buffer exists, so it does not return NULL.  The returned
 * buffer is NOT necessarily uptodate.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer-size load average. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);	/* clean & valid: mark most-recently used */
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist() may sleep: someone else may have created the
	 * buffer meanwhile, so re-check the hash table. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	 * and that it's unused (b_count=0), unlocked, and clean. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime=0;
	bh->b_req=0;
	bh->b_reuse=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
732
733 void set_writetime(struct buffer_head * buf, int flag)
734 {
735 int newtime;
736
737 if (buf->b_dirt){
738
739 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
740 bdf_prm.b_un.age_buffer);
741 if(!buf->b_flushtime || buf->b_flushtime > newtime)
742 buf->b_flushtime = newtime;
743 } else {
744 buf->b_flushtime = 0;
745 }
746 }
747
748
/*
 * refile_buffer() - move a buffer onto the LRU list matching its current
 * state (dirty / shared / locked / unshared / clean), and wake bdflush
 * when the dirty list grows past the configured fraction.
 */
void refile_buffer(struct buffer_head * buf){
	int dispose;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if (buf->b_dirt)
		dispose = BUF_DIRTY;
	else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
		dispose = BUF_SHARED;	/* the page is mapped elsewhere too */
	else if (buf->b_lock)
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;	/* just stopped being shared */
	else
		dispose = BUF_CLEAN;
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		/* Locked buffers with a near flush deadline go onto the
		 * separate BUF_LOCKED1 list. */
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		/* Too large a fraction of non-shared buffers dirty:
		 * wake the flush daemon. */
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}
778
/*
 * brelse() - release a buffer reference taken by getblk()/bread().
 * The buffer is refiled according to its state and, when the last
 * reference drops, waiters on buffer_wait are woken.  A buffer flagged
 * b_reuse that is idle and clean goes straight back to the free list.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, update the flush deadline before refiling. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;		/* other references remain */
		wake_up(&buffer_wait);
		if (buf->b_reuse) {
			/* Caller hinted the data won't be wanted again:
			 * recycle immediately if idle and clean. */
			if (!buf->b_lock && !buf->b_dirt && !buf->b_wait) {
				buf->b_reuse = 0;
				if(buf->b_dev == 0xffff) panic("brelse: Wrong list");
				remove_from_queues(buf);
				buf->b_dev = 0xffff;
				put_last_free(buf);
			}
		}
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
806
807
808
809
810
811 struct buffer_head * bread(dev_t dev, int block, int size)
812 {
813 struct buffer_head * bh;
814
815 if (!(bh = getblk(dev, block, size))) {
816 printk("VFS: bread: READ error on device %d/%d\n",
817 MAJOR(dev), MINOR(dev));
818 return NULL;
819 }
820 if (bh->b_uptodate)
821 return bh;
822 ll_rw_block(READ, 1, &bh);
823 wait_on_buffer(bh);
824 if (bh->b_uptodate)
825 return bh;
826 brelse(bh);
827 return NULL;
828 }
829
830
831
832
833
834
835
836 #define NBUF 16
837
/*
 * breada() - like bread(), but also starts read-ahead on up to NBUF-1
 * following blocks, bounded by the device's read_ahead setting and by
 * the file size.  Only the requested block is waited for; the read-ahead
 * buffers are released immediately after the request is queued.
 */
struct buffer_head * breada(dev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;	/* cache hit: no read-ahead started */

	/* NOTE(review): this window computation mixes offsets masked by the
	 * buffer size with a (9+index) shift — confirm the intended
	 * read-ahead amount against the callers. */
	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (bh->b_uptodate) {
			brelse(bh);	/* already cached: stop extending */
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for all buffers in one batch. */
	ll_rw_block(READ, j, bhlist);

	for(i=1; i<j; i++)
		brelse(bhlist[i]);	/* read-ahead: fire and forget */

	/* Wait for this particular block. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
890
891
892
893
894 static void put_unused_buffer_head(struct buffer_head * bh)
895 {
896 struct wait_queue * wait;
897
898 wait = ((volatile struct buffer_head *) bh)->b_wait;
899 memset(bh,0,sizeof(*bh));
900 ((volatile struct buffer_head *) bh)->b_wait = wait;
901 bh->b_next_free = unused_list;
902 unused_list = bh;
903 }
904
905 static void get_more_buffer_heads(void)
906 {
907 int i;
908 struct buffer_head * bh;
909
910 if (unused_list)
911 return;
912
913 if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
914 return;
915
916 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
917 bh->b_next_free = unused_list;
918 unused_list = bh++;
919 }
920 }
921
922 static struct buffer_head * get_unused_buffer_head(void)
923 {
924 struct buffer_head * bh;
925
926 get_more_buffer_heads();
927 if (!unused_list)
928 return NULL;
929 bh = unused_list;
930 unused_list = bh->b_next_free;
931 bh->b_next_free = NULL;
932 bh->b_data = NULL;
933 bh->b_size = 0;
934 bh->b_req = 0;
935 return bh;
936 }
937
938
939
940
941
942
943
/*
 * create_buffers() - carve a page into buffer heads of the given size,
 * chained via b_this_page (the returned head covers the lowest offset).
 * Returns NULL if buffer heads run out, in which case everything
 * allocated so far is given back to the pool.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* Walk down the page; the unsigned wrap of (offset -= size) past
	 * zero terminates the loop. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* flag as free/unattached */
	}
	return head;

/* In case anything failed, free everything obtained so far. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
974
975 static void read_buffers(struct buffer_head * bh[], int nrbuf)
976 {
977 int i;
978 int bhnum = 0;
979 struct buffer_head * bhr[MAX_BUF_PER_PAGE];
980
981 for (i = 0 ; i < nrbuf ; i++) {
982 if (bh[i] && !bh[i]->b_uptodate)
983 bhr[bhnum++] = bh[i];
984 }
985 if (bhnum)
986 ll_rw_block(READ, bhnum, bhr);
987 for (i = nrbuf ; --i >= 0 ; ) {
988 if (bh[i]) {
989 wait_on_buffer(bh[i]);
990 }
991 }
992 }
993
994
995
996
997
998
999
1000
1001
/*
 * Placeholder for realigning scattered buffers into one page: alignment
 * is not actually attempted — every buffer is released and 0 returned,
 * so the caller falls back to copying.  `address` is unused.
 */
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
	unsigned long address)
{
	int i;

	for (i = nrbuf - 1 ; i >= 0 ; i--)
		brelse(bh[i]);
	return 0;
}
1009
/*
 * check_aligned() - given the cached buffer for b[0], test whether ALL
 * of the page's blocks are cached contiguously within one page.  If so,
 * share that page (bump its map count), make the buffers uptodate, free
 * the caller's page and return the shared one.  Otherwise fall back to
 * try_to_align() (which currently just gives up and returns 0).
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
	dev_t dev, int *b, int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;
	int aligned = 1;

	bh[0] = first;
	nrbuf = 1;
	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK)
		aligned = 0;	/* first buffer not at the start of a page */
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;	/* hole in the file: cannot share */
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;	/* block not cached: cannot share */
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			aligned = 0;	/* not contiguous in the page */
	}
	if (!aligned)
		return try_to_align(bh, nrbuf, address);
	mem_map[MAP_NR(page)]++;	/* share the buffer page */
	read_buffers(bh,nrbuf);		/* bring all blocks uptodate */
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);		/* caller's page no longer needed */
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}
1050
/*
 * try_to_load_aligned() - build buffer-cache entries directly on the page
 * at `address` for the blocks in b[], so the page is shared between the
 * process and the buffer cache.  Fails (returns 0) when any block number
 * is zero (a hole) or the block is already cached elsewhere.
 */
static unsigned long try_to_load_aligned(unsigned long address,
	dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;
	/* Pre-check: every slot must name a block, and none may already be
	 * cached (that would duplicate a hash entry). */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	/* Initialize each head and insert it into the cache. */
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_reuse = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;		/* close the per-page ring */
	mem_map[MAP_NR(address)]++;	/* page now shared with the cache */
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);	/* read all blocks in one batch */
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	/* Give the unused buffer heads back; the page stays the caller's. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 static inline unsigned long try_to_share_buffers(unsigned long address,
1123 dev_t dev, int *b, int size)
1124 {
1125 struct buffer_head * bh;
1126 int block;
1127
1128 block = b[0];
1129 if (!block)
1130 return 0;
1131 bh = get_hash_table(dev, block, size);
1132 if (bh)
1133 return check_aligned(bh, address, dev, b, size);
1134 return try_to_load_aligned(address, dev, b, size);
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
/*
 * bread_page() - read one page worth of blocks b[] from dev into the
 * page at `address`.  Unless no_share is set, first try to share a
 * buffer-cache page directly (in which case the caller's page is freed
 * and the shared page returned).  Otherwise read through the cache and
 * copy; zero block numbers denote holes and are zero-filled.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;	/* shared: may differ from address */
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);	/* fetch whatever isn't uptodate yet */
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)	/* skip copy on I/O error */
				memcpy((void *) where, bh[i]->b_data, size);
			brelse(bh[i]);
		} else
			memset((void *) where, 0, size);	/* hole */
	}
	return address;
}
1173
1174 #if 0
1175
1176
1177
1178
/*
 * bwrite_page() - mirror of bread_page() for writes: copy the page into
 * the buffer cache block by block and mark each buffer dirty.  This
 * function is currently compiled out (#if 0).
 */
void bwrite_page(unsigned long address, dev_t dev, int b[], int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	int i, j;

	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, address += size) {
		if (bh[i]) {
			memcpy(bh[i]->b_data, (void *) address, size);
			bh[i]->b_uptodate = 1;
			mark_buffer_dirty(bh[i], 0);
			brelse(bh[i]);
		} else
			/* NOTE(review): zeroing the SOURCE page for holes
			 * looks suspicious for a write path — confirm intent. */
			memset((void *) address, 0, size);
	}
}
1199 #endif
1200
1201
1202
1203
1204
/*
 * grow_buffers() - allocate one page, carve it into buffers of `size`,
 * and splice them into the matching free list.  Returns 1 on success,
 * 0 on a bad size or allocation failure.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);	/* no buffer heads: give the page back */
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	/* Link each new buffer into the circular free ring, each one right
	 * after the previous insertion point. */
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;	/* empty list: one-element ring */
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;		/* close the per-page ring */
	wake_up(&buffer_wait);		/* someone may be waiting for buffers */
	buffermem += PAGE_SIZE;
	return 1;
}
1255
1256
1257
1258
1259
1260
1261
1262
/*
 * try_to_free() - attempt to release the entire page that `bh` lives on.
 * Succeeds only when every buffer on the page is idle (no references,
 * clean, unlocked, no waiters).  *bhp is advanced off the freed buffers
 * so the caller's list walk can continue, and set to NULL when the list
 * becomes empty.  Returns nonzero if the page really became free (i.e.
 * no other mapping remains).
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	/* First pass: check that every buffer on the page is reclaimable. */
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	/* Second pass: unlink and recycle every buffer head on the page. */
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp)
		{
			*bhp = p->b_prev_free;
			if (p == *bhp)
				*bhp = NULL;	/* list is now empty */
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];	/* nonzero only if nobody else maps it */
}
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
/*
 * maybe_shrink_lav_buffers() - consult the per-size load averages and,
 * if some buffer size is clearly under-used relative to its share of
 * buffers, shrink that size.  `size` (0 = none) names the size we are
 * trying to grow, which is exempt from shrinking.  Returns 1 if buffers
 * were freed.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Tally the total load average and buffer population, excluding
	 * shared buffers which cannot be reclaimed anyway. */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* With only one size in use the comparison below is meaningless. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;	/* never shrink the size we need */
			/* Shrink a size whose load average is low compared to
			 * its (unshared) share of the buffer population. */
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
/*
 * Reclaim buffer memory under pressure.  At high urgency (low priority
 * number) dirty data is pushed out first; at priority 2 the flush daemon
 * is woken.  Under-used buffer sizes are shed before falling back to a
 * general reclaim.  Note: `limit` is accepted but unused here.
 */
int shrink_buffers(unsigned int priority, unsigned long limit)
{
	if (priority < 2)
		sync_buffers(0,0);

	if (priority == 2)
		wakeup_bdflush(1);

	if (maybe_shrink_lav_buffers(0))
		return 1;

	return shrink_specific_buffers(priority, 0);
}
1375
/*
 * shrink_specific_buffers() - try to free one full page of buffers,
 * optionally restricted to one buffer size (size == 0 means any).  The
 * free lists are scanned first; then a fraction of each LRU list, the
 * fraction deepening as `priority` falls.  Returns 1 if a page was freed.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists: a whole page may already sit there. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;	/* list became empty */
		}
	}

	/* Not enough on the free lists: now try the LRU lists. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		/* under light pressure, leave shared pages alone */
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		i = 2*nr_buffers_type[nlist] >> priority;	/* scan depth */
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* buffer refiled while we slept: restart this list */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			/* NOTE: the else binds to `if (priority)` — locked
			 * buffers are skipped unless priority is 0, in which
			 * case we wait for them. */
			if (bh->b_lock)
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (bh->b_dirt) {
				/* start an async write-ahead and move on; the
				 * buffer becomes reclaimable once clean */
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;	/* list became empty */
		}
	}
	return 0;
}
1436
1437
1438
1439
/*
 * show_buffers() - dump buffer-cache statistics to the console: overall
 * totals, per-LRU-list usage, and a per-size table of counts by list.
 */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n",buffermem>>10);
	printk("Buffer heads: %6d\n",nr_buffer_heads);
	printk("Buffer blocks: %6d\n",nr_buffers);

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Walk the whole circular ring once, tallying states. */
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
	for(isize = 0; isize<NR_SIZES; isize++){
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for(nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
1478
1479
1480
1481
1482
1483
1484
1485
/*
 * Try to re-home the whole page that 'bh' lives on to device 'dev' as a
 * run of consecutive blocks starting at 'starting_block'.  Returns 1 on
 * success, 0 if the page is shared or any buffer on it is busy, dirty
 * or locked.  On success every buffer on the page is rehashed for the
 * new device with b_uptodate and b_req cleared.  *bhp is set to bh.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
			   dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* shared page - leave it alone */
	if(mem_map[MAP_NR(page)] != 1) return 0;
	/* first pass: every buffer on the page must be idle and clean */
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	/* rewind to the buffer whose data starts at the page boundary,
	   so blocks are assigned in page order */
	while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;
	/* second pass: re-home each buffer onto dev */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;	/* old contents invalid for new block */
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
/*
 * Build a cluster for 'dev' at 'starting_block' by reassigning an
 * existing page of idle free-list buffers instead of allocating a new
 * page.  Returns 4 on success, 0 if no suitable page was found.
 * NOTE(review): the success value 4 mirrors try_to_generate_cluster's;
 * its meaning is not visible here - confirm against callers.
 */
static int reassign_cluster(dev_t dev,
		     unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* make sure there are plenty of candidates on the free list
	   before we start scanning it */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		/* one trip around the circular free list ('!i' admits the head) */
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page) continue;	/* not part of a buffer page */
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}
1561
1562
1563
1564
1565
/*
 * Allocate a fresh page and populate it with buffers for the
 * consecutive 'size'-byte blocks starting at 'block' on 'dev',
 * inserting them into the cache.  Fails (returns 0) if the page or the
 * buffer heads cannot be allocated, or if any target block is already
 * cached.  Returns 4 on success.
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* none of the target blocks may already be in the cache */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;	/* reused as an index into arr[] from here on */
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;	/* hold a reference until all are inserted */
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;	/* close the per-page circular list */
	/* drop the temporary references taken above */
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* a target block was already cached: tear everything down again */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1622
1623 unsigned long generate_cluster(dev_t dev, int b[], int size)
1624 {
1625 int i, offset;
1626
1627 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1628 if(i && b[i]-1 != b[i-1]) return 0;
1629 if(find_buffer(dev, b[i], size)) return 0;
1630 };
1631
1632
1633
1634
1635
1636 if(maybe_shrink_lav_buffers(size))
1637 {
1638 int retval;
1639 retval = try_to_generate_cluster(dev, b[0], size);
1640 if(retval) return retval;
1641 };
1642
1643 if (nr_free_pages > min_free_pages*2)
1644 return try_to_generate_cluster(dev, b[0], size);
1645 else
1646 return reassign_cluster(dev, b[0], size);
1647 }
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659 void buffer_init(void)
1660 {
1661 int i;
1662 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1663
1664 if (high_memory >= 4*1024*1024) {
1665 if(high_memory >= 16*1024*1024)
1666 nr_hash = 16381;
1667 else
1668 nr_hash = 4093;
1669 } else {
1670 nr_hash = 997;
1671 };
1672
1673 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1674 sizeof(struct buffer_head *));
1675
1676
1677 buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
1678 sizeof(struct buffer_head *));
1679 for (i = 0 ; i < MAP_NR(high_memory) ; i++)
1680 buffer_pages[i] = NULL;
1681
1682 for (i = 0 ; i < nr_hash ; i++)
1683 hash_table[i] = NULL;
1684 lru_list[BUF_CLEAN] = 0;
1685 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1686 if (!free_list[isize])
1687 panic("VFS: Unable to initialize buffer free list!");
1688 return;
1689 }
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
/* bdflush daemon synchronisation: the daemon sleeps on bdflush_wait
   until kicked by wakeup_bdflush(); callers that want to wait for a
   flush pass to complete sleep on bdflush_done. */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

/* non-zero once a bdflush daemon has been started via sys_bdflush() */
static int bdflush_running = 0;
1705
1706 static void wakeup_bdflush(int wait)
1707 {
1708 if(!bdflush_running){
1709 printk("Warning - bdflush not running\n");
1710 sync_buffers(0,0);
1711 return;
1712 };
1713 wake_up(&bdflush_wait);
1714 if(wait) sleep_on(&bdflush_done);
1715 }
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
/*
 * Flush dirty buffers whose flush deadline (b_flushtime) has passed,
 * after syncing superblocks and inodes, then recompute the per-size
 * buffer load averages.  Also reachable via sys_bdflush(1, ...).
 * Always returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	/* normally only the dirty list needs scanning */
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* buffer may have been refiled while we were
				   blocked - restart the scan of this list */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* clean buffer on the dirty list?  refile it */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				/* flush deadline not yet reached - leave it */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;	/* pin across the write */
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Update the decayed load averages.  NOTE(review): CALC_LOAD
	   presumably assumes this runs on a regular schedule - confirm
	   against the caller's timing. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1794
1795
1796
1797
1798
1799
1800
1801
/*
 * The bdflush system call (superuser only):
 *   func == 1          : run sync_old_buffers() and return its result.
 *   func >= 2, even    : copy tuning parameter (func-2)/2 out to *data.
 *   func >= 2, odd     : set tuning parameter (func-2)/2 to 'data',
 *                        range-checked against bdflush_min/max.
 *   func == 0          : turn the calling process into the bdflush
 *                        daemon; only returns (0) when killed.
 * Returns 0 on success or a negative errno.
 */
asmlinkage int sys_bdflush(int func, long data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* parameter get/set interface */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if((func & 1) == 0) {
			/* even func: read the parameter back to user space */
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_user(bdf_prm.data[i], (int*)data);
			return 0;
		};
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	/* only one daemon instance allowed */
	if (bdflush_running)
		return -EBUSY;
	bdflush_running++;

	/* from here on the caller is the bdflush daemon */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		/* normally only the dirty list needs scanning */
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				/* write out at most bdf_prm.b_un.ndirty buffers per pass */
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* buffer may have been refiled while we were
					   blocked - restart the scan of this list */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* clean buffer on the dirty list?  refile it */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;

					bh->b_count++;	/* pin across the write */
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);	/* release anyone in wakeup_bdflush(1) */

		/* If the dirty fraction is back under the nfract threshold,
		   go to sleep; otherwise loop immediately and flush more. */
		if(nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			/* SIGKILL is the only way to stop the daemon */
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;	/* discard other pending signals */
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924