This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- bwrite_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>
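/*
 * Buffers come in four power-of-two sizes (512 to 4096 bytes).
 * buffersize_index maps (size >> 9) to a compact index; -1 marks
 * sizes that the buffer cache does not support.
 */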
#define NR_SIZES 4
static char buffersize_index[9] = {-1,  0,  1, -1,  2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);
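/*
 * Global buffer-cache state: a hash table indexed by (dev, block), one
 * LRU list per buffer type, one free list per buffer size, and a list
 * of unused buffer heads.  The counters below track totals per type
 * and per size.
 */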
static int nr_hash = 0;
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};
int buffers_lav[NR_SIZES] = {0,};
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
extern int *blksize_size[];

static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV
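/*
 * Tunable parameters for the bdflush daemon.  The union lets the same
 * storage be read either by name (b_un) or as an array (data[]) when
 * sys_bdflush() gets or sets an individual parameter by index.
 */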
static union bdflush_param{
        struct {
                int nfract;     /* Percentage of buffer cache dirty before
                                   we activate bdflush */
                int ndirty;     /* Maximum number of dirty blocks to write
                                   out per wake-cycle */
                int nrefill;    /* Number of clean buffers to try to obtain
                                   each time we call refill */
                int nref_dirt;  /* Dirty buffer threshold for activating
                                   bdflush when trying to refill buffers */
                int clu_nfract; /* Percentage of the buffer cache to scan
                                   when searching for free clusters */
                int age_buffer; /* Time for a normal buffer to age before
                                   we flush it */
                int age_super;  /* Time for a superblock to age before we
                                   flush it */
                int lav_const;  /* Time constant used for the load-average
                                   calculation */
                int lav_ratio;  /* How low the lav for a particular size may
                                   go before we start to trim back buffers
                                   of that size */
        } b_un;
        unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};

static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100,    1, 1};
static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
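/*
 * Wait for a buffer to become unlocked.  The reference count is raised
 * while we sleep so the buffer cannot be freed under us, and b_lock is
 * re-checked after every wake-up, since being woken does not guarantee
 * that the buffer is still unlocked.
 */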
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        bh->b_count++;
        add_wait_queue(&bh->b_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (bh->b_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait, &wait);
        bh->b_count--;
        current->state = TASK_RUNNING;
}
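/*
 * Write out dirty buffers, optionally restricted to one device.  With
 * wait == 0 the writes are merely scheduled; with wait != 0 the routine
 * also sleeps on locked buffers and repeats until all writes have
 * completed, which is what fsync_dev() relies on.
 */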
static int sync_buffers(dev_t dev, int wait)
{
        int i, retry, pass = 0, err = 0;
        int nlist, ncount;
        struct buffer_head * bh, *next;

        /* One pass for no-wait, three for wait:
           0) write out all dirty, unlocked buffers;
           1) write out all dirty buffers, waiting if locked;
           2) wait for completion by waiting for the remaining buffers
              to unlock. */
repeat:
        retry = 0;
repeat2:
        ncount = 0;
        /* We search all lists as a failsafe mechanism, not because we expect
           there to be dirty buffers on any of the other lists. */
        for(nlist = 0; nlist < NR_LIST; nlist++)
        {
        repeat1:
                bh = lru_list[nlist];
                if(!bh) continue;
                for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
                        if(bh->b_list != nlist) goto repeat1;
                        next = bh->b_next_free;
                        if(!lru_list[nlist]) break;
                        if (dev && bh->b_dev != dev)
                                continue;
                        if (bh->b_lock)
                        {
                                /* A locked buffer can only be skipped on the
                                   first pass; on later passes (wait != 0) we
                                   sleep until it unlocks and rescan. */
                                if (!wait || !pass) {
                                        retry = 1;
                                        continue;
                                }
                                wait_on_buffer (bh);
                                goto repeat2;
                        }
                        /* An unlocked buffer that has seen I/O but is neither
                           dirty nor uptodate indicates an I/O error. */
                        if (wait && bh->b_req && !bh->b_lock &&
                            !bh->b_dirt && !bh->b_uptodate) {
                                err = 1;
                                printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
                                continue;
                        }
                        /* Don't write clean buffers, and don't start any new
                           writes on the final pass. */
                        if (!bh->b_dirt || pass >= 2)
                                continue;
                        /* Don't bother about locked buffers. */
                        if (bh->b_lock)
                                continue;
                        bh->b_count++;
                        bh->b_flushtime = 0;
                        ll_rw_block(WRITE, 1, &bh);

                        if(nlist != BUF_DIRTY) {
                                printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
                                ncount++;
                        };
                        bh->b_count--;
                        retry = 1;
                }
        }
        if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

        /* If we are waiting for the sync to succeed, and anything had to be
           retried, go around again (at most two extra passes). */
        if (wait && retry && ++pass <= 2)
                goto repeat;
        return err;
}

void sync_dev(dev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev, 0);
}

int fsync_dev(dev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
        sync_dev(0);
        return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
        return fsync_dev(inode->i_dev);
}

asmlinkage int sys_fsync(unsigned int fd)
{
        struct file * file;
        struct inode * inode;

        if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
                return -EBADF;
        if (!file->f_op || !file->f_op->fsync)
                return -EINVAL;
        if (file->f_op->fsync(inode,file))
                return -EIO;
        return 0;
}

void invalidate_buffers(dev_t dev)
{
        int i;
        int nlist;
        struct buffer_head * bh;

        for(nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
                        if (bh->b_dev != dev)
                                continue;
                        wait_on_buffer(bh);
                        if (bh->b_dev != dev)
                                continue;
                        if (bh->b_count)
                                continue;
                        bh->b_flushtime = bh->b_uptodate =
                                bh->b_dirt = bh->b_req = 0;
                }
        }
}
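/*
 * The buffer cache is indexed by (device, block number) through a simple
 * modulo hash.  Each in-use buffer sits on a hash chain and on exactly
 * one LRU list; free buffers sit on a per-size free list instead.  The
 * inline helpers below maintain these doubly linked, circular lists.
 */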
#define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
        bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: LRU block list corrupted");
        if (bh->b_dev == 0xffff) panic("LRU list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;

        if (lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = bh->b_next_free;
        if (lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = NULL;
        bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
        int isize = BUFSIZE_INDEX(bh->b_size);
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: Free block list corrupted");
        if (bh->b_dev != 0xffff) panic("Free list corrupted");
        if (!free_list[isize])
                panic("Free list empty");
        nr_free[isize]--;
        if (bh->b_next_free == bh)
                free_list[isize] = NULL;
        else {
                bh->b_prev_free->b_next_free = bh->b_next_free;
                bh->b_next_free->b_prev_free = bh->b_prev_free;
                if (free_list[isize] == bh)
                        free_list[isize] = bh->b_next_free;
        };
        bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
        if (bh->b_dev == 0xffff) {
                /* Free-list entries are on no hash or LRU queue. */
                remove_from_free_list(bh);
                return;
        };
        nr_buffers_type[bh->b_list]--;
        nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
        remove_from_hash_queue(bh);
        remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
        if (!bh)
                return;
        if (bh == lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh->b_next_free;
                return;
        }
        if (bh->b_dev == 0xffff) panic("Wrong block for lru list");
        remove_from_lru_list(bh);

        /* Add to back of the LRU list. */
        if (!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                lru_list[bh->b_list]->b_prev_free = bh;
        };

        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
        int isize;
        if (!bh)
                return;

        isize = BUFSIZE_INDEX(bh->b_size);
        bh->b_dev = 0xffff;

        /* Add to back of the free list. */
        if (!free_list[isize]) {
                free_list[isize] = bh;
                bh->b_prev_free = bh;
        };

        nr_free[isize]++;
        bh->b_next_free = free_list[isize];
        bh->b_prev_free = free_list[isize]->b_prev_free;
        free_list[isize]->b_prev_free->b_next_free = bh;
        free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
        /* Put at end of free list. */
        if (bh->b_dev == 0xffff) {
                put_last_free(bh);
                return;
        };
        /* Put at end of LRU list. */
        if (!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                bh->b_prev_free = bh;
        };
        if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
        nr_buffers_type[bh->b_list]++;
        nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;
        /* Put the buffer in a new hash queue if it has a device. */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!bh->b_dev)
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)
                bh->b_next->b_prev = bh;
}
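/*
 * find_buffer() scans the hash chain for (dev, block); get_hash_table()
 * additionally takes a reference and revalidates the buffer after a
 * possible sleep, since it may have been reused while we were waiting.
 */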
static struct buffer_head * find_buffer(dev_t dev, int block, int size)
{
        struct buffer_head * tmp;

        for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
                if (tmp->b_dev==dev && tmp->b_blocknr==block)
                        if (tmp->b_size == size)
                                return tmp;
                        else {
                                printk("VFS: Wrong blocksize on device %d/%d\n",
                                        MAJOR(dev), MINOR(dev));
                                return NULL;
                        }
        return NULL;
}

struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block,size)))
                        return NULL;
                bh->b_count++;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
                        return bh;
                bh->b_count--;
        }
}

void set_blocksize(dev_t dev, int size)
{
        int i, nlist;
        struct buffer_head * bh, *bhnext;

        if (!blksize_size[MAJOR(dev)])
                return;

        switch(size) {
                default: panic("Invalid blocksize passed to set_blocksize");
                case 512: case 1024: case 2048: case 4096:;
        }

        if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
                blksize_size[MAJOR(dev)][MINOR(dev)] = size;
                return;
        }
        if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
                return;
        sync_buffers(dev, 2);
        blksize_size[MAJOR(dev)][MINOR(dev)] = size;

        /* Invalidate all stale buffers of the old size for this device.
           We must be careful here, since we are moving entries around
           on the lists and could loop if we are not. */
        for(nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
                        if(!bh) break;
                        bhnext = bh->b_next_free;
                        if (bh->b_dev != dev)
                                continue;
                        if (bh->b_size == size)
                                continue;

                        wait_on_buffer(bh);
                        if (bh->b_dev == dev && bh->b_size != size) {
                                bh->b_uptodate = bh->b_dirt = bh->b_req =
                                        bh->b_flushtime = 0;
                        };
                        remove_from_hash_queue(bh);
                }
        }
}

#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
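/*
 * Refill the free list for buffers of the given size.  We first try to
 * grow the cache with fresh pages, then rebalance away from sizes with
 * a low load average, and finally steal the least recently used clean,
 * unshared buffers of the right size from the LRU lists.
 */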
void refill_freelist(int size)
{
        struct buffer_head * bh, * tmp;
        struct buffer_head * candidate[NR_LIST];
        unsigned int best_time, winner;
        int isize = BUFSIZE_INDEX(size);
        int buffers[NR_LIST];
        int i;
        int needed;

        /* See if we even need to refill; there may be plenty of free
           buffers of this size left over. */
        if (nr_free[isize] > 100)
                return;

        /* We are going to try to locate this much memory. */
        needed = bdf_prm.b_un.nrefill * size;

        while (nr_free_pages > min_free_pages*2 && needed > 0 &&
               grow_buffers(GFP_BUFFER, size)) {
                needed -= PAGE_SIZE;
        }

        if(needed <= 0) return;

        /* See if there are too many buffers of a different size.
           If so, victimize those. */
        while(maybe_shrink_lav_buffers(size))
        {
                if(!grow_buffers(GFP_BUFFER, size)) break;
                needed -= PAGE_SIZE;
                if(needed <= 0) return;
        };

        /* OK, we cannot grow the buffer cache, so try to get some buffers
           from the LRU lists.  First set the candidate pointers to usable
           buffers.  This should be quick nearly all of the time. */
repeat0:
        for(i=0; i<NR_LIST; i++){
                if(i == BUF_DIRTY || i == BUF_SHARED ||
                   nr_buffers_type[i] == 0) {
                        candidate[i] = NULL;
                        buffers[i] = 0;
                        continue;
                }
                buffers[i] = nr_buffers_type[i];
                for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
                {
                        if(buffers[i] < 0) panic("Here is the problem");
                        tmp = bh->b_next_free;
                        if (!bh) break;

                        if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
                            bh->b_dirt) {
                                refile_buffer(bh);
                                continue;
                        };

                        if (bh->b_count || bh->b_size != size)
                                continue;

                        /* Buffers are written in the order they are placed
                           on the locked list; if we encounter a locked buffer
                           here, the rest of them are locked too. */
                        if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
                                buffers[i] = 0;
                                break;
                        }

                        if (BADNESS(bh)) continue;
                        break;
                };
                if(!buffers[i]) candidate[i] = NULL;
                else candidate[i] = bh;
                if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
        }

repeat:
        if(needed <= 0) return;

        /* Now see which candidate wins the election. */
        winner = best_time = UINT_MAX;
        for(i=0; i<NR_LIST; i++){
                if(!candidate[i]) continue;
                if(candidate[i]->b_lru_time < best_time){
                        best_time = candidate[i]->b_lru_time;
                        winner = i;
                }
        }

        /* If we have a winner, use it, then advance that list's candidate. */
        if(winner != UINT_MAX) {
                i = winner;
                bh = candidate[i];
                candidate[i] = bh->b_next_free;
                if(candidate[i] == bh) candidate[i] = NULL;
                if (bh->b_count || bh->b_size != size)
                        panic("Busy buffer in candidate list\n");
                if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
                        panic("Shared buffer in candidate list\n");
                if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

                if(bh->b_dev == 0xffff) panic("Wrong list");
                remove_from_queues(bh);
                bh->b_dev = 0xffff;
                put_last_free(bh);
                needed -= bh->b_size;
                buffers[i]--;
                if(buffers[i] < 0) panic("Here is the problem");

                if(buffers[i] == 0) candidate[i] = NULL;

                /* Now move the candidate pointer from the winner list to the
                   next usable buffer on it. */
                if(candidate[i] && buffers[i] > 0){
                        if(buffers[i] <= 0) panic("Here is another problem");
                        for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
                                if(buffers[i] < 0) panic("Here is the problem");
                                tmp = bh->b_next_free;
                                if (!bh) break;

                                if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
                                    bh->b_dirt) {
                                        refile_buffer(bh);
                                        continue;
                                };

                                if (bh->b_count || bh->b_size != size)
                                        continue;

                                /* As above: a locked buffer means the rest of
                                   this locked list is locked as well. */
                                if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
                                        buffers[i] = 0;
                                        break;
                                }

                                if (BADNESS(bh)) continue;
                                break;
                        };
                        if(!buffers[i]) candidate[i] = NULL;
                        else candidate[i] = bh;
                        if(candidate[i] && candidate[i]->b_count)
                                panic("Here is the problem");
                }

                goto repeat;
        }

        if(needed <= 0) return;

        /* Too bad, that was not enough.  Try a little harder to grow some. */
        if (nr_free_pages > 5) {
                if (grow_buffers(GFP_BUFFER, size)) {
                        needed -= PAGE_SIZE;
                        goto repeat0;
                };
        }

        /* And repeat until we find something good. */
        if (!grow_buffers(GFP_ATOMIC, size))
                wakeup_bdflush(1);
        needed -= PAGE_SIZE;
        goto repeat0;
}
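/*
 * getblk() returns the buffer for (dev, block), either from the hash
 * table or freshly allocated from the free list.  The free-list path
 * re-checks the hash table afterwards, because refill_freelist() may
 * have slept, during which another process could have loaded the block.
 */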
struct buffer_head * getblk(dev_t dev, int block, int size)
{
        struct buffer_head * bh;
        int isize = BUFSIZE_INDEX(size);

        /* Update this for the buffer size lav. */
        buffer_usage[isize]++;

repeat:
        bh = get_hash_table(dev, block, size);
        if (bh) {
                if (bh->b_uptodate && !bh->b_dirt)
                        put_last_lru(bh);
                if(!bh->b_dirt) bh->b_flushtime = 0;
                return bh;
        }

        while(!free_list[isize]) refill_freelist(size);

        if (find_buffer(dev,block,size))
                goto repeat;

        bh = free_list[isize];
        remove_from_free_list(bh);

        /* OK, we now know that this buffer is the only one of its kind,
           and that it is unused (b_count=0), unlocked (b_lock=0) and clean. */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_lock=0;
        bh->b_uptodate=0;
        bh->b_flushtime=0;
        bh->b_req=0;
        bh->b_reuse=0;
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}

void set_writetime(struct buffer_head * buf, int flag)
{
        int newtime;

        if (buf->b_dirt){
                newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
                                     bdf_prm.b_un.age_buffer);
                if(!buf->b_flushtime || buf->b_flushtime > newtime)
                        buf->b_flushtime = newtime;
        } else {
                buf->b_flushtime = 0;
        }
}
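/*
 * A buffer may change its type, so put it on the proper list.  Dirty
 * buffers are aged before they are written back; if too large a fraction
 * of the cache becomes dirty we wake up bdflush.
 */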
void refile_buffer(struct buffer_head * buf){
        int dispose;
        if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
        if (buf->b_dirt)
                dispose = BUF_DIRTY;
        else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
                dispose = BUF_SHARED;
        else if (buf->b_lock)
                dispose = BUF_LOCKED;
        else if (buf->b_list == BUF_SHARED)
                dispose = BUF_UNSHARED;
        else
                dispose = BUF_CLEAN;
        if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
        if(dispose != buf->b_list) {
                if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
                        buf->b_lru_time = jiffies;
                if(dispose == BUF_LOCKED &&
                   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
                        dispose = BUF_LOCKED1;
                remove_from_queues(buf);
                buf->b_list = dispose;
                insert_into_queues(buf);
                if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
                   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
                   bdf_prm.b_un.nfract/100)
                        wakeup_bdflush(0);
        }
}

void brelse(struct buffer_head * buf)
{
        if (!buf)
                return;
        wait_on_buffer(buf);

        /* If dirty, mark the time this buffer should be written back. */
        set_writetime(buf, 0);
        refile_buffer(buf);

        if (buf->b_count) {
                if (--buf->b_count)
                        return;
                wake_up(&buffer_wait);
                if (buf->b_reuse) {
                        if (!buf->b_lock && !buf->b_dirt && !buf->b_wait) {
                                buf->b_reuse = 0;
                                if(buf->b_dev == 0xffff) panic("brelse: Wrong list");
                                remove_from_queues(buf);
                                buf->b_dev = 0xffff;
                                put_last_free(buf);
                        }
                }
                return;
        }
        printk("VFS: brelse: Trying to free free buffer\n");
}
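/*
 * bread() reads a specified block and returns the buffer head that
 * contains it.  It returns NULL if the block was unreadable.
 */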
struct buffer_head * bread(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        if (!(bh = getblk(dev, block, size))) {
                printk("VFS: bread: READ error on device %d/%d\n",
                        MAJOR(dev), MINOR(dev));
                return NULL;
        }
        if (bh->b_uptodate)
                return bh;
        ll_rw_block(READ, 1, &bh);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}

#define NBUF 16
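/*
 * breada() works like bread(), but additionally schedules read-ahead
 * for up to NBUF further blocks, bounded by the device's read_ahead
 * setting and the file size.  Only the first buffer is waited for.
 */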
struct buffer_head * breada(dev_t dev, int block, int bufsize,
                            unsigned int pos, unsigned int filesize)
{
        struct buffer_head * bhlist[NBUF];
        unsigned int blocks;
        struct buffer_head * bh;
        int index;
        int i, j;

        if (pos >= filesize)
                return NULL;

        if (block < 0 || !(bh = getblk(dev,block,bufsize)))
                return NULL;

        index = BUFSIZE_INDEX(bh->b_size);

        if (bh->b_uptodate)
                return bh;

        blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

        if (blocks > (read_ahead[MAJOR(dev)] >> index))
                blocks = read_ahead[MAJOR(dev)] >> index;
        if (blocks > NBUF)
                blocks = NBUF;

        bhlist[0] = bh;
        j = 1;
        for(i=1; i<blocks; i++) {
                bh = getblk(dev,block+i,bufsize);
                if (bh->b_uptodate) {
                        brelse(bh);
                        break;
                }
                bhlist[j++] = bh;
        }

        /* Request the reads for these buffers, and then release them. */
        ll_rw_block(READ, j, bhlist);

        for(i=1; i<j; i++)
                brelse(bhlist[i]);

        /* Wait for this buffer, and then continue on. */
        bh = bhlist[0];
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
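/*
 * Buffer heads are allocated in page-sized batches and kept on a simple
 * free list (unused_list) once released; only the wait-queue pointer
 * survives recycling, since sleepers may still reference it.
 */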
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset(bh,0,sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        bh->b_next_free = unused_list;
        unused_list = bh;
}

static void get_more_buffer_heads(void)
{
        int i;
        struct buffer_head * bh;

        if (unused_list)
                return;

        if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
                return;

        for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
                bh->b_next_free = unused_list;
                unused_list = bh++;
        }
}

static struct buffer_head * get_unused_buffer_head(void)
{
        struct buffer_head * bh;

        get_more_buffer_heads();
        if (!unused_list)
                return NULL;
        bh = unused_list;
        unused_list = bh->b_next_free;
        bh->b_next_free = NULL;
        bh->b_data = NULL;
        bh->b_size = 0;
        bh->b_req = 0;
        return bh;
}
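/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to follow
 * the buffers created.  Return NULL if unable to create more buffers.
 */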
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
        struct buffer_head *bh, *head;
        unsigned long offset;

        head = NULL;
        offset = PAGE_SIZE;
        /* The loop ends when the unsigned offset wraps past zero. */
        while ((offset -= size) < PAGE_SIZE) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = head;
                head = bh;
                bh->b_data = (char *) (page+offset);
                bh->b_size = size;
                bh->b_dev = 0xffff;  /* Flag as unused */
        }
        return head;

        /* In case anything failed, we just free everything we got. */
no_grow:
        bh = head;
        while (bh) {
                head = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(head);
        }
        return NULL;
}

static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
        int i;
        int bhnum = 0;
        struct buffer_head * bhr[MAX_BUF_PER_PAGE];

        for (i = 0 ; i < nrbuf ; i++) {
                if (bh[i] && !bh[i]->b_uptodate)
                        bhr[bhnum++] = bh[i];
        }
        if (bhnum)
                ll_rw_block(READ, bhnum, bhr);
        for (i = nrbuf ; --i >= 0 ; ) {
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                }
        }
}
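/*
 * The following routines try to map a whole page of block-contiguous
 * buffers directly into a process (buffer sharing).  check_aligned()
 * succeeds only when every buffer of the page already sits page-aligned
 * in the cache; try_to_load_aligned() builds such a page from scratch;
 * try_to_align() is the fallback and currently just releases the buffers.
 */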
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
                                  unsigned long address)
{
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        return 0;
}

static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
                                   dev_t dev, int *b, int size)
{
        struct buffer_head * bh[MAX_BUF_PER_PAGE];
        unsigned long page;
        unsigned long offset;
        int block;
        int nrbuf;
        int aligned = 1;

        bh[0] = first;
        nrbuf = 1;
        page = (unsigned long) first->b_data;
        if (page & ~PAGE_MASK)
                aligned = 0;
        for (offset = size ; offset < PAGE_SIZE ; offset += size) {
                block = *++b;
                if (!block)
                        goto no_go;
                first = get_hash_table(dev, block, size);
                if (!first)
                        goto no_go;
                bh[nrbuf++] = first;
                if (page+offset != (unsigned long) first->b_data)
                        aligned = 0;
        }
        if (!aligned)
                return try_to_align(bh, nrbuf, address);
        mem_map[MAP_NR(page)]++;
        read_buffers(bh,nrbuf);  /* make sure they are actually read correctly */
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        free_page(address);
        ++current->mm->min_flt;
        return page;
no_go:
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        return 0;
}

static unsigned long try_to_load_aligned(unsigned long address,
                                         dev_t dev, int b[], int size)
{
        struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
        unsigned long offset;
        int isize = BUFSIZE_INDEX(size);
        int * p;
        int block;

        bh = create_buffers(address, size);
        if (!bh)
                return 0;
        /* Do any of the buffers already exist?  Punt if so. */
        p = b;
        for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
                block = *(p++);
                if (!block)
                        goto not_aligned;
                if (find_buffer(dev, block, size))
                        goto not_aligned;
        }
        tmp = bh;
        p = b;
        block = 0;
        while (1) {
                arr[block++] = bh;
                bh->b_count = 1;
                bh->b_dirt = 0;
                bh->b_flushtime = 0;
                bh->b_uptodate = 0;
                bh->b_req = 0;
                bh->b_dev = dev;
                bh->b_blocknr = *(p++);
                bh->b_list = BUF_CLEAN;
                nr_buffers++;
                nr_buffers_size[isize]++;
                insert_into_queues(bh);
                if (bh->b_this_page)
                        bh = bh->b_this_page;
                else
                        break;
        }
        buffermem += PAGE_SIZE;
        bh->b_this_page = tmp;
        mem_map[MAP_NR(address)]++;
        buffer_pages[MAP_NR(address)] = bh;
        read_buffers(arr,block);
        while (block-- > 0)
                brelse(arr[block]);
        ++current->mm->maj_flt;
        return address;
not_aligned:
        while ((tmp = bh) != NULL) {
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }
        return 0;
}

static inline unsigned long try_to_share_buffers(unsigned long address,
                                                 dev_t dev, int *b, int size)
{
        struct buffer_head * bh;
        int block;

        block = b[0];
        if (!block)
                return 0;
        bh = get_hash_table(dev, block, size);
        if (bh)
                return check_aligned(bh, address, dev, b, size);
        return try_to_load_aligned(address, dev, b, size);
}
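/*
 * bread_page() fills a full page at `address' from the given block list,
 * sharing buffers with the cache when allowed.  Reading all the buffers
 * at the same time is faster than reading and waiting one by one.
 * Missing blocks (b[i] == 0) are zero-filled.
 */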
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
        struct buffer_head * bh[MAX_BUF_PER_PAGE];
        unsigned long where;
        int i, j;

        if (!no_share) {
                where = try_to_share_buffers(address, dev, b, size);
                if (where)
                        return where;
        }
        ++current->mm->maj_flt;
        for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
                bh[i] = NULL;
                if (b[i])
                        bh[i] = getblk(dev, b[i], size);
        }
        read_buffers(bh,i);
        where = address;
        for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
                if (bh[i]) {
                        if (bh[i]->b_uptodate)
                                memcpy((void *) where, bh[i]->b_data, size);
                        brelse(bh[i]);
                } else
                        memset((void *) where, 0, size);
        }
        return address;
}

/* Copy a page's data into the corresponding buffers and mark them dirty. */
void bwrite_page(unsigned long address, dev_t dev, int b[], int size)
{
        struct buffer_head * bh[MAX_BUF_PER_PAGE];
        int i, j;

        for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
                bh[i] = NULL;
                if (b[i])
                        bh[i] = getblk(dev, b[i], size);
        }
        for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, address += size) {
                if (bh[i]) {
                        memcpy(bh[i]->b_data, (void *) address, size);
                        bh[i]->b_uptodate = 1;
                        mark_buffer_dirty(bh[i], 0);
                        brelse(bh[i]);
                } else
                        memset((void *) address, 0, size);
        }
}
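/*
 * Try to increase the number of buffers available: allocate one page,
 * carve it into buffers of the given size, and link them into the
 * corresponding free list.
 */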
static int grow_buffers(int pri, int size)
{
        unsigned long page;
        struct buffer_head *bh, *tmp;
        struct buffer_head * insert_point;
        int isize;

        if ((size & 511) || (size > PAGE_SIZE)) {
                printk("VFS: grow_buffers: size = %d\n",size);
                return 0;
        }

        isize = BUFSIZE_INDEX(size);

        if (!(page = __get_free_page(pri)))
                return 0;
        bh = create_buffers(page, size);
        if (!bh) {
                free_page(page);
                return 0;
        }

        insert_point = free_list[isize];

        tmp = bh;
        while (1) {
                nr_free[isize]++;
                if (insert_point) {
                        tmp->b_next_free = insert_point->b_next_free;
                        tmp->b_prev_free = insert_point;
                        insert_point->b_next_free->b_prev_free = tmp;
                        insert_point->b_next_free = tmp;
                } else {
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                insert_point = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        free_list[isize] = bh;
        buffer_pages[MAP_NR(page)] = bh;
        tmp->b_this_page = bh;
        wake_up(&buffer_wait);
        buffermem += PAGE_SIZE;
        return 1;
}
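/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
 */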
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
        unsigned long page;
        struct buffer_head * tmp, * p;
        int isize = BUFSIZE_INDEX(bh->b_size);

        *bhp = bh;
        page = (unsigned long) bh->b_data;
        page &= PAGE_MASK;
        tmp = bh;
        do {
                if (!tmp)
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                nr_buffers_size[isize]--;
                if (p == *bhp)
                {
                        *bhp = p->b_prev_free;
                        if (p == *bhp) /* Was this the last in the list? */
                                *bhp = NULL;
                }
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= PAGE_SIZE;
        buffer_pages[MAP_NR(page)] = NULL;
        free_page(page);
        return !mem_map[MAP_NR(page)];
}
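/*
 * Consider the load averages for the different buffer sizes, and shrink
 * a size whose share of the cache exceeds its share of the recent usage
 * (weighted by lav_const).  `size' is the size we are trying to grow,
 * or 0; that size is never victimized.  Shared buffers are not counted.
 */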
static int maybe_shrink_lav_buffers(int size)
{
        int nlist;
        int isize;
        int total_lav, total_n_buffers, n_sizes;

        /* Sum the load averages and buffer counts, excluding shared
           buffers from the totals. */
        total_lav = total_n_buffers = n_sizes = 0;
        for(nlist = 0; nlist < NR_SIZES; nlist++)
        {
                total_lav += buffers_lav[nlist];
                if(nr_buffers_size[nlist]) n_sizes++;
                total_n_buffers += nr_buffers_size[nlist];
                total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
        }

        /* See if we have an excessive number of buffers of a particular
           size - if so, victimize that bunch. */
        isize = (size ? BUFSIZE_INDEX(size) : -1);

        if (n_sizes > 1)
                for(nlist = 0; nlist < NR_SIZES; nlist++)
                {
                        if(nlist == isize) continue;
                        if(nr_buffers_size[nlist] &&
                           bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
                           total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
                                if(shrink_specific_buffers(6, bufferindex_size[nlist]))
                                        return 1;
                }
        return 0;
}
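/*
 * Try to free up some pages by shrinking the buffer cache.  Called by
 * the memory manager under pressure: high priorities sync first, then
 * we fall through to shrink_specific_buffers(), which scans the free
 * lists and the LRU lists for whole pages that can be released.
 */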
int shrink_buffers(unsigned int priority, unsigned long limit)
{
        if (priority < 2) {
                sync_buffers(0,0);
        }

        if(priority == 2) wakeup_bdflush(1);

        if(maybe_shrink_lav_buffers(0)) return 1;

        return shrink_specific_buffers(priority, 0);
}

static int shrink_specific_buffers(unsigned int priority, int size)
{
        struct buffer_head *bh;
        int nlist;
        int i, isize, isize1;

#ifdef DEBUG
        if(size) printk("Shrinking buffers of size %d\n", size);
#endif
        /* First try the free lists, and see if we can get a complete page
           from here. */
        isize1 = (size ? BUFSIZE_INDEX(size) : -1);

        for(isize = 0; isize<NR_SIZES; isize++){
                if(isize1 != -1 && isize1 != isize) continue;
                bh = free_list[isize];
                if(!bh) continue;
                for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
                        if (bh->b_count || !bh->b_this_page)
                                continue;
                        if (try_to_free(bh, &bh))
                                return 1;
                        /* Some interrupt must have used it after we freed the
                           page.  No big deal - keep looking. */
                        if(!bh) break;
                }
        }

        /* Not enough in the free lists; now try the LRU lists. */
        for(nlist = 0; nlist < NR_LIST; nlist++) {
        repeat1:
                if(priority > 3 && nlist == BUF_SHARED) continue;
                bh = lru_list[nlist];
                if(!bh) continue;
                i = 2*nr_buffers_type[nlist] >> priority;
                for ( ; i-- > 0 ; bh = bh->b_next_free) {
                        /* We may have stalled while waiting for I/O
                           to complete. */
                        if(bh->b_list != nlist) goto repeat1;
                        if (bh->b_count || !bh->b_this_page)
                                continue;
                        if(size && bh->b_size != size) continue;
                        if (bh->b_lock)
                                if (priority)
                                        continue;
                                else
                                        wait_on_buffer(bh);
                        if (bh->b_dirt) {
                                bh->b_count++;
                                bh->b_flushtime = 0;
                                ll_rw_block(WRITEA, 1, &bh);
                                bh->b_count--;
                                continue;
                        }
                        if (try_to_free(bh, &bh))
                                return 1;
                        if(!bh) break;
                }
        }
        return 0;
}
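/* Dump buffer-cache statistics to the console (debugging aid). */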
void show_buffers(void)
{
        struct buffer_head * bh;
        int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
        int shared;
        int nlist, isize;

        printk("Buffer memory:   %6dkB\n",buffermem>>10);
        printk("Buffer heads:    %6d\n",nr_buffer_heads);
        printk("Buffer blocks:   %6d\n",nr_buffers);

        for(nlist = 0; nlist < NR_LIST; nlist++) {
                shared = found = locked = dirty = used = lastused = 0;
                bh = lru_list[nlist];
                if(!bh) continue;
                do {
                        found++;
                        if (bh->b_lock)
                                locked++;
                        if (bh->b_dirt)
                                dirty++;
                        if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
                        if (bh->b_count)
                                used++, lastused = found;
                        bh = bh->b_next_free;
                } while (bh != lru_list[nlist]);
                printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
                       nlist, found, used, lastused, locked, dirty, shared);
        };
        printk("Size    [LAV]     Free   Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
        for(isize = 0; isize<NR_SIZES; isize++){
                printk("%5d [%5d]: %7d ", bufferindex_size[isize],
                       buffers_lav[isize], nr_free[isize]);
                for(nlist = 0; nlist < NR_LIST; nlist++)
                        printk("%7d ", nr_buffers_st[isize][nlist]);
                printk("\n");
        }
}
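/*
 * try_to_reassign() checks if all the buffers on this particular page
 * are unused, and reassigns them to a new cluster of blocks if so.
 */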
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
                                  dev_t dev, unsigned int starting_block)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        *bhp = bh;
        page = (unsigned long) bh->b_data;
        page &= PAGE_MASK;
        if(mem_map[MAP_NR(page)] != 1) return 0;
        tmp = bh;
        do {
                if (!tmp)
                        return 0;

                if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        tmp = bh;

        /* All of the buffers on this page are suitable; walk to the
           first buffer, i.e. the one at the page boundary. */
        while((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
                tmp = tmp->b_this_page;

        /* Now retarget every buffer on the page to consecutive blocks. */
        bh = tmp;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                remove_from_queues(p);
                p->b_dev=dev;
                p->b_uptodate = 0;
                p->b_req = 0;
                p->b_blocknr=starting_block++;
                insert_into_queues(p);
        } while (tmp != bh);
        return 1;
}
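/*
 * reassign_cluster() keeps the per-size free list topped up, then walks
 * it looking for a whole page of unused buffers that can be retargeted
 * (via try_to_reassign) to a new contiguous run of blocks.
 */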
static int reassign_cluster(dev_t dev,
                            unsigned int starting_block, int size)
{
        struct buffer_head *bh;
        int isize = BUFSIZE_INDEX(size);
        int i;

        /* To give ourselves a good shot at generating a cluster, we
           "overfill" the free list a little. */
        while(nr_free[isize] < 32) refill_freelist(size);

        bh = free_list[isize];
        if(bh)
                for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
                        if (!bh->b_this_page) continue;
                        if (try_to_reassign(bh, &bh, dev, starting_block))
                                return 4;
                }
        return 0;
}
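/*
 * This function tries to generate a new cluster of buffers from a fresh
 * page in memory, provided that none of the wanted blocks is already in
 * the cache.
 */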
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
        struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
        int isize = BUFSIZE_INDEX(size);
        unsigned long offset;
        unsigned long page;
        int nblock;

        page = get_free_page(GFP_NOBUFFER);
        if(!page) return 0;

        bh = create_buffers(page, size);
        if (!bh) {
                free_page(page);
                return 0;
        };
        nblock = block;
        for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
                if (find_buffer(dev, nblock++, size))
                        goto not_aligned;
        }
        tmp = bh;
        nblock = 0;
        while (1) {
                arr[nblock++] = bh;
                bh->b_count = 1;
                bh->b_dirt = 0;
                bh->b_flushtime = 0;
                bh->b_lock = 0;
                bh->b_uptodate = 0;
                bh->b_req = 0;
                bh->b_dev = dev;
                bh->b_list = BUF_CLEAN;
                bh->b_blocknr = block++;
                nr_buffers++;
                nr_buffers_size[isize]++;
                insert_into_queues(bh);
                if (bh->b_this_page)
                        bh = bh->b_this_page;
                else
                        break;
        }
        buffermem += PAGE_SIZE;
        buffer_pages[MAP_NR(page)] = bh;
        bh->b_this_page = tmp;
        while (nblock-- > 0)
                brelse(arr[nblock]);
        return 4;
not_aligned:
        while ((tmp = bh) != NULL) {
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }
        free_page(page);
        return 0;
}

unsigned long generate_cluster(dev_t dev, int b[], int size)
{
        int i, offset;

        for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
                if(i && b[i]-1 != b[i-1]) return 0; /* No need to cluster */
                if(find_buffer(dev, b[i], size)) return 0;
        };

        /* OK, we have a candidate for a new cluster.  First see whether
           shrinking another buffer size frees enough memory to build it
           from a fresh page. */
        if(maybe_shrink_lav_buffers(size))
        {
                int retval;
                retval = try_to_generate_cluster(dev, b[0], size);
                if(retval) return retval;
        };

        if (nr_free_pages > min_free_pages*2)
                return try_to_generate_cluster(dev, b[0], size);
        else
                return reassign_cluster(dev, b[0], size);
}
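/*
 * Initialize the buffer cache at boot: size the hash table from the
 * amount of physical memory (the counts used are primes), allocate the
 * per-page buffer map, and seed the free list with one page of
 * BLOCK_SIZE buffers.
 */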
void buffer_init(void)
{
        int i;
        int isize = BUFSIZE_INDEX(BLOCK_SIZE);

        if (high_memory >= 4*1024*1024) {
                if(high_memory >= 16*1024*1024)
                        nr_hash = 16381;
                else
                        nr_hash = 4093;
        } else {
                nr_hash = 997;
        };

        hash_table = (struct buffer_head **) vmalloc(nr_hash *
                                                     sizeof(struct buffer_head *));

        buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
                                                       sizeof(struct buffer_head *));
        for (i = 0 ; i < MAP_NR(high_memory) ; i++)
                buffer_pages[i] = NULL;

        for (i = 0 ; i < nr_hash ; i++)
                hash_table[i] = NULL;
        lru_list[BUF_CLEAN] = 0;
        grow_buffers(GFP_KERNEL, BLOCK_SIZE);
        if (!free_list[isize])
                panic("VFS: Unable to initialize buffer free list!");
        return;
}
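/*
 * Here is the bdflush daemon proper.  A user process calls sys_bdflush()
 * with func == 0 to turn itself into the flush daemon; wakeup_bdflush()
 * is how the rest of the kernel nudges it, optionally waiting for a
 * flush cycle to complete via bdflush_done.
 */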
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static int bdflush_running = 0;

static void wakeup_bdflush(int wait)
{
        if(!bdflush_running){
                printk("Warning - bdflush not running\n");
                sync_buffers(0,0);
                return;
        };
        wake_up(&bdflush_wait);
        if(wait) sleep_on(&bdflush_done);
}
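/*
 * Here we attempt to write back old buffers.  We also try to flush
 * inodes and supers as well, since this function is essentially
 * "update", and otherwise there would be no way of ensuring that these
 * quantities ever get written back.
 */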
asmlinkage int sync_old_buffers(void)
{
        int i, isize;
        int ndirty, nwritten;
        int nlist;
        int ncount;
        struct buffer_head * bh, *next;

        sync_supers(0);
        sync_inodes(0);

        ncount = 0;
#ifdef DEBUG
        for(nlist = 0; nlist < NR_LIST; nlist++)
#else
        for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
        {
                ndirty = 0;
                nwritten = 0;
        repeat:
                bh = lru_list[nlist];
                if(bh)
                        for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
                                /* We may have stalled while waiting for I/O
                                   to complete. */
                                if(bh->b_list != nlist) goto repeat;
                                next = bh->b_next_free;
                                if(!lru_list[nlist]) {
                                        printk("Dirty list empty %d\n", i);
                                        break;
                                }

                                /* Clean buffer on dirty list?  Refile it */
                                if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
                                {
                                        refile_buffer(bh);
                                        continue;
                                }

                                if (bh->b_lock || !bh->b_dirt)
                                        continue;
                                ndirty++;
                                if(bh->b_flushtime > jiffies) continue;
                                nwritten++;
                                bh->b_count++;
                                bh->b_flushtime = 0;
#ifdef DEBUG
                                if(nlist != BUF_DIRTY) ncount++;
#endif
                                ll_rw_block(WRITE, 1, &bh);
                                bh->b_count--;
                        }
        }
#ifdef DEBUG
        if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
        printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

        /* We assume that we only come through here on a regular schedule,
           like every 5 seconds.  Now update the load averages, and zero
           the usage counters for the next period. */
        for(isize = 0; isize<NR_SIZES; isize++){
                CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
                buffer_usage[isize] = 0;
        };
        return 0;
}
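/*
 * This is the interface to bdflush.  As we get more sophisticated, we
 * can pass tuning parameters to this "process" to adjust how it behaves;
 * each parameter is verified against the min/max tables above before it
 * is accepted.
 */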
asmlinkage int sys_bdflush(int func, long data)
{
        int i, error;
        int ndirty;
        int nlist;
        int ncount;
        struct buffer_head * bh, *next;

        if (!suser())
                return -EPERM;

        if (func == 1)
                return sync_old_buffers();

        /* Func 2N reads parameter N, func 2N+1 sets it. */
        if (func >= 2) {
                i = (func-2) >> 1;
                if (i < 0 || i >= N_PARAM)
                        return -EINVAL;
                if((func & 1) == 0) {
                        error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
                        if (error)
                                return error;
                        put_user(bdf_prm.data[i], (int*)data);
                        return 0;
                };
                if (data < bdflush_min[i] || data > bdflush_max[i])
                        return -EINVAL;
                bdf_prm.data[i] = data;
                return 0;
        };

        if (bdflush_running)
                return -EBUSY; /* Only one copy of this running at one time */
        bdflush_running++;

        /* OK, from here on is the daemon. */
        for (;;) {
#ifdef DEBUG
                printk("bdflush() activated...");
#endif

                ncount = 0;
#ifdef DEBUG
                for(nlist = 0; nlist < NR_LIST; nlist++)
#else
                for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
                {
                        ndirty = 0;
                repeat:
                        bh = lru_list[nlist];
                        if(bh)
                                for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
                                     bh = next) {
                                        /* We may have stalled while waiting
                                           for I/O to complete. */
                                        if(bh->b_list != nlist) goto repeat;
                                        next = bh->b_next_free;
                                        if(!lru_list[nlist]) {
                                                printk("Dirty list empty %d\n", i);
                                                break;
                                        }

                                        /* Clean buffer on dirty list?  Refile it */
                                        if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
                                        {
                                                refile_buffer(bh);
                                                continue;
                                        }

                                        if (bh->b_lock || !bh->b_dirt)
                                                continue;

                                        bh->b_count++;
                                        ndirty++;
                                        bh->b_flushtime = 0;
                                        ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
                                        if(nlist != BUF_DIRTY) ncount++;
#endif
                                        bh->b_count--;
                                }
                }
#ifdef DEBUG
                if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
                printk("sleeping again.\n");
#endif
                wake_up(&bdflush_done);

                /* If there are still a lot of dirty buffers around, skip the
                   sleep and flush some more. */
                if(nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
                   bdf_prm.b_un.nfract/100) {
                        if (current->signal & (1 << (SIGKILL-1))) {
                                bdflush_running--;
                                return 0;
                        }
                        current->signal = 0;
                        interruptible_sleep_on(&bdflush_wait);
                }
        }
}