This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- sys_fdatasync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- __brelse
- __bforget
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- recover_reusable_buffer_heads
- get_unused_buffer_head
- create_buffers
- after_unlock_page
- free_async_buffers
- brw_page
- mark_buffer_uptodate
- unlock_buffer
- generic_readpage
- grow_buffers
- try_to_free_buffer
- age_buffer
- maybe_shrink_lav_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
- bdflush

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>

#define NR_SIZES 5
static char buffersize_index[17] =
{-1,  0,  1, -1,  2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096, 8192};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;
static struct buffer_head ** hash_table;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };

static struct buffer_head * next_to_age[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };

static struct buffer_head * unused_list = NULL;
struct buffer_head * reuse_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};
int buffers_lav[NR_SIZES] = {0,};
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
extern int *blksize_size[];

static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV
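
/*
 * Tunable parameters for bdflush, settable through the bdflush system
 * call; see sys_bdflush() below for how each entry of data[] is read
 * and written.
 */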
union bdflush_param{
	struct {
		int nfract;	/* Percentage of buffer cache dirty to
				   activate bdflush */
		int ndirty;	/* Maximum number of dirty blocks to write out
				   per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain
				   each time we call refill */
		int nref_dirt;	/* Dirty buffer threshold for activating
				   bdflush when trying to refill buffers. */
		int clu_nfract;	/* Percentage of buffer cache to scan to
				   search for free clusters */
		int age_buffer;	/* Time for normal buffer to age before
				   we flush it */
		int age_super;	/* Time for superblock to age before we
				   flush it */
		int lav_const;	/* Constant used for load average (time
				   constant) */
		int lav_ratio;	/* Used to determine how low a lav for a
				   particular size can go before we start to
				   trim back the buffers */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{60, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};
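
/*
 * The minimum and maximum values each of the bdf_prm parameters may be
 * set to, enforced by sys_bdflush() below.
 */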
int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
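
/*
 * Wait on a buffer until somebody unlocks it.  We pin the buffer with
 * an extra reference while sleeping, and kick the disk task queue so
 * that the I/O we are waiting on actually gets started.
 */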
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	run_task_queue(&tq_disk);
	current->state = TASK_UNINTERRUPTIBLE;
	if (buffer_locked(bh)) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
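
/*
 * Call sync_buffers() with wait != 0 to ensure that the call does not
 * return until all buffer writes have completed.  Sync() may return
 * before the writes have finished; fsync() may not.
 */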
static int sync_buffers(kdev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

	/* One pass for no-wait, three for wait:
	 * 0) write out all dirty, unlocked buffers;
	 * 1) write out all dirty buffers, waiting if locked;
	 * 2) wait for completion by waiting for all buffers to unlock.
	 */
repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* We search all lists as a failsafe mechanism, not because we expect
	 * there to be dirty buffers on any of the other lists. */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		bh = lru_list[nlist];
		if (!bh)
			continue;
		for (i = nr_buffers_type[nlist]*2; i-- > 0; bh = next) {
			if (bh->b_list != nlist)
				goto repeat1;
			next = bh->b_next_free;
			if (!lru_list[nlist])
				break;
			if (dev && bh->b_dev != dev)
				continue;
			if (buffer_locked(bh)) {
				/* Buffer is locked; skip it unless wait is
				   requested AND pass > 0. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer(bh);
				goto repeat2;
			}
			/* If an unlocked buffer is not uptodate, there has
			   been an IO error.  Skip it. */
			if (wait && buffer_req(bh) && !buffer_locked(bh) &&
			    !buffer_dirty(bh) && !buffer_uptodate(bh)) {
				err = 1;
				continue;
			}
			/* Don't write clean buffers.  Don't write ANY
			   buffers on the third pass. */
			if (!buffer_dirty(bh) || pass >= 2)
				continue;
			/* Don't bother about locked buffers. */
			if (buffer_locked(bh))
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if (nlist != BUF_DIRTY) {
				printk("[%d %s %ld] ", nlist,
				       kdevname(bh->b_dev), bh->b_blocknr);
				ncount++;
			}
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount)
		printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If we are waiting for the sync to succeed, and if any dirty
	   blocks were written, then repeat; on the second pass, only
	   wait for buffers being written (do not pass to write any
	   more buffers on the second pass). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
	return err;
}
230
231 void sync_dev(kdev_t dev)
232 {
233 sync_buffers(dev, 0);
234 sync_supers(dev);
235 sync_inodes(dev);
236 sync_buffers(dev, 0);
237 sync_dquots(dev, -1);
238 }
239
240 int fsync_dev(kdev_t dev)
241 {
242 sync_buffers(dev, 0);
243 sync_supers(dev);
244 sync_inodes(dev);
245 sync_dquots(dev, -1);
246 return sync_buffers(dev, 1);
247 }
248
249 asmlinkage int sys_sync(void)
250 {
251 fsync_dev(0);
252 return 0;
253 }
254
255 int file_fsync (struct inode *inode, struct file *filp)
256 {
257 return fsync_dev(inode->i_dev);
258 }
259
260 asmlinkage int sys_fsync(unsigned int fd)
261 {
262 struct file * file;
263 struct inode * inode;
264
265 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
266 return -EBADF;
267 if (!file->f_op || !file->f_op->fsync)
268 return -EINVAL;
269 if (file->f_op->fsync(inode,file))
270 return -EIO;
271 return 0;
272 }
273
274 asmlinkage int sys_fdatasync(unsigned int fd)
275 {
276 struct file * file;
277 struct inode * inode;
278
279 if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
280 return -EBADF;
281 if (!file->f_op || !file->f_op->fsync)
282 return -EINVAL;
283
284 if (file->f_op->fsync(inode,file))
285 return -EIO;
286 return 0;
287 }
288
void invalidate_buffers(kdev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for (nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2; --i > 0; bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev != dev)
				continue;
			if (bh->b_count)
				continue;
			bh->b_flushtime = 0;
			clear_bit(BH_Protected, &bh->b_state);
			clear_bit(BH_Uptodate, &bh->b_state);
			clear_bit(BH_Dirty, &bh->b_state);
			clear_bit(BH_Req, &bh->b_state);
		}
	}
}

#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == B_FREE)
		panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	if (next_to_age[bh->b_list] == bh)
		next_to_age[bh->b_list] = bh->b_next_free;
	if (next_to_age[bh->b_list] == bh)
		next_to_age[bh->b_list] = NULL;

	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if (bh->b_dev != B_FREE)
		panic("Free list corrupted");
	if (!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if (bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	}
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	if (bh->b_dev == B_FREE) {
		remove_from_free_list(bh); /* Free list entries are not in
					      any queue */
		return;
	}
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		if (next_to_age[bh->b_list] == bh)
			next_to_age[bh->b_list] = bh->b_next_free;
		return;
	}
	if (bh->b_dev == B_FREE)
		panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* Add to back of the lru list. */
	if (!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	}
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = B_FREE;  /* So it is obvious we are on the free list. */

	/* Add to back of the free list. */
	if (!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	}

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	/* Put at the end of the lru list. */
	if (bh->b_dev == B_FREE) {
		put_last_free(bh);
		return;
	}
	if (!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	}
	if (!next_to_age[bh->b_list])
		next_to_age[bh->b_list] = bh;
	if (bh->b_next_free)
		panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Put the buffer in the new hash queue if it has a device. */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!(bh->b_dev))
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

static inline struct buffer_head * find_buffer(kdev_t dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block); tmp != NULL; tmp = tmp->b_next)
		if (tmp->b_blocknr == block && tmp->b_dev == dev) {
			if (tmp->b_size == size)
				return tmp;
			printk("VFS: Wrong blocksize on device %s\n",
			       kdevname(dev));
			return NULL;
		}
	return NULL;
}
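
/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */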
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh = find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block
		    && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}

void set_blocksize(kdev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	if (size > PAGE_SIZE)
		size = 0;

	switch (size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096: case 8192: ;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* We need to be quite careful how we do this - we are moving entries
	 * around on the free list, and we can get in a loop if we are not
	 * careful. */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2; --i > 0; bh = bhnext) {
			if (!bh)
				break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				clear_bit(BH_Dirty, &bh->b_state);
				clear_bit(BH_Uptodate, &bh->b_state);
				clear_bit(BH_Req, &bh->b_state);
				bh->b_flushtime = 0;
			}
			remove_from_hash_queue(bh);
		}
	}
}

#define BADNESS(bh) (buffer_dirty(bh) || buffer_locked(bh))
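
/*
 * Fill the free list with buffers of the requested size: first by
 * growing the buffer pool while free memory is plentiful, then by
 * rebalancing other buffer sizes, and finally by recycling the oldest
 * clean, unshared, unlocked buffers found on the lru lists.
 */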
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* First see if we even need this.  Sometimes it is advantageous
	   to request some blocks in a filesystem that we know that we will
	   be needing ahead of time. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed = bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if (needed <= 0)
		return;

	/* See if there are too many buffers of a different size.
	   If so, victimize those buffers. */
	while (maybe_shrink_lav_buffers(size)) {
		if (!grow_buffers(GFP_BUFFER, size))
			break;
		needed -= PAGE_SIZE;
		if (needed <= 0)
			return;
	}

	/* OK, we cannot grow the buffer cache, now try to get some from
	   the lru list.  First set the candidate pointers to usable
	   buffers.  This should be quick nearly all of the time. */
repeat0:
	for (i = 0; i < NR_LIST; i++) {
		if (i == BUF_DIRTY || i == BUF_SHARED ||
		    nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
			if (buffers[i] < 0)
				panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh)
				break;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
			    buffer_dirty(bh)) {
				refile_buffer(bh);
				continue;
			}

			if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
				continue;

			/* Buffers are written in the order they are placed
			   on the locked list; if we encounter a locked
			   buffer here, all the buffers after it are locked
			   too. */
			if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh))
				continue;
			break;
		}
		if (!buffers[i])
			candidate[i] = NULL;
		else
			candidate[i] = bh;
		if (candidate[i] && candidate[i]->b_count)
			panic("Here is the problem");
	}

repeat:
	if (needed <= 0)
		return;

	/* Now see which candidate wins the election. */
	winner = best_time = UINT_MAX;
	for (i = 0; i < NR_LIST; i++) {
		if (!candidate[i])
			continue;
		if (candidate[i]->b_lru_time < best_time) {
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, use it, then get a new candidate from that
	   list. */
	if (winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if (candidate[i] == bh)
			candidate[i] = NULL;  /* Got the last one. */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1)
			panic("Shared buffer in candidate list\n");
		if (buffer_protected(bh))
			panic("Protected buffer in candidate list\n");
		if (BADNESS(bh))
			panic("Buffer in candidate list with BADNESS != 0\n");

		if (bh->b_dev == B_FREE)
			panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = B_FREE;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if (buffers[i] < 0)
			panic("Here is the problem");

		if (buffers[i] == 0)
			candidate[i] = NULL;

		/* Now all we need to do is advance the candidate pointer
		   from the winner list to the next usable buffer. */
		if (candidate[i] && buffers[i] > 0) {
			if (buffers[i] <= 0)
				panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if (buffers[i] < 0)
					panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh)
					break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
				    buffer_dirty(bh)) {
					refile_buffer(bh);
					continue;
				}

				if (bh->b_count || buffer_protected(bh) || bh->b_size != size)
					continue;

				/* Buffers are written in the order they are
				   placed on the locked list; if we encounter
				   a locked buffer here, all the buffers
				   after it are locked too. */
				if (buffer_locked(bh) && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh))
					continue;
				break;
			}
			if (!buffers[i])
				candidate[i] = NULL;
			else
				candidate[i] = bh;
			if (candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if (needed <= 0)
		return;

	/* Too bad, that was not enough. Try a little harder to grow some. */
	if (nr_free_pages > min_free_pages + 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		}
	}

	/* And repeat until we find something good. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}

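
/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 */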
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size lav. */
	buffer_usage[isize]++;

	/* If the buffer is already hashed, just touch it and hand it back;
	   otherwise take a head off the free list, refilling the free list
	   first if necessary. */
repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (!buffer_dirty(bh)) {
			if (buffer_uptodate(bh))
				put_last_lru(bh);
			bh->b_flushtime = 0;
		}
		set_bit(BH_Touched, &bh->b_state);
		return bh;
	}

	while (!free_list[isize])
		refill_freelist(size);

	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	   and that it's unused (b_count=0), unlocked (buffer_locked=0),
	   and clean. */
	bh->b_count = 1;
	bh->b_flushtime = 0;
	bh->b_state = (1<<BH_Touched);
	bh->b_dev = dev;
	bh->b_blocknr = block;
	insert_into_queues(bh);
	return bh;
}

void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buffer_dirty(buf)) {
		/* Record when this dirty buffer should be written back;
		   superblocks (flag != 0) age faster than ordinary data. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if (!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

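
/*
 * A buffer may need to be moved from one buffer list to another
 * (e.g. in case it is not shared any more). Handle this.
 */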
void refile_buffer(struct buffer_head * buf)
{
	int dispose;

	if (buf->b_dev == B_FREE) {
		printk("Attempt to refile free buffer\n");
		return;
	}
	if (buffer_dirty(buf))
		dispose = BUF_DIRTY;
	else if ((mem_map[MAP_NR((unsigned long) buf->b_data)].count > 1) ||
		 buffer_protected(buf))
		dispose = BUF_SHARED;
	else if (buffer_locked(buf))
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;
	else
		dispose = BUF_CLEAN;
	if (dispose == BUF_CLEAN)
		buf->b_lru_time = jiffies;
	if (dispose != buf->b_list) {
		if (dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		if (dispose == BUF_LOCKED &&
		    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if (dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		    bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}
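
/*
 * Release a buffer head.
 */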
void __brelse(struct buffer_head * buf)
{
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		buf->b_count--;
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
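
/*
 * bforget() is like brelse(), except it discards any potentially
 * dirty data and removes the buffer from the hash queue, so the
 * block is forgotten rather than written back.
 */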
void __bforget(struct buffer_head * buf)
{
	wait_on_buffer(buf);
	mark_buffer_clean(buf);
	clear_bit(BH_Protected, &buf->b_state);
	buf->b_count--;
	remove_from_hash_queue(buf);
	buf->b_dev = NODEV;
	refile_buffer(buf);
}
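
/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */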
struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: impossible error\n");
		return NULL;
	}
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
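
/*
 * Ok, breada can be used as bread, but additionally to read other
 * blocks for read-ahead.  The end of the file (filesize) is taken
 * into account so that we never read past it.
 */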
#define NBUF 16

struct buffer_head * breada(kdev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (buffer_uptodate(bh))
		return bh;
	else
		ll_rw_block(READ, 1, &bh);

	blocks = (filesize - pos) >> (9+index);

	if (blocks < (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	/* Collect the read-ahead blocks that are not already uptodate. */
	bhlist[0] = bh;
	j = 1;
	for (i = 1; i < blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		}
		else bhlist[j++] = bh;
	}

	/* Request the read-ahead. */
	if (j > 1)
		ll_rw_block(READA, (j-1), bhlist+1);
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

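
/*
 * Return a buffer head to the pool of unused heads.  The wait queue
 * is preserved across the memset, and anybody waiting for a head is
 * woken up.
 */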
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
	wake_up(&buffer_wait);
}

static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	for (;;) {
		if (unused_list)
			return;

		/*
		 * This is critical.  We can't swap out pages to get
		 * more buffer heads, because the swap-out may need
		 * more buffer-heads itself.  Thus GFP_ATOMIC.
		 */
		bh = (struct buffer_head *) get_free_page(GFP_ATOMIC);
		if (bh)
			break;

		/*
		 * Uhhuh. We're _really_ low on memory. Now we just
		 * wait for old buffer heads to become free due to
		 * finishing IO.  Since this is an async request and
		 * the reserve list is empty, we're sure there are
		 * async buffer heads in use.
		 */
		run_task_queue(&tq_disk);
		sleep_on(&buffer_wait);
	}

	/* Carve the page into buffer heads and chain them onto the
	   unused list. */
	i = PAGE_SIZE / sizeof(*bh);
	nr_buffer_heads += i;
	for (; i > 0; i--) {
		bh->b_next_free = unused_list;
		unused_list = bh++;
	}
}

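
/*
 * We can't put completed temporary IO buffer_heads directly onto the
 * unused_list when they become unlocked, since the device driver ends
 * up using them in an interrupt context, and the wake_up() inside
 * put_unused_buffer_head() must not be called at interrupt time.
 * Instead they are chained onto reuse_list at interrupt time, and this
 * routine (called from process context) moves them back onto the
 * unused_list.
 */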
static inline void recover_reusable_buffer_heads(void)
{
	if (reuse_list) {
		struct buffer_head *bh;
		unsigned long flags;

		save_flags(flags);
		do {
			cli();
			bh = reuse_list;
			reuse_list = bh->b_next_free;
			restore_flags(flags);
			put_unused_buffer_head(bh);
		} while (reuse_list);
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	recover_reusable_buffer_heads();
	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_state = 0;
	return bh;
}
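
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 */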
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	long offset;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = B_FREE;  /* Flag as unused */
	}
	return head;

	/* In case anything failed, we just free everything we got. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}

/* Run the hooks that have to be done when a page I/O has completed. */
static inline void after_unlock_page (struct page * page)
{
	if (clear_bit(PG_decr_after, &page->flags))
		nr_async_pages--;
	if (clear_bit(PG_free_after, &page->flags))
		free_page(page_address(page));
	if (clear_bit(PG_swap_unlock_after, &page->flags))
		swap_after_unlock_page(page->swap_unlock_entry);
}
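
/*
 * Free all temporary buffers belonging to a page: the buffer heads are
 * chained onto reuse_list (with interrupts disabled) for later
 * recovery by recover_reusable_buffer_heads().
 */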
static inline void free_async_buffers (struct buffer_head * bh)
{
	struct buffer_head * tmp;
	unsigned long flags;

	tmp = bh;
	save_flags(flags);
	cli();
	do {
		if (!test_bit(BH_FreeOnIO, &tmp->b_state)) {
			printk ("Whoops: unlock_buffer: "
				"async IO mismatch on page.\n");
			restore_flags(flags);
			return;
		}
		tmp->b_next_free = reuse_list;
		reuse_list = tmp;
		clear_bit(BH_FreeOnIO, &tmp->b_state);
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	restore_flags(flags);
}
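
/*
 * Start I/O on a page, assembling temporary buffer heads for the
 * transfer.  Blocks that already live in the buffer cache are copied
 * to/from the page instead of hitting the device twice, and block 0
 * denotes a hole when bmap is used.  The page must be locked.
 */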
int brw_page(int rw, unsigned long address, kdev_t dev, int b[], int size, int bmap)
{
	struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
	int block, nr;
	struct page *page;

	page = mem_map + MAP_NR(address);
	if (!PageLocked(page))
		panic("brw_page: page not locked for I/O");
	clear_bit(PG_uptodate, &page->flags);

	/*
	 * Allocate buffer heads pointing to the blocks on the page,
	 * then submit them all for I/O.
	 */
	bh = create_buffers(address, size);
	if (!bh) {
		clear_bit(PG_locked, &page->flags);
		wake_up(&page->wait);
		return -ENOMEM;
	}
	nr = 0;
	next = bh;
	do {
		struct buffer_head * tmp;
		block = *(b++);

		set_bit(BH_FreeOnIO, &next->b_state);
		next->b_list = BUF_CLEAN;
		next->b_dev = dev;
		next->b_blocknr = block;
		next->b_count = 1;
		next->b_flushtime = 0;
		set_bit(BH_Uptodate, &next->b_state);

		/*
		 * When we use bmap, we define block zero to represent
		 * a hole.  Holes are simply zero-filled and never hit
		 * the device.
		 */
		if (bmap && !block) {
			memset(next->b_data, 0, size);
			next->b_count--;
			continue;
		}
		tmp = get_hash_table(dev, block, size);
		if (tmp) {
			if (!buffer_uptodate(tmp)) {
				if (rw == READ)
					ll_rw_block(READ, 1, &tmp);
				wait_on_buffer(tmp);
			}
			if (rw == READ)
				memcpy(next->b_data, tmp->b_data, size);
			else {
				memcpy(tmp->b_data, next->b_data, size);
				mark_buffer_dirty(tmp, 0);
			}
			brelse(tmp);
			next->b_count--;
			continue;
		}
		if (rw == READ)
			clear_bit(BH_Uptodate, &next->b_state);
		else
			set_bit(BH_Dirty, &next->b_state);
		arr[nr++] = next;
	} while (prev = next, (next = next->b_this_page) != NULL);
	prev->b_this_page = bh;

	if (nr) {
		ll_rw_block(rw, nr, arr);
		/* The rest of the work is done in mark_buffer_uptodate()
		   and unlock_buffer(). */
	} else {
		clear_bit(PG_locked, &page->flags);
		set_bit(PG_uptodate, &page->flags);
		wake_up(&page->wait);
		free_async_buffers(bh);
		after_unlock_page(page);
	}
	++current->maj_flt;
	return 0;
}
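
/*
 * Mark a buffer uptodate.  When every buffer on the page is uptodate,
 * the page itself is marked uptodate as well.
 */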
void mark_buffer_uptodate(struct buffer_head * bh, int on)
{
	if (on) {
		struct buffer_head *tmp = bh;
		set_bit(BH_Uptodate, &bh->b_state);

		/* If a page has buffers and all these buffers are uptodate,
		   then the page is uptodate. */
		do {
			if (!test_bit(BH_Uptodate, &tmp->b_state))
				return;
			tmp = tmp->b_this_page;
		} while (tmp && tmp != bh);
		set_bit(PG_uptodate, &mem_map[MAP_NR(bh->b_data)].flags);
		return;
	}
	clear_bit(BH_Uptodate, &bh->b_state);
}
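
/*
 * Unlock a buffer, and clean up the async page I/O state if this was
 * the last buffer on the page to complete.
 */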
void unlock_buffer(struct buffer_head * bh)
{
	struct buffer_head *tmp;
	struct page *page;

	clear_bit(BH_Lock, &bh->b_state);
	wake_up(&bh->b_wait);

	if (!test_bit(BH_FreeOnIO, &bh->b_state))
		return;
	/* This is a temporary buffer used for page I/O. */
	page = mem_map + MAP_NR(bh->b_data);
	if (!PageLocked(page)) {
		printk ("Whoops: unlock_buffer: "
			"async io complete on unlocked page\n");
		return;
	}
	if (bh->b_count != 1) {
		printk ("Whoops: unlock_buffer: b_count != 1 on async io.\n");
		return;
	}

	/*
	 * Async buffer_heads are here only as labels for IO, and get
	 * thrown away once the IO for this page is complete.  IO is
	 * deemed complete once all buffers have been visited
	 * (b_count==0) and are now unlocked.
	 */
	bh->b_count--;
	for (tmp = bh; tmp = tmp->b_this_page, tmp != bh; ) {
		if (test_bit(BH_Lock, &tmp->b_state) || tmp->b_count)
			return;
	}

	/* OK, the async IO on this page is complete. */
	clear_bit(PG_locked, &page->flags);
	wake_up(&page->wait);
	free_async_buffers(bh);
	after_unlock_page(page);
	wake_up(&buffer_wait);
}
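
/*
 * Generic "readpage" function for block devices that have the normal
 * bmap functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * mark_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */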
int generic_readpage(struct inode * inode, struct page * page)
{
	unsigned long block, address;
	int *p, nr[PAGE_SIZE/512];
	int i;

	address = page_address(page);
	page->count++;
	set_bit(PG_locked, &page->flags);
	set_bit(PG_free_after, &page->flags);

	i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
	block = page->offset >> inode->i_sb->s_blocksize_bits;
	p = nr;
	do {
		*p = inode->i_op->bmap(inode, block);
		i--;
		block++;
		p++;
	} while (i > 0);

	/* IO start */
	brw_page(READ, address, inode->i_dev, nr, inode->i_sb->s_blocksize, 1);
	return 0;
}
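
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */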
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	tmp->b_this_page = bh;
	free_list[isize] = bh;
	mem_map[MAP_NR(page)].buffers = bh;
	buffermem += PAGE_SIZE;
	return 1;
}
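
/* =========== Reduce the buffer memory ============= */

/*
 * try_to_free_buffer() checks if all the buffers on this particular
 * page are unused, and frees the page if so.
 */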
int try_to_free_buffer(struct buffer_head * bh, struct buffer_head ** bhp,
		       int priority)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp) || tmp->b_wait)
			return 0;
		if (priority && buffer_touched(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp) {
			*bhp = p->b_prev_free;
			if (p == *bhp) /* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	mem_map[MAP_NR(page)].buffers = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)].count;
}
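
/* Age buffers on a given page, according to whether they have been
   visited recently or not. */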
static inline void age_buffer(struct buffer_head *bh)
{
	struct buffer_head *tmp = bh;
	int touched = 0;

	/*
	 * When we age a page, we mark all other buffers in the page
	 * with the "has_aged" flag.  Then, when these aliased buffers
	 * come up for aging, we skip them until next pass.  This
	 * ensures that a page full of multiple buffers only gets aged
	 * once per pass through the lru lists.
	 */
	if (clear_bit(BH_Has_aged, &bh->b_state))
		return;

	do {
		touched |= clear_bit(BH_Touched, &tmp->b_state);
		tmp = tmp->b_this_page;
		set_bit(BH_Has_aged, &tmp->b_state);
	} while (tmp != bh);
	clear_bit(BH_Has_aged, &bh->b_state);

	if (touched)
		touch_page(mem_map + MAP_NR((unsigned long) bh->b_data));
	else
		age_page(mem_map + MAP_NR((unsigned long) bh->b_data));
}
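
/*
 * Consult the load average for buffers and decide whether or not
 * we should shrink the buffers of one size or not.  If we decide yes,
 * do it and return 1.  Else return 0.  Do not attempt to shrink the
 * size that is specified.
 *
 * I would prefer not to use a load average, but the way things are now
 * it seems unavoidable.  The way to get rid of it would be to force
 * bdflush to use specific buffer sizes, and this still doesn't solve
 * the problem of which size to start with.
 */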
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* First see whether we even have buffers of multiple sizes;
	   otherwise there is nothing to rebalance. */
	total_lav = total_n_buffers = n_sizes = 0;
	for (nlist = 0; nlist < NR_SIZES; nlist++) {
		total_lav += buffers_lav[nlist];
		if (nr_buffers_size[nlist])
			n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* See if we have an excessive number of buffers of a particular
	   size - if so, victimize that bunch. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for (nlist = 0; nlist < NR_SIZES; nlist++) {
			if (nlist == isize)
				continue;
			if (nr_buffers_size[nlist] &&
			    bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			    total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if (shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}
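
/*
 * Try to free up some pages by shrinking the buffer-cache.
 *
 * Priority tells the routine how hard to try to shrink the buffers:
 * 6 means "don't bother too much", while a value of 0 means "we'd
 * better get some free pages now".  If size is nonzero, only buffers
 * of that size are considered.
 */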
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if (size)
		printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	   from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for (isize = 0; isize < NR_SIZES; isize++) {
		if (isize1 != -1 && isize1 != isize)
			continue;
		bh = free_list[isize];
		if (!bh)
			continue;
		for (i = 0; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				continue;
			if (!age_of((unsigned long) bh->b_data) &&
			    try_to_free_buffer(bh, &bh, 6))
				return 1;
			if (!bh)
				break;
		}
	}

	/* Not enough in the free lists; now try the lru lists. */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if (priority > 2 && nlist == BUF_SHARED)
			continue;
		i = nr_buffers_type[nlist];
		i = ((BUFFEROUT_WEIGHT * i) >> 10) >> priority;
		for ( ; i > 0; i--) {
			bh = next_to_age[nlist];
			if (!bh)
				break;
			next_to_age[nlist] = bh->b_next_free;

			/* First, age the buffer. */
			age_buffer(bh);
			/* We may have stalled while waiting for I/O
			   to complete. */
			if (bh->b_list != nlist)
				goto repeat1;
			if (bh->b_count || buffer_protected(bh) ||
			    !bh->b_this_page)
				continue;
			if (size && bh->b_size != size)
				continue;
			if (buffer_locked(bh))
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (buffer_dirty(bh)) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}

			/* Only free a buffer whose page has aged enough;
			   the threshold gets weaker as priority drops. */
			if ((age_of((unsigned long) bh->b_data) >>
			     (6-priority)) > 0)
				continue;
			if (try_to_free_buffer(bh, &bh, 0))
				return 1;
			if (!bh)
				break;
		}
	}
	return 0;
}
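
/* ================== Debugging =================== */

/*
 * Dump the state of the buffer cache to the console.
 */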
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int protected = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory: %6dkB\n", buffermem>>10);
	printk("Buffer heads: %6d\n", nr_buffer_heads);
	printk("Buffer blocks: %6d\n", nr_buffers);

	for (nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = protected = 0;
		bh = lru_list[nlist];
		if (!bh)
			continue;
		do {
			found++;
			if (buffer_locked(bh))
				locked++;
			if (buffer_protected(bh))
				protected++;
			if (buffer_dirty(bh))
				dirty++;
			if (mem_map[MAP_NR(((unsigned long) bh->b_data))].count != 1)
				shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), "
		       "%d locked, %d protected, %d dirty %d shrd\n",
		       nlist, found, used, lastused,
		       locked, protected, dirty, shared);
	}
	printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared \n");
	for (isize = 0; isize < NR_SIZES; isize++) {
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for (nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}
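
/* ====================== Cluster patches for ext2 ==================== */

/*
 * try_to_reassign() checks if all the buffers on this particular page
 * are unused, and reassigns them to the given cluster if so.
 */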
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  kdev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if (mem_map[MAP_NR(page)].count != 1)
		return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || buffer_protected(tmp) ||
		    buffer_dirty(tmp) || buffer_locked(tmp))
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	while ((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		mark_buffer_uptodate(p, 0);
		clear_bit(BH_Req, &p->b_state);
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
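
/*
 * Try to find a free cluster by locating a page where all of the
 * buffers are unused.  We would like this function to be atomic, so
 * we do not call anything that might cause the process to sleep.
 *
 * The idea is that the kernel should end up using whole pages for the
 * buffer cache as much of the time as possible.  This way the other
 * buffers on a particular page are likely to be very near each other
 * on the free list, and we will not be expiring data prematurely.
 * For now we only cannibalize buffers of the same size to keep the
 * code simpler.
 */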
static int reassign_cluster(kdev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* Make sure there is a generous supply of free buffers of this
	   size before we go hunting for a reusable page. */
	while (nr_free[isize] < 32)
		refill_freelist(size);

	bh = free_list[isize];
	if (bh)
		for (i = 0; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page)
				continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}
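
/* This function tries to generate a new cluster of buffers
 * from a new page in memory.
 */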
static unsigned long try_to_generate_cluster(kdev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if (!page)
		return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}
	nblock = block;
	for (offset = 0; offset < PAGE_SIZE; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_flushtime = 0;
		bh->b_state = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	mem_map[MAP_NR(page)].buffers = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}

unsigned long generate_cluster(kdev_t dev, int b[], int size)
{
	int i, offset;

	for (i = 0, offset = 0; offset < PAGE_SIZE; i++, offset += size) {
		if (i && b[i]-1 != b[i-1])
			return 0;  /* No need to cluster */
		if (find_buffer(dev, b[i], size))
			return 0;
	}

	/* OK, we have a candidate for a new cluster. */

	/* See if one size of buffer is over-represented in the buffer
	   cache; if so, reduce the numbers of buffers. */
	if (maybe_shrink_lav_buffers(size)) {
		int retval;
		retval = try_to_generate_cluster(dev, b[0], size);
		if (retval)
			return retval;
	}

	if (nr_free_pages > min_free_pages*2)
		return try_to_generate_cluster(dev, b[0], size);
	else
		return reassign_cluster(dev, b[0], size);
}
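
/* ===================== Init ======================= */

/*
 * Allocate the buffer hash table (sized by the amount of memory in
 * the machine, using prime bucket counts to spread the hash function
 * well) and prime the free list with one page's worth of BLOCK_SIZE
 * buffers.
 */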
void buffer_init(void)
{
	int i;
	int isize = BUFSIZE_INDEX(BLOCK_SIZE);
	long memsize = MAP_NR(high_memory) << PAGE_SHIFT;

	if (memsize >= 64*1024*1024)
		nr_hash = 65521;
	else if (memsize >= 32*1024*1024)
		nr_hash = 32749;
	else if (memsize >= 16*1024*1024)
		nr_hash = 16381;
	else if (memsize >= 8*1024*1024)
		nr_hash = 8191;
	else if (memsize >= 4*1024*1024)
		nr_hash = 4093;
	else
		nr_hash = 997;

	hash_table = (struct buffer_head **) vmalloc(nr_hash *
						     sizeof(struct buffer_head *));

	for (i = 0; i < nr_hash; i++)
		hash_table[i] = NULL;
	lru_list[BUF_CLEAN] = 0;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list[isize])
		panic("VFS: Unable to initialize buffer free list!");
	return;
}
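
/* ====================== bdflush support =================== */

/*
 * This is a simple kernel daemon, whose job it is to provide a
 * dynamic response to dirty buffers.  Once this process is activated,
 * we write back a limited number of buffers to the disks and then go
 * back to sleep again.
 */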
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static void wakeup_bdflush(int wait)
{
	wake_up(&bdflush_wait);
	if (wait) {
		run_task_queue(&tq_disk);
		sleep_on(&bdflush_done);
	}
}
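
/*
 * Here we attempt to write back old buffers.  We also try to flush
 * inodes and supers as well, since this function is essentially
 * "update", and otherwise there would be no way of ensuring that
 * these quantities ever get written back.  Ideally, we would have a
 * timestamp on the inodes and superblocks so that we could write back
 * only the old ones as well.
 */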
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for (nlist = 0; nlist < NR_LIST; nlist++)
#else
	for (nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if (bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* We may have stalled while waiting for I/O
				   to complete. */
				if (bh->b_list != nlist)
					goto repeat;
				next = bh->b_next_free;
				if (!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on dirty list?  Refile it. */
				if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh)) {
					refile_buffer(bh);
					continue;
				}

				if (buffer_locked(bh) || !buffer_dirty(bh))
					continue;
				ndirty++;
				if (bh->b_flushtime > jiffies)
					continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if (nlist != BUF_DIRTY)
					ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount)
		printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* We assume that we only come through here on a regular
	   schedule, like every 5 seconds.  Now update load averages.
	   Shift usage counts to prevent overflow. */
	for (isize = 0; isize < NR_SIZES; isize++) {
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	}
	return 0;
}
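
/*
 * This is the interface to bdflush.  As we get more sophisticated, we
 * can pass tuning parameters to this "process", to adjust how it
 * behaves.  We would want to verify each parameter, however, to make
 * sure that it is reasonable.
 */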
asmlinkage int sys_bdflush(int func, long data)
{
	int i, error;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* For func >= 2: an even func reads parameter (func-2)/2, an odd
	   func sets it (after range-checking against bdflush_min/max). */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if ((func & 1) == 0) {
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_user(bdf_prm.data[i], (int*)data);
			return 0;
		}
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	}

	/* Having func 0 used to launch the actual bdflush and then never
	   return (unless explicitly killed). We return zero here to
	   remain semi-compatible with present update(8) programs. */
	return 0;
}
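
/*
 * This is the actual bdflush daemon itself.  It is started as a
 * kernel thread at boot; sys_bdflush() above only tunes or triggers
 * it.
 */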
int bdflush(void * unused)
{
	int i;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	/*
	 *	We have a bare-bones task_struct, and really should fill
	 *	in a few more things so "top" and /proc/2/{exe,root,cwd}
	 *	display semi-sane things. Not real crucial though...
	 */
	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "kflushd");

	/*
	 *	As a kernel thread we want to tamper with system buffers
	 *	and other internals and thus be subject to the SMP locking
	 *	rules. (On a uniprocessor box this does nothing).
	 */
#ifdef __SMP__
	lock_kernel();
	syscall_count++;
#endif

	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for (nlist = 0; nlist < NR_LIST; nlist++)
#else
		for (nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if (bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* We may have stalled while waiting
					   for I/O to complete. */
					if (bh->b_list != nlist)
						goto repeat;
					next = bh->b_next_free;
					if (!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on dirty list?
					   Refile it. */
					if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh)) {
						refile_buffer(bh);
						continue;
					}

					if (buffer_locked(bh) || !buffer_dirty(bh))
						continue;

					/* Should we write back buffers that
					   are shared or not??  Currently
					   dirty buffers are not shared, so
					   it does not matter. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if (nlist != BUF_DIRTY)
						ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount)
			printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		run_task_queue(&tq_disk);
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers around, skip
		   the sleep and flush some more. */
		if (nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		    bdf_prm.b_un.nfract/100) {
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}