This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/errno.h>
23 #include <linux/sched.h>
24 #include <linux/kernel.h>
25 #include <linux/major.h>
26 #include <linux/string.h>
27 #include <linux/locks.h>
28 #include <linux/errno.h>
29 #include <linux/malloc.h>
30
31 #include <asm/system.h>
32 #include <asm/segment.h>
33 #include <asm/io.h>
34
35 #ifdef CONFIG_SCSI
36 #ifdef CONFIG_BLK_DEV_SR
37 extern int check_cdrom_media_change(int, int);
38 #endif
39 #ifdef CONFIG_BLK_DEV_SD
40 extern int check_scsidisk_media_change(int, int);
41 extern int revalidate_scsidisk(int, int);
42 #endif
43 #endif
44 #ifdef CONFIG_CDU31A
45 extern int check_cdu31a_media_change(int, int);
46 #endif
47 #ifdef CONFIG_MCD
48 extern int check_mcd_media_change(int, int);
49 #endif
50
51 #define NR_SIZES 4
52 static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
53 static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};
54
55 #define BUFSIZE_INDEX(X) (buffersize_index[(X)>>9])
56
57 static int grow_buffers(int pri, int size);
58 static int shrink_specific_buffers(unsigned int priority, int size);
59 static int maybe_shrink_lav_buffers(int);
60
61 static int nr_hash = 0;
62 static struct buffer_head ** hash_table;
63 struct buffer_head ** buffer_pages;
64 static struct buffer_head * lru_list[NR_LIST] = {NULL, };
65 static struct buffer_head * free_list[NR_SIZES] = {NULL, };
66 static struct buffer_head * unused_list = NULL;
67 static struct wait_queue * buffer_wait = NULL;
68
69 int nr_buffers = 0;
70 int nr_buffers_type[NR_LIST] = {0,};
71 int nr_buffers_size[NR_SIZES] = {0,};
72 int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
73 int buffer_usage[NR_SIZES] = {0,};
74 int buffers_lav[NR_SIZES] = {0,};
75 int nr_free[NR_SIZES] = {0,};
76 int buffermem = 0;
77 int nr_buffer_heads = 0;
78 static int min_free_pages = 20;
79 extern int *blksize_size[];
80
81
82 static void wakeup_bdflush(int);
83
84 #define N_PARAM 9
85 #define LAV
86
87 static union bdflush_param{
88 struct {
89 int nfract;
90
91 int ndirty;
92
93 int nrefill;
94
95 int nref_dirt;
96
97 int clu_nfract;
98
99 int age_buffer;
100
101 int age_super;
102
103 int lav_const;
104
105 int lav_ratio;
106
107
108 } b_un;
109 unsigned int data[N_PARAM];
110 } bdf_prm = {{25, 500, 64, 256, 15, 3000, 500, 1884, 2}};
111
112
113
114
115
116
117
118 static int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 100, 100, 1, 1};
119 static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};
120
121
122
123
124
125
126
127
128
129
/*
 * Sleep until the buffer is unlocked.  The reference count is bumped
 * around the wait so the buffer cannot be reclaimed underneath us.
 * TASK_UNINTERRUPTIBLE is re-set on every loop iteration *before*
 * testing b_lock, so a wakeup arriving between the test and schedule()
 * cannot be lost.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
146
147
148
149
150
151
152
153
154
155
156
/*
 * Write out dirty buffers for one device (or all devices if dev == 0).
 *
 * wait == 0: schedule writes asynchronously and return immediately.
 * wait != 0: run up to three passes (pass 0 starts writes, later passes
 * wait on locked buffers) so that on return all writes have completed.
 * Returns nonzero if an inconsistent buffer was seen.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
	ncount = 0;

	for(nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Walk at most twice the list length; buffers can move between
		   lists while we sleep, hence the b_list re-check below. */
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			/* Buffer migrated to another list while we slept:
			   restart this list from its head. */
			if(bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if(!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* On the first pass just note the locked buffer;
				   only later passes (wait mode) block on it. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer (bh);
			}
			/* Sanity check: a completed request should have left the
			   buffer either dirty or uptodate. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d\n", nlist);
				continue;
			}
			/* Pass 2 only waits for completion; no new writes. */
			if (!bh->b_dirt || pass>=2)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);
			/* Dirty buffers should live on BUF_DIRTY; report strays. */
			if(nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* In wait mode, re-run until nothing was skipped (max 3 passes). */
	if (wait && retry && ++pass<=2)
		goto repeat;
	return err;
}
227
/*
 * Asynchronously flush a device: buffers are synced both before and
 * after the supers/inodes pass, since writing supers and inodes dirties
 * new buffers.  Does not wait for I/O completion (wait == 0).
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}
235
/*
 * Like sync_dev(), but the final pass waits for all writes to complete
 * and propagates any error from sync_buffers().
 */
int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}
243
/*
 * sync(2): start flushing all devices (dev == 0 means "every device").
 * Per tradition, does not wait for the writes to finish.
 */
asmlinkage int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
249
/*
 * Generic fsync implementation for filesystems without a finer-grained
 * one: flush the whole device backing the inode.  filp is unused.
 */
int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
254
/*
 * fsync(2): validate the descriptor, then delegate to the file's fsync
 * operation.  -EBADF for a bad fd, -EINVAL if the fs has no fsync,
 * -EIO if the flush itself fails.
 */
asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->filp[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}
268
/*
 * Invalidate all buffers of a device (used after a media change):
 * clears uptodate/dirty/req state so stale cached data is never used.
 * The device is re-checked after wait_on_buffer() because the buffer
 * may have been reused for another device while we slept.
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		/* Bounded walk (2x list length) guards against list churn
		   while sleeping in wait_on_buffer(). */
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ;
		     bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			if (bh->b_dev == dev)
				bh->b_flushtime = bh->b_uptodate =
					bh->b_dirt = bh->b_req = 0;
		}
	}
}
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/*
 * Ask the device driver whether removable media was changed; if so,
 * drop every cached superblock, inode and buffer for the device so
 * nothing stale survives the swap.  Only drivers compiled in (per the
 * CONFIG_* switches) are consulted; unknown majors are ignored.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case FLOPPY_MAJOR:
		/* The floppy driver needs a buffer on the drive to probe it. */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case SCSI_DISK_MAJOR:
		i = check_scsidisk_media_change(dev, 0);
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case SCSI_CDROM_MAJOR:
		i = check_cdrom_media_change(dev, 0);
		break;
#endif

#if defined(CONFIG_CDU31A)
	case CDU31A_CDROM_MAJOR:
		i = check_cdu31a_media_change(dev, 0);
		break;
#endif

#if defined(CONFIG_MCD)
	case MITSUMI_CDROM_MAJOR:
		i = check_mcd_media_change(dev, 0);
		break;
#endif

	default:
		return;
	};

	if (!i) return;		/* no change detected */

	printk("VFS: Disk change detected on device %d/%d\n",
	       MAJOR(dev), MINOR(dev));
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_blocks[i].s_dev == dev)
			put_super(super_blocks[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* SCSI disks need the partition table re-read after a change. */
	if (MAJOR(dev) == SCSI_DISK_MAJOR)
		revalidate_scsidisk(dev, 0);
#endif
}
361
362 #define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
363 #define hash(dev,block) hash_table[_hashfn(dev,block)]
364
365 static inline void remove_from_hash_queue(struct buffer_head * bh)
366 {
367 if (bh->b_next)
368 bh->b_next->b_prev = bh->b_prev;
369 if (bh->b_prev)
370 bh->b_prev->b_next = bh->b_next;
371 if (hash(bh->b_dev,bh->b_blocknr) == bh)
372 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
373 bh->b_next = bh->b_prev = NULL;
374 }
375
/*
 * Unlink a buffer from its (circular, doubly linked) LRU list.
 * Panics on a corrupted list or if a free buffer (b_dev == 0xffff,
 * which belongs on a free list instead) shows up here.
 */
static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	/* Advance the list head past us; if it still points at us the
	   list had exactly one element and is now empty. */
	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if(lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}
390
/*
 * Unlink a buffer from the size-indexed free list it lives on and
 * decrement the free count.  Only free buffers (b_dev == 0xffff)
 * belong here; anything else indicates list corruption.
 */
static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if(bh->b_dev != 0xffff) panic("Free list corrupted");
	if(!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	/* Single-element circular list: removing it empties the list. */
	if(bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}
410
/*
 * Remove a buffer from every list it is on.  A free buffer (b_dev ==
 * 0xffff) is only on a free list; anything else is on the hash table,
 * an LRU list, and counted in the per-type/per-size statistics.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		remove_from_free_list(bh);
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}
423
/*
 * Move a buffer to the tail (most-recently-used end) of its LRU list.
 * If the buffer is already the list head, simply advancing the head
 * makes it the tail of the circular list — no relinking needed.
 */
static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if(bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* List may have gone empty above; re-seed it with this buffer. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	/* Splice in just before the head, i.e. at the MRU tail. */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}
446
/*
 * Mark a buffer as free (b_dev = 0xffff) and append it to the tail of
 * the free list for its size, updating the free count.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* 0xffff marks a buffer as free */

	/* Empty list: seed the circular list with this buffer. */
	if(!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	/* Splice in just before the head, i.e. at the tail. */
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}
468
/*
 * Insert a buffer into all the lists it belongs on: the free list if
 * it is free, otherwise the tail of its LRU list, the per-type/size
 * statistics, and (for real devices) the hash table.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	if(bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	/* Empty LRU list: seed the circular list with this buffer. */
	if(!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	/* Splice in at the MRU tail (just before the head). */
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* Hash only buffers bound to a real device (b_dev != 0). */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
498
499 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
500 {
501 struct buffer_head * tmp;
502
503 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
504 if (tmp->b_dev==dev && tmp->b_blocknr==block)
505 if (tmp->b_size == size)
506 return tmp;
507 else {
508 printk("VFS: Wrong blocksize on device %d/%d\n",
509 MAJOR(dev), MINOR(dev));
510 return NULL;
511 }
512 return NULL;
513 }
514
515
516
517
518
519
520
521
/*
 * Hash lookup that returns a referenced, unlocked buffer.  Because
 * wait_on_buffer() may sleep, the buffer can be repurposed meanwhile;
 * the identity is re-checked afterwards and the lookup retried until
 * a stable match (or no match) is found.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		/* Buffer changed identity while we slept: drop and retry. */
		bh->b_count--;
	}
}
536
/*
 * Change the soft block size of a device.  All pending I/O is flushed
 * (sync_buffers with wait == 2), then every cached buffer of the old
 * size is invalidated and unhashed so it can never satisfy a lookup
 * for the new size.  Only 512/1024/2048/4096 are legal sizes.
 */
void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch(size) {
	default: panic("Invalid blocksize passed to set_blocksize");
	case 512: case 1024: case 2048: case 4096:;
	}

	/* First-time setup to the default size needs no flushing. */
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* Invalidate and unhash all wrongly-sized buffers of this device. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if(!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			/* Re-check after sleeping: the buffer may have moved. */
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}
581
582 #define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
583
/*
 * Ensure the free list for the given buffer size has buffers to hand
 * out.  Strategy, in order:
 *   1. grow new buffers from free pages while memory is plentiful;
 *   2. shrink other buffer sizes whose load average says they are
 *      over-provisioned, and grow from the reclaimed pages;
 *   3. scan the LRU lists for clean, unshared, idle victims and move
 *      the globally least-recently-used ones to the free list;
 *   4. as a last resort grow with GFP_ATOMIC, waking bdflush if even
 *      that fails.
 * "needed" is a byte budget (nrefill buffers of this size).
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* Already plenty of free buffers of this size. */
	if (nr_free[isize] > 100)
		return;

	needed =bdf_prm.b_un.nrefill * size;

	/* Step 1: grow cheaply while free pages remain above the floor. */
	while (nr_free_pages > min_free_pages && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if(needed <= 0) return;

	/* Step 2: steal pages from over-provisioned buffer sizes. */
	while(maybe_shrink_lav_buffers(size))
	{
		if(!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if(needed <= 0) return;
	};

	/* Step 3: build one reclaim candidate per LRU list.  Dirty and
	   shared lists are never scavenged. */
repeat0:
	for(i=0; i<NR_LIST; i++){
		if(i == BUF_DIRTY || i == BUF_SHARED ||
		   nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if(buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			/* Shared or dirty buffers belong on another list. */
			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Locked lists are ordered by lock time: once we hit
			   a locked buffer, the rest are locked too. */
			if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if(!buffers[i]) candidate[i] = NULL;
		else candidate[i] = bh;
		if(candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if(needed <= 0) return;

	/* Pick the globally least-recently-used candidate. */
	winner = best_time = UINT_MAX;
	for(i=0; i<NR_LIST; i++){
		if(!candidate[i]) continue;
		if(candidate[i]->b_lru_time < best_time){
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	if(winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if(candidate[i] == bh) candidate[i] = NULL;
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if(bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if(buffers[i] < 0) panic("Here is the problem");

		if(buffers[i] == 0) candidate[i] = NULL;

		/* Re-scan the winner's list for its next candidate. */
		if(candidate[i] && buffers[i] > 0){
			if(buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if(buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				/* See note above: locked lists are ordered by
				   lock time. */
				if(bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if(!buffers[i]) candidate[i] = NULL;
			else candidate[i] = bh;
			if(candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if(needed <= 0) return;

	/* Step 4: no candidates left — grow, pressing harder each time. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* Last resort: atomic allocation; wake bdflush if it fails so
	   dirty buffers get flushed and become reclaimable. */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}
764
765
766
767
768
769
770
771
772
773
774
/*
 * Get a referenced buffer for (dev, block), creating one if needed.
 * A cache hit returns the existing buffer (contents valid if
 * b_uptodate).  On a miss a free buffer is initialized for the block;
 * its contents are NOT read from disk — callers wanting data must
 * check b_uptodate and issue the read themselves (see bread()).
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Per-size usage counter feeds the load-average balancing. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if(!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while(!free_list[isize]) refill_freelist(size);

	/* refill_freelist() may have slept; someone else may have cached
	   the block meanwhile, so look it up again. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its
	   kind, and that it's unused (b_count=0), unlocked, and clean. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime = 0;
	bh->b_req=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
816
817 void set_writetime(struct buffer_head * buf, int flag)
818 {
819 int newtime;
820
821 if (buf->b_dirt){
822
823 newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
824 bdf_prm.b_un.age_buffer);
825 if(!buf->b_flushtime || buf->b_flushtime > newtime)
826 buf->b_flushtime = newtime;
827 } else {
828 buf->b_flushtime = 0;
829 }
830 }
831
832
/* Target list indexed by a 3-bit state: bit 0 = shared page,
   bit 1 = locked, bit 2 = dirty.  Dirty dominates, then locked. */
static char buffer_disposition[] = {BUF_CLEAN, BUF_SHARED, BUF_LOCKED, BUF_SHARED,
				    BUF_DIRTY, BUF_DIRTY, BUF_DIRTY, BUF_DIRTY};

/*
 * Move a buffer onto the LRU list matching its current state (clean/
 * shared/locked/dirty), refreshing its LRU timestamp where appropriate.
 * A buffer leaving BUF_SHARED while clean goes to BUF_UNSHARED first.
 * Wakes bdflush when the dirty list grows past the nfract threshold.
 */
void refile_buffer(struct buffer_head * buf){
	int i, dispose;
	i = 0;
	if(buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if(mem_map[MAP_NR((unsigned long) buf->b_data)] != 1) i = 1;
	if(buf->b_lock) i |= 2;
	if(buf->b_dirt) i |= 4;
	dispose = buffer_disposition[i];
	if(buf->b_list == BUF_SHARED && dispose == BUF_CLEAN)
		dispose = BUF_UNSHARED;
	if(dispose == -1) panic("Bad buffer settings (%d)\n", i);
	if(dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if(dispose != buf->b_list) {
		if(dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		/* Recently dirtied, now-locked buffers get the second
		   locked list so sync can treat them differently. */
		if(dispose == BUF_LOCKED &&
		   (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if(dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		   (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}
863
/*
 * Release a reference to a buffer.  Waits for pending I/O, refreshes
 * the write-back deadline and list placement, then drops b_count;
 * waiters on free buffers are woken when the count reaches zero.
 * Releasing an already-free buffer is reported, not fatal.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, update the flush deadline and move to the right list. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}
882
883
884
885
886
/*
 * Read a block: getblk() plus, on a cache miss, a synchronous read
 * from the device.  Returns a referenced, uptodate buffer, or NULL
 * if the read failed (the reference is dropped in that case).
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
		       MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	/* Read failed: release the buffer rather than hand back junk. */
	brelse(bh);
	return NULL;
}
905
906
907
908
909
910
911
912 #define NBUF 16
913
/*
 * bread() with read-ahead: read the requested block and speculatively
 * queue up to NBUF consecutive following blocks, bounded by the file
 * size, the device's read_ahead setting, and the first block already
 * cached.  Only the requested block is waited for; the read-ahead
 * buffers are released immediately after being queued.
 */
struct buffer_head * breada(dev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	/* Cache hit: no read-ahead needed. */
	if (bh->b_uptodate)
		return bh;

	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		/* Stop at the first already-cached block. */
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the blocks to be read in one batch. */
	ll_rw_block(READ, j, bhlist);

	/* Read-ahead buffers stay cached; drop our references now. */
	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait only for the block the caller actually asked for. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
966
967
968
969
/*
 * Return a buffer head to the unused pool.  The head is zeroed except
 * for b_wait, which must survive because sleepers may still be queued
 * on it; the volatile casts keep the compiler from folding the save/
 * restore into the memset.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
980
981 static void get_more_buffer_heads(void)
982 {
983 int i;
984 struct buffer_head * bh;
985
986 if (unused_list)
987 return;
988
989 if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
990 return;
991
992 for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
993 bh->b_next_free = unused_list;
994 unused_list = bh++;
995 }
996 }
997
998 static struct buffer_head * get_unused_buffer_head(void)
999 {
1000 struct buffer_head * bh;
1001
1002 get_more_buffer_heads();
1003 if (!unused_list)
1004 return NULL;
1005 bh = unused_list;
1006 unused_list = bh->b_next_free;
1007 bh->b_next_free = NULL;
1008 bh->b_data = NULL;
1009 bh->b_size = 0;
1010 bh->b_req = 0;
1011 return bh;
1012 }
1013
1014
1015
1016
1017
1018
1019
/*
 * Carve a page into buffer heads of the given size, linked through
 * b_this_page (NULL-terminated here; callers close the ring).  The
 * heads are created back-to-front so the returned head covers offset
 * 0.  On allocation failure every head created so far is returned to
 * the pool and NULL is returned.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* Walk offsets PAGE_SIZE-size down to 0; unsigned wraparound
	   past zero terminates the loop. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* not yet bound to a device */
	}
	return head;

/* Out of buffer heads: undo the partial chain. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}
1050
1051 static void read_buffers(struct buffer_head * bh[], int nrbuf)
1052 {
1053 int i;
1054 int bhnum = 0;
1055 struct buffer_head * bhr[8];
1056
1057 for (i = 0 ; i < nrbuf ; i++) {
1058 if (bh[i] && !bh[i]->b_uptodate)
1059 bhr[bhnum++] = bh[i];
1060 }
1061 if (bhnum)
1062 ll_rw_block(READ, bhnum, bhr);
1063 for (i = 0 ; i < nrbuf ; i++) {
1064 if (bh[i]) {
1065 wait_on_buffer(bh[i]);
1066 }
1067 }
1068 }
1069
/*
 * Try to share an existing buffer page directly with a process.
 * Succeeds only if the first buffer starts on a page boundary and the
 * remaining blocks of the page are all cached contiguously in that
 * same page.  On success the page's refcount is raised, all buffers
 * are read/released, the caller's scratch page at 'address' is freed,
 * and the shared page is returned.  Returns 0 (dropping the extra
 * page reference) if the layout does not line up.
 */
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				   dev_t dev, int *b, int size)
{
	struct buffer_head * bh[8];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;

	page = (unsigned long) first->b_data;
	/* Buffer must begin exactly on a page boundary to be shareable. */
	if (page & ~PAGE_MASK) {
		brelse(first);
		return 0;
	}
	mem_map[MAP_NR(page)]++;	/* extra ref for the sharing mapping */
	bh[0] = first;
	nrbuf = 1;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		/* Each block must sit at its natural offset in the page. */
		if (page+offset != (unsigned long) first->b_data)
			goto no_go;
	}
	read_buffers(bh,nrbuf);
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);	/* caller's page is no longer needed */
	++current->min_flt;	/* counts as a minor fault: no new page */
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(page);	/* drops the extra reference taken above */
	return 0;
}
1110
/*
 * Populate the caller's page at 'address' as a full page of buffers
 * for the blocks in b[], so the page can be mapped into a process and
 * simultaneously serve as buffer cache.  Fails (returning 0 and
 * recycling the heads) if any block slot is empty or already cached
 * elsewhere.  On success the buffers are inserted into the cache,
 * read in, released, and 'address' is returned.
 */
static unsigned long try_to_load_aligned(unsigned long address,
					 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;

	/* Every block must exist and not be cached already. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;	/* close the per-page ring */
	mem_map[MAP_NR(address)]++;	/* page is now shared with the cache */
	buffer_pages[address >> PAGE_SHIFT] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->maj_flt;	/* real I/O happened: major fault */
	return address;
not_aligned:
	/* Give the unused heads back to the pool. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
/*
 * Attempt to satisfy a read-only page fault by sharing buffer-cache
 * memory instead of copying: if the first block is already cached,
 * check whether its page lines up (check_aligned); otherwise try to
 * build a fresh aligned buffer page (try_to_load_aligned).  Returns
 * the shareable page address or 0 if sharing is not possible.
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
						 dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}
1195
1196 #define COPYBLK(size,from,to) \
1197 __asm__ __volatile__("rep ; movsl": \
1198 :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
1199 :"cx","di","si")
1200
1201
1202
1203
1204
1205
1206
1207
/*
 * Read a full page worth of blocks (b[]) for a page fault.  For
 * read-only mappings, first try to share buffer-cache pages directly;
 * otherwise read each block through the cache and copy it into the
 * page at 'address'.  Block slots with b[i] == 0 are holes and left
 * untouched.  Returns the address of the page actually holding the
 * data (the shared page, or 'address' on the copy path).
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
	struct buffer_head * bh[8];
	unsigned long where;
	int i, j;

	/* Writable mappings can never share cache pages. */
	if (!(prot & PAGE_RW)) {
		where = try_to_share_buffers(address,dev,b,size);
		if (where)
			return where;
	}
	++current->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size,address += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				COPYBLK(size, (unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
	}
	return where;
}
1236
1237
1238
1239
1240
/*
 * Grow the buffer cache by one page: allocate a page at priority
 * 'pri', slice it into buffers of 'size' bytes, and splice them into
 * the free list for that size.  Size must be a multiple of 512 and at
 * most PAGE_SIZE.  Returns 1 on success, 0 on failure (bad size, no
 * page, or no buffer heads).
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	/* Thread the page's buffers into the circular free list. */
	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			/* First buffer seeds an empty circular list. */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[page >> PAGE_SHIFT] = bh;
	tmp->b_this_page = bh;	/* close the per-page ring */
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}
1291
1292
1293
1294
1295
/*
 * Try to free the whole page underlying buffer 'bh'.  Succeeds only
 * if every buffer on the page is idle (no ref, clean, unlocked, no
 * waiters); then all of them are torn down and the page is released.
 * *bhp is kept pointing at a surviving list element (or NULL) so the
 * caller's list walk can continue.  Returns 1 if the page became
 * genuinely free (refcount 0), else 0.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* First pass: every buffer on the page must be idle. */
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* Second pass: tear them all down. */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		/* Keep the caller's cursor valid while we delete nodes. */
		if (p == *bhp)
		{
			*bhp = p->b_prev_free;
			if (p == *bhp)
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[page >> PAGE_SHIFT] = NULL;
	free_page(page);
	/* The page may still be mapped elsewhere; report only if truly free. */
	return !mem_map[MAP_NR(page)];
}
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/*
 * Load-average based rebalancing between buffer sizes: if some other
 * size's share of buffer usage (its load average vs. the total) is
 * smaller than its share of memory, shrink it to free pages for the
 * requested size.  'size' == 0 means no size is exempt.  Returns 1 if
 * something was shrunk, 0 otherwise.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Tally overall load average and buffer counts (shared buffers
	   are excluded — they cannot be reclaimed anyway). */
	total_lav = total_n_buffers = n_sizes = 0;
	for(nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if(nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* The size we are refilling for is never a shrink victim. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for(nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if(nlist == isize) continue;
			if(nr_buffers_size[nlist] &&
			   bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			   total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if(shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}
1384
1385
1386
1387
1388
1389
1390
/*
 * Memory-pressure entry point: try to release one page of buffer
 * memory.  Lower priority numbers mean more desperation — below 2 we
 * sync first so dirty buffers become reclaimable; at 2 we poke
 * bdflush.  Returns 1 if a page was freed.
 */
int shrink_buffers(unsigned int priority)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if(priority == 2) wakeup_bdflush(1);

	/* Prefer shrinking an over-provisioned buffer size first. */
	if(maybe_shrink_lav_buffers(0)) return 1;

	return shrink_specific_buffers(priority, 0);
}
1404
/*
 * Try to free one page of buffers, optionally restricted to one
 * buffer size (size == 0 means any size).  Free-list buffers are
 * tried first, then the LRU lists, scanning a priority-scaled
 * fraction of each list.  Returns 1 as soon as a page is freed.
 */
static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if(size) printk("Shrinking buffers of size %d\n", size);
#endif

	/* Easy pickings first: whole pages sitting on the free lists. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for(isize = 0; isize<NR_SIZES; isize++){
		if(isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if(!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;	/* list emptied by try_to_free */
		}
	}

	/* Then the LRU lists, oldest buffers first. */
	for(nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if(priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if(!bh) continue;
		/* Scan a fraction of the list: more when desperate. */
		i = nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* Buffer migrated to another list: restart. */
			if(bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if(size && bh->b_size != size) continue;
			/* Note: else binds to "if (priority)" — locked
			   buffers are skipped unless priority is 0, in
			   which case we wait for them. */
			if (bh->b_lock)
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (bh->b_dirt) {
				/* Start an async write-ahead and move on;
				   the buffer may be freeable next pass. */
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if(!bh) break;
		}
	}
	return 0;
}
1465
1466
1467 void show_buffers(void)
1468 {
1469 struct buffer_head * bh;
1470 int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
1471 int shared;
1472 int nlist, isize;
1473
1474 printk("Buffer memory: %6dkB\n",buffermem>>10);
1475 printk("Buffer heads: %6d\n",nr_buffer_heads);
1476 printk("Buffer blocks: %6d\n",nr_buffers);
1477
1478 for(nlist = 0; nlist < NR_LIST; nlist++) {
1479 shared = found = locked = dirty = used = lastused = 0;
1480 bh = lru_list[nlist];
1481 if(!bh) continue;
1482 do {
1483 found++;
1484 if (bh->b_lock)
1485 locked++;
1486 if (bh->b_dirt)
1487 dirty++;
1488 if(mem_map[MAP_NR(((unsigned long) bh->b_data))] !=1) shared++;
1489 if (bh->b_count)
1490 used++, lastused = found;
1491 bh = bh->b_next_free;
1492 } while (bh != lru_list[nlist]);
1493 printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
1494 nlist, found, used, lastused, locked, dirty, shared);
1495 };
1496 printk("Size [LAV] Free Clean Unshar Lck Lck1 Dirty Shared\n");
1497 for(isize = 0; isize<NR_SIZES; isize++){
1498 printk("%5d [%5d]: %7d ", bufferindex_size[isize],
1499 buffers_lav[isize], nr_free[isize]);
1500 for(nlist = 0; nlist < NR_LIST; nlist++)
1501 printk("%7d ", nr_buffers_st[isize][nlist]);
1502 printk("\n");
1503 }
1504 }
1505
1506
1507
1508
1509
/*
 * Try to re-assign a whole page worth of idle buffers to a new
 * (dev, starting_block...) identity, so a cluster read can land in
 * memory the cache already owns.  Returns 1 on success; returns 0 if
 * the page is shared or any buffer on it is busy, dirty or locked.
 * *bhp is set to bh so the caller's scan cursor stays valid.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
		     dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	/* Shared pages cannot be repurposed. */
	if(mem_map[MAP_NR(page)] != 1) return 0;
	tmp = bh;
	do {
		/* NOTE(review): a NULL link means a broken per-page ring. */
		if (!tmp)
			return 0;
		/* Every buffer on the page must be completely idle. */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	/* Walk to the buffer whose data starts at the page boundary. */
	while((unsigned int) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		/* Re-hash each buffer under its new (dev, block) identity;
		   mark contents invalid until something reads them in. */
		remove_from_queues(p);
		p->b_dev=dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr=starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
/*
 * Build a cluster at (dev, starting_block) by re-assigning an existing
 * idle buffer page instead of allocating fresh memory.  Returns 4 on
 * success (same convention as try_to_generate_cluster) and 0 on failure.
 */
static int reassign_cluster(dev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* Make sure there are plenty of free buffers of this size so a
	   complete idle page is likely to exist.
	   NOTE(review): this loop does not terminate if refill_freelist()
	   cannot push nr_free[isize] to 32 - confirm against callers. */
	while(nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if(bh)
		/* Circular free list; "!i ||" admits the first iteration. */
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			/* Skip heads that are not part of a full page ring. */
			if (!bh->b_this_page) continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}
1585
1586
1587
1588
1589
/*
 * Allocate a fresh page, carve it into size-byte buffers and insert
 * them into the cache as blocks [block, block + PAGE_SIZE/size) of dev.
 * Returns 4 on success; returns 0 on failure (no free page, no buffer
 * heads, or one of the target blocks is already cached).
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[8];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if(!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	/* If any target block is already cached we must not create a
	   duplicate entry - give the page back instead. */
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		/* Record each head so we can brelse() it afterwards.
		   (Assumes PAGE_SIZE/size <= 8 so arr[] fits - TODO confirm
		   for all supported page/buffer sizes.) */
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;	/* contents not read in yet */
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[page >> PAGE_SHIFT] = bh;
	/* Close the circular per-page ring of buffer heads. */
	bh->b_this_page = tmp;
	/* Drop the temporary references; the buffers stay cached. */
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	/* Undo: return all heads and the page itself. */
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}
1646
1647 unsigned long generate_cluster(dev_t dev, int b[], int size)
1648 {
1649 int i, offset;
1650
1651 for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
1652 if(i && b[i]-1 != b[i-1]) return 0;
1653 if(find_buffer(dev, b[i], size)) return 0;
1654 };
1655
1656
1657
1658
1659
1660 if(maybe_shrink_lav_buffers(size))
1661 {
1662 int retval;
1663 retval = try_to_generate_cluster(dev, b[0], size);
1664 if(retval) return retval;
1665 };
1666
1667 if (nr_free_pages > min_free_pages)
1668 return try_to_generate_cluster(dev, b[0], size);
1669 else
1670 return reassign_cluster(dev, b[0], size);
1671 }
1672
1673
1674
1675
1676
1677
1678
1679
1680 void buffer_init(void)
1681 {
1682 int i;
1683 int isize = BUFSIZE_INDEX(BLOCK_SIZE);
1684
1685 if (high_memory >= 4*1024*1024) {
1686 min_free_pages = 200;
1687 if(high_memory >= 16*1024*1024)
1688 nr_hash = 16381;
1689 else
1690 nr_hash = 4093;
1691 } else {
1692 min_free_pages = 20;
1693 nr_hash = 997;
1694 };
1695
1696 hash_table = (struct buffer_head **) vmalloc(nr_hash *
1697 sizeof(struct buffer_head *));
1698
1699
1700 buffer_pages = (struct buffer_head **) vmalloc((high_memory >>PAGE_SHIFT) *
1701 sizeof(struct buffer_head *));
1702 for (i = 0 ; i < high_memory >> PAGE_SHIFT ; i++)
1703 buffer_pages[i] = NULL;
1704
1705 for (i = 0 ; i < nr_hash ; i++)
1706 hash_table[i] = NULL;
1707 lru_list[BUF_CLEAN] = 0;
1708 grow_buffers(GFP_KERNEL, BLOCK_SIZE);
1709 if (!free_list[isize])
1710 panic("VFS: Unable to initialize buffer free list!");
1711 return;
1712 }
1713
1714
1715
1716
1717
1718
1719
1720
/* bdflush synchronisation: the daemon sleeps on bdflush_wait until a
 * flush is requested, and wakes sleepers on bdflush_done when a flush
 * pass completes. */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

/* Non-zero once a process has become the bdflush daemon (sys_bdflush). */
static int bdflush_running = 0;
1725
1726 static void wakeup_bdflush(int wait)
1727 {
1728 if(!bdflush_running){
1729 printk("Warning - bdflush not running\n");
1730 sync_buffers(0,0);
1731 return;
1732 };
1733 wake_up(&bdflush_wait);
1734 if(wait) sleep_on(&bdflush_done);
1735 }
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
/*
 * Write out dirty buffers whose flush deadline (b_flushtime) has
 * passed, after syncing superblocks and inodes, then decay the per-size
 * buffer load averages.  Used for bdflush func 1 and as the fallback
 * when the daemon is not running.  Always returns 0.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for(nlist = 0; nlist < NR_LIST; nlist++)
#else
	/* Normally only the dirty list needs scanning. */
	for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if(bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* Buffer migrated off this list (refile or
				   sleep) - restart the scan from the head. */
				if(bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if(!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on the dirty list?  Refile it. */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				/* Not due yet.  NOTE(review): direct jiffies
				   comparison - breaks on jiffies wraparound. */
				if(bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if(nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* Decay the per-size load averages with the configured constant
	   and reset the usage counters for the next interval. */
	for(isize = 0; isize<NR_SIZES; isize++){
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}
1814
1815
1816
1817
1818
1819
1820
1821
/*
 * The bdflush system call.  Superuser only.
 *   func == 1       : run one synchronous pass (sync_old_buffers).
 *   func >= 2, even : read tuning parameter (func-2)/2 into *(int *)data.
 *   func >= 2, odd  : set tuning parameter (func-2)/2 to data
 *                     (range-checked against bdflush_min/bdflush_max).
 *   func == 0       : turn the calling process into the bdflush daemon;
 *                     only returns when the daemon gets SIGKILL.
 */
asmlinkage int sys_bdflush(int func, int data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if(!suser()) return -EPERM;

	if(func == 1)
		return sync_old_buffers();

	/* Basically func 0 means start daemon, 1 flush, and 2+ tune. */
	if(func >= 2){
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM) return -EINVAL;
		if((func & 1) == 0) {
			/* Even func: copy the parameter out to user space. */
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if(error) return error;
			put_fs_long(bdf_prm.data[i], data);
			return 0;
		};
		if(data < bdflush_min[i] || data > bdflush_max[i]) return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	/* Only one daemon instance is allowed.
	   NOTE(review): bdflush_running is never decremented, not even on
	   the SIGKILL exit below - a killed daemon cannot be restarted. */
	if(bdflush_running++) return -EBUSY;

	/* Daemon main loop: flush a batch of dirty buffers, then sleep
	   until woken or until the dirty ratio grows too large. */
	while(1==1){
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for(nlist = 0; nlist < NR_LIST; nlist++)
#else
		for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if(bh)
				/* Write at most bdf_prm.b_un.ndirty buffers
				   per pass.  NOTE(review): "--i > 0" skips
				   one buffer compared with sync_old_buffers'
				   "i-- > 0" - confirm this is intentional. */
				for (i = nr_buffers_type[nlist]; --i > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* Buffer moved lists while we slept:
					   restart the scan from the head. */
					if(bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if(!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on the dirty list?  Refile it. */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;
					/* Should we write back buffers that are
					   shared or not?  Currently: yes. */
					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if(nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		/* Release any process waiting in wakeup_bdflush(1). */
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers, loop again
		   immediately; otherwise sleep until the next wakeup. */
		if(nr_buffers_type[BUF_DIRTY] < (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		   bdf_prm.b_un.nfract/100) {
			/* SIGKILL is the only way to stop the daemon. */
			if (current->signal & (1 << (SIGKILL-1)))
				return 0;
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936