This source file includes the following definitions:
- __wait_on_buffer
- sync_buffers
- sync_dev
- fsync_dev
- sys_sync
- file_fsync
- sys_fsync
- invalidate_buffers
- remove_from_hash_queue
- remove_from_lru_list
- remove_from_free_list
- remove_from_queues
- put_last_lru
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- set_blocksize
- refill_freelist
- getblk
- set_writetime
- refile_buffer
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- create_buffers
- read_buffers
- try_to_align
- check_aligned
- try_to_load_aligned
- try_to_share_buffers
- bread_page
- bwrite_page
- grow_buffers
- try_to_free
- maybe_shrink_lav_buffers
- shrink_buffers
- shrink_specific_buffers
- show_buffers
- try_to_reassign
- reassign_cluster
- try_to_generate_cluster
- generate_cluster
- buffer_init
- wakeup_bdflush
- sync_old_buffers
- sys_bdflush
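
/*
 * buffer.c -- the buffer cache.  Device blocks are kept in a hash table
 * for lookup, on per-size circular free lists when unused, and on LRU
 * lists (one per buffer state: clean, locked, dirty, shared, ...) while
 * in use.  The bdflush interface at the bottom of the file writes dirty
 * buffers back to disk.
 */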
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>

#define NR_SIZES 4
static char buffersize_index[9] = {-1, 0, 1, -1, 2, -1, -1, -1, 3};
static short int bufferindex_size[NR_SIZES] = {512, 1024, 2048, 4096};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

static int grow_buffers(int pri, int size);
static int shrink_specific_buffers(unsigned int priority, int size);
static int maybe_shrink_lav_buffers(int);

static int nr_hash = 0;	/* size of hash table */
static struct buffer_head ** hash_table;
struct buffer_head ** buffer_pages;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffers_type[NR_LIST] = {0,};
int nr_buffers_size[NR_SIZES] = {0,};
int nr_buffers_st[NR_SIZES][NR_LIST] = {{0,},};
int buffer_usage[NR_SIZES] = {0,};	/* usage counts used to determine load average */
int buffers_lav[NR_SIZES] = {0,};	/* load average of buffer usage */
int nr_free[NR_SIZES] = {0,};
int buffermem = 0;
int nr_buffer_heads = 0;
extern int *blksize_size[];

static void wakeup_bdflush(int);

#define N_PARAM 9
#define LAV

static union bdflush_param{
	struct {
		int nfract;	/* Percentage of buffer cache dirty to
				   activate bdflush */
		int ndirty;	/* Maximum number of dirty blocks to write out
				   per wake-cycle */
		int nrefill;	/* Number of clean buffers to try to obtain
				   each time we call refill */
		int nref_dirt;	/* Dirty buffer threshold for activating
				   bdflush when trying to refill buffers. */
		int clu_nfract;	/* Percentage of buffer cache to scan to
				   search for free clusters */
		int age_buffer;	/* Time for normal buffer to age before
				   we flush it */
		int age_super;	/* Time for superblock to age before we
				   flush it */
		int lav_const;	/* Constant used for load average (time
				   constant) */
		int lav_ratio;	/* Used to determine how low a lav for a
				   particular size can go before we start to
				   trim back the buffers */
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{25, 500, 64, 256, 15, 30*HZ, 5*HZ, 1884, 2}};

/* These are the min and max parameter values that we will allow to be assigned */
static int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
static int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};

/*
 * Wait until a buffer is no longer locked.  Callers normally go through
 * the wait_on_buffer() wrapper, which tests b_lock first.  The buffer's
 * reference count is raised while we sleep so that it cannot be
 * reclaimed from under us.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	bh->b_count++;
	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	bh->b_count--;
	current->state = TASK_RUNNING;
}
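
/*
 * Note the ordering above: the task state is set to TASK_UNINTERRUPTIBLE
 * *before* b_lock is re-tested, so a wake_up() arriving between the test
 * and the schedule() merely puts the task back in the running state and
 * no wakeup is lost.  Every sleep loop in this file that tests a
 * condition shared with interrupt handlers follows the same pattern.
 */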

/*
 * Write out all dirty buffers for a device (or for all devices, if
 * dev == 0).  With wait == 0 the writes are merely scheduled; with
 * wait != 0 we make several passes and wait on locked buffers, so that
 * the call does not return before all writes have completed:
 *   pass 0) write out all dirty, unlocked buffers;
 *   pass 1) write out all dirty buffers, waiting if locked;
 *   pass 2) wait for completion by waiting for all buffers to unlock.
 */
static int sync_buffers(dev_t dev, int wait)
{
	int i, retry, pass = 0, err = 0;
	int nlist, ncount;
	struct buffer_head * bh, *next;

repeat:
	retry = 0;
repeat2:
	ncount = 0;
	/* The buffer lists can change under us while we sleep, so we
	   restart the scan whenever that happens. */
	for (nlist = 0; nlist < NR_LIST; nlist++)
	{
	repeat1:
		bh = lru_list[nlist];
		if (!bh) continue;
		for (i = nr_buffers_type[nlist]*2 ; i-- > 0 ; bh = next) {
			if (bh->b_list != nlist) goto repeat1;
			next = bh->b_next_free;
			if (!lru_list[nlist]) break;
			if (dev && bh->b_dev != dev)
				continue;
			if (bh->b_lock)
			{
				/* Buffer is locked; skip it unless we are
				   in one of the waiting passes. */
				if (!wait || !pass) {
					retry = 1;
					continue;
				}
				wait_on_buffer(bh);
				goto repeat2;
			}
			/* An unlocked, clean buffer that is not uptodate
			   indicates an earlier IO error. */
			if (wait && bh->b_req && !bh->b_lock &&
			    !bh->b_dirt && !bh->b_uptodate) {
				err = 1;
				printk("Weird - unlocked, clean and not uptodate buffer on list %d %x %lu\n", nlist, bh->b_dev, bh->b_blocknr);
				continue;
			}
			/* Don't write clean buffers.  Don't write ANY
			   buffers on the third pass. */
			if (!bh->b_dirt || pass >= 2)
				continue;
			if (bh->b_lock)
				continue;
			bh->b_count++;
			bh->b_flushtime = 0;
			ll_rw_block(WRITE, 1, &bh);

			if (nlist != BUF_DIRTY) {
				printk("[%d %x %ld] ", nlist, bh->b_dev, bh->b_blocknr);
				ncount++;
			};
			bh->b_count--;
			retry = 1;
		}
	}
	if (ncount) printk("sys_sync: %d dirty buffers not on dirty list\n", ncount);

	/* If we are waiting for the sync to succeed, and if any dirty
	   blocks were written, then repeat; on the second pass we only
	   wait for buffers being written (and do not write any more). */
	if (wait && retry && ++pass <= 2)
		goto repeat;
	return err;
}

void sync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev, 0);
}

int fsync_dev(dev_t dev)
{
	sync_buffers(dev, 0);
	sync_supers(dev);
	sync_inodes(dev);
	return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
	fsync_dev(0);
	return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
	return fsync_dev(inode->i_dev);
}
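
/*
 * A filesystem whose data lives entirely in the buffer cache can point
 * its fsync operation straight at file_fsync().  A hypothetical sketch
 * of such a file_operations table (positional fields abbreviated):
 *
 *	static struct file_operations example_file_ops = {
 *		...,
 *		file_fsync	// fsync
 *	};
 */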

asmlinkage int sys_fsync(unsigned int fd)
{
	struct file * file;
	struct inode * inode;

	if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
		return -EBADF;
	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	if (file->f_op->fsync(inode,file))
		return -EIO;
	return 0;
}

/*
 * Invalidate all cached blocks of a device, e.g. when a removable disk
 * may have been changed.  Buffers that are busy (b_count != 0) are
 * left alone.
 */
void invalidate_buffers(dev_t dev)
{
	int i;
	int nlist;
	struct buffer_head * bh;

	for (nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
			if (bh->b_dev != dev)
				continue;
			wait_on_buffer(bh);
			/* the buffer may have changed hands while we slept */
			if (bh->b_dev != dev)
				continue;
			if (bh->b_count)
				continue;
			bh->b_flushtime = bh->b_uptodate =
				bh->b_dirt = bh->b_req = 0;
		}
	}
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%nr_hash)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
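
/*
 * Design note: the hash simply XORs the device number into the block
 * number and reduces modulo nr_hash.  buffer_init() below picks nr_hash
 * to be prime (997, 4093 or 16381 depending on memory size), which
 * keeps the modulo reduction well distributed even though dev^block is
 * far from uniform.
 */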

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: LRU block list corrupted");
	if (bh->b_dev == 0xffff) panic("LRU list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;

	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = bh->b_next_free;
	if (lru_list[bh->b_list] == bh)
		lru_list[bh->b_list] = NULL;
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	int isize = BUFSIZE_INDEX(bh->b_size);
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("VFS: Free block list corrupted");
	if (bh->b_dev != 0xffff) panic("Free list corrupted");
	if (!free_list[isize])
		panic("Free list empty");
	nr_free[isize]--;
	if (bh->b_next_free == bh)
		free_list[isize] = NULL;
	else {
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	};
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	if (bh->b_dev == 0xffff) {
		remove_from_free_list(bh);
		return;
	};
	nr_buffers_type[bh->b_list]--;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh->b_next_free;
		return;
	}
	if (bh->b_dev == 0xffff) panic("Wrong block for lru list");
	remove_from_lru_list(bh);

	/* add to back of the lru list */
	if (!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		lru_list[bh->b_list]->b_prev_free = bh;
	};

	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	int isize;
	if (!bh)
		return;

	isize = BUFSIZE_INDEX(bh->b_size);
	bh->b_dev = 0xffff;	/* so it is obvious we are on the free list */

	/* add to back of free list */
	if (!free_list[isize]) {
		free_list[isize] = bh;
		bh->b_prev_free = bh;
	};

	nr_free[isize]++;
	bh->b_next_free = free_list[isize];
	bh->b_prev_free = free_list[isize]->b_prev_free;
	free_list[isize]->b_prev_free->b_next_free = bh;
	free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if (bh->b_dev == 0xffff) {
		put_last_free(bh);
		return;
	};
	if (!lru_list[bh->b_list]) {
		lru_list[bh->b_list] = bh;
		bh->b_prev_free = bh;
	};
	if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");
	bh->b_next_free = lru_list[bh->b_list];
	bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
	lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
	lru_list[bh->b_list]->b_prev_free = bh;
	nr_buffers_type[bh->b_list]++;
	nr_buffers_st[BUFSIZE_INDEX(bh->b_size)][bh->b_list]++;

	/* put the buffer in the new hash-queue if it has a device */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(dev_t dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev==dev && tmp->b_blocknr==block)
			if (tmp->b_size == size)
				return tmp;
			else {
				printk("VFS: Wrong blocksize on device %d/%d\n",
					MAJOR(dev), MINOR(dev));
				return NULL;
			}
	return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad).  So the lookup has to be re-checked after
 * waiting, and repeated if the buffer changed under us.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_reuse=0;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}

void set_blocksize(dev_t dev, int size)
{
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	switch (size) {
		default: panic("Invalid blocksize passed to set_blocksize");
		case 512: case 1024: case 2048: case 4096:;
	}

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* We need to evict all buffers of the old size for this device.
	   We need to be quite careful how we do this - we are moving
	   entries around on the free list, and we can get in a loop if
	   we are not careful. */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if (!bh) break;
			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			wait_on_buffer(bh);
			if (bh->b_dev == dev && bh->b_size != size) {
				bh->b_uptodate = bh->b_dirt = bh->b_req =
					bh->b_flushtime = 0;
			};
			remove_from_hash_queue(bh);
		}
	}
}

#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
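
/*
 * BADNESS ranks how costly a buffer is to reclaim:
 *	0 - clean, unlocked	(ideal candidate)
 *	1 - clean, locked
 *	2 - dirty, unlocked
 *	3 - dirty, locked	(worst)
 * refill_freelist() below only takes buffers with BADNESS == 0.
 */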

/*
 * Refill the free list for buffers of the given size.  We first try to
 * grow the cache from free pages, then shrink over-represented sizes,
 * and finally steal the least-recently-used clean, unshared, idle
 * buffers from the LRU lists.
 */
void refill_freelist(int size)
{
	struct buffer_head * bh, * tmp;
	struct buffer_head * candidate[NR_LIST];
	unsigned int best_time, winner;
	int isize = BUFSIZE_INDEX(size);
	int buffers[NR_LIST];
	int i;
	int needed;

	/* First see if we even need this.  Sometimes it is advantageous
	   to request some blocks in a filesystem that we know that we
	   will be needing ahead of time. */
	if (nr_free[isize] > 100)
		return;

	/* We are going to try to locate this much memory. */
	needed = bdf_prm.b_un.nrefill * size;

	while (nr_free_pages > min_free_pages*2 && needed > 0 &&
	       grow_buffers(GFP_BUFFER, size)) {
		needed -= PAGE_SIZE;
	}

	if (needed <= 0) return;

	/* See if there are too many buffers of a different size.
	   If so, victimize them. */
	while (maybe_shrink_lav_buffers(size))
	{
		if (!grow_buffers(GFP_BUFFER, size)) break;
		needed -= PAGE_SIZE;
		if (needed <= 0) return;
	};

	/* OK, we cannot grow the buffer cache, now try to get some
	   from the lru list.  First set the candidate pointers to
	   usable buffers.  This should be quick nearly all of the time. */
repeat0:
	for (i = 0; i < NR_LIST; i++) {
		if (i == BUF_DIRTY || i == BUF_SHARED ||
		    nr_buffers_type[i] == 0) {
			candidate[i] = NULL;
			buffers[i] = 0;
			continue;
		}
		buffers[i] = nr_buffers_type[i];
		for (bh = lru_list[i]; buffers[i] > 0; bh = tmp, buffers[i]--)
		{
			if (buffers[i] < 0) panic("Here is the problem");
			tmp = bh->b_next_free;
			if (!bh) break;

			if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
			    bh->b_dirt) {
				refile_buffer(bh);
				continue;
			};

			if (bh->b_count || bh->b_size != size)
				continue;

			/* Buffers are written in the order they are placed
			   on the locked list.  If we encounter a locked
			   buffer here, this means that the rest of them
			   are also locked. */
			if (bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
				buffers[i] = 0;
				break;
			}

			if (BADNESS(bh)) continue;
			break;
		};
		if (!buffers[i]) candidate[i] = NULL;	/* Nothing on this list */
		else candidate[i] = bh;
		if (candidate[i] && candidate[i]->b_count) panic("Here is the problem");
	}

repeat:
	if (needed <= 0) return;

	/* Now see which candidate wins the election */
	winner = best_time = UINT_MAX;
	for (i = 0; i < NR_LIST; i++) {
		if (!candidate[i]) continue;
		if (candidate[i]->b_lru_time < best_time) {
			best_time = candidate[i]->b_lru_time;
			winner = i;
		}
	}

	/* If we have a winner, use it, and then get a new candidate from that list */
	if (winner != UINT_MAX) {
		i = winner;
		bh = candidate[i];
		candidate[i] = bh->b_next_free;
		if (candidate[i] == bh) candidate[i] = NULL;	/* Got last one */
		if (bh->b_count || bh->b_size != size)
			panic("Busy buffer in candidate list\n");
		if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1)
			panic("Shared buffer in candidate list\n");
		if (BADNESS(bh)) panic("Buffer in candidate list with BADNESS != 0\n");

		if (bh->b_dev == 0xffff) panic("Wrong list");
		remove_from_queues(bh);
		bh->b_dev = 0xffff;
		put_last_free(bh);
		needed -= bh->b_size;
		buffers[i]--;
		if (buffers[i] < 0) panic("Here is the problem");

		if (buffers[i] == 0) candidate[i] = NULL;

		/* Now all we need to do is advance the candidate pointer
		   from the winner list to the next usable buffer. */
		if (candidate[i] && buffers[i] > 0) {
			if (buffers[i] <= 0) panic("Here is another problem");
			for (bh = candidate[i]; buffers[i] > 0; bh = tmp, buffers[i]--) {
				if (buffers[i] < 0) panic("Here is the problem");
				tmp = bh->b_next_free;
				if (!bh) break;

				if (mem_map[MAP_NR((unsigned long) bh->b_data)] != 1 ||
				    bh->b_dirt) {
					refile_buffer(bh);
					continue;
				};

				if (bh->b_count || bh->b_size != size)
					continue;

				/* Buffers are written in the order they are
				   placed on the locked list.  If we encounter
				   a locked buffer here, this means that the
				   rest of them are also locked. */
				if (bh->b_lock && (i == BUF_LOCKED || i == BUF_LOCKED1)) {
					buffers[i] = 0;
					break;
				}

				if (BADNESS(bh)) continue;
				break;
			};
			if (!buffers[i]) candidate[i] = NULL;	/* Nothing here */
			else candidate[i] = bh;
			if (candidate[i] && candidate[i]->b_count)
				panic("Here is the problem");
		}

		goto repeat;
	}

	if (needed <= 0) return;

	/* Too bad, that was not enough.  Try a little harder to grow some. */
	if (nr_free_pages > 5) {
		if (grow_buffers(GFP_BUFFER, size)) {
			needed -= PAGE_SIZE;
			goto repeat0;
		};
	}

	/* and repeat until we find something good */
	if (!grow_buffers(GFP_ATOMIC, size))
		wakeup_bdflush(1);
	needed -= PAGE_SIZE;
	goto repeat0;
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions.  Most of the code is seldom used (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * getblk() returns the cached buffer if it exists, otherwise takes one
 * from the free list and binds it to (dev, block).  The buffer is
 * returned with b_count held and is NOT read from disk.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize = BUFSIZE_INDEX(size);

	/* Update this for the buffer size lav. */
	buffer_usage[isize]++;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_lru(bh);
		if (!bh->b_dirt) bh->b_flushtime = 0;
		return bh;
	}

	while (!free_list[isize]) refill_freelist(size);

	/* refill_freelist() may have slept; someone else may have
	   created the buffer in the meantime. */
	if (find_buffer(dev,block,size))
		goto repeat;

	bh = free_list[isize];
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its
	   kind, and that it's unused (b_count=0), unlocked (b_lock=0),
	   and clean. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_lock=0;
	bh->b_uptodate=0;
	bh->b_flushtime=0;
	bh->b_req=0;
	bh->b_reuse=0;
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
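
/*
 * Typical (hypothetical) write path through the cache: get the block,
 * overwrite it, mark it dirty and give the reference back.  getblk()
 * never reads the block from disk, so a caller that only partially
 * overwrites it must check b_uptodate and read first, as bread() does:
 *
 *	struct buffer_head *bh = getblk(dev, block, 1024);
 *	memcpy(bh->b_data, data, 1024);
 *	bh->b_uptodate = 1;
 *	mark_buffer_dirty(bh, 0);
 *	brelse(bh);
 */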

void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buf->b_dirt) {
		/* Move buffer to dirty list if jiffies is clear. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if (!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

/*
 * A buffer may need to be moved from one buffer list to another
 * (e.g. in case it is not shared any more).  Handle this.
 */
void refile_buffer(struct buffer_head * buf)
{
	int dispose;
	if (buf->b_dev == 0xffff) panic("Attempt to refile free buffer\n");
	if (buf->b_dirt)
		dispose = BUF_DIRTY;
	else if (mem_map[MAP_NR((unsigned long) buf->b_data)] > 1)
		dispose = BUF_SHARED;
	else if (buf->b_lock)
		dispose = BUF_LOCKED;
	else if (buf->b_list == BUF_SHARED)
		dispose = BUF_UNSHARED;
	else
		dispose = BUF_CLEAN;
	if (dispose == BUF_CLEAN) buf->b_lru_time = jiffies;
	if (dispose != buf->b_list) {
		if (dispose == BUF_DIRTY || dispose == BUF_UNSHARED)
			buf->b_lru_time = jiffies;
		if (dispose == BUF_LOCKED &&
		    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
			dispose = BUF_LOCKED1;
		remove_from_queues(buf);
		buf->b_list = dispose;
		insert_into_queues(buf);
		if (dispose == BUF_DIRTY && nr_buffers_type[BUF_DIRTY] >
		    (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		    bdf_prm.b_un.nfract/100)
			wakeup_bdflush(0);
	}
}

/*
 * Release a buffer head: drop one reference and wake up anyone waiting
 * for buffer heads to become free.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);

	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);

	if (buf->b_count) {
		if (--buf->b_count)
			return;
		wake_up(&buffer_wait);
#if 0
		if (buf->b_reuse) {
			buf->b_reuse = 0;
			if (!buf->b_lock && !buf->b_dirt && !buf->b_wait) {
				if (buf->b_dev == 0xffff) panic("brelse: Wrong list");
				remove_from_queues(buf);
				buf->b_dev = 0xffff;
				put_last_free(buf);
			}
		}
#endif
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it.  It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: READ error on device %d/%d\n",
			MAJOR(dev), MINOR(dev));
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
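
/*
 * Typical (hypothetical) read path: a successful bread() returns the
 * buffer with an elevated b_count, so the caller owns one reference
 * and must drop it with brelse() when done:
 *
 *	struct buffer_head *bh = bread(sb->s_dev, block, BLOCK_SIZE);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(dest, bh->b_data, BLOCK_SIZE);
 *	brelse(bh);
 */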

/*
 * breada() is like bread(), but it additionally schedules read-ahead
 * for up to NBUF following blocks, bounded by the per-device read_ahead
 * setting and by the end of the file.
 */
#define NBUF 16

struct buffer_head * breada(dev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (bh->b_uptodate)
		return bh;

	blocks = ((filesize & (bufsize - 1)) - (pos & (bufsize - 1))) >> (9+index);

	if (blocks > (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

	bhlist[0] = bh;
	j = 1;
	for (i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (bh->b_uptodate) {
			brelse(bh);
			break;
		}
		bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	ll_rw_block(READ, j, bhlist);

	for (i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}
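
/*
 * Releasing the read-ahead buffers immediately looks odd but is the
 * point of the scheme: the read requests are already queued, so the
 * blocks arrive in the cache on their own and a later bread()/getblk()
 * finds them uptodate without sleeping.  A hypothetical sequential
 * reader calling
 *
 *	bh = breada(dev, block, 1024, pos, inode->i_size);
 *
 * would see the first call sleep and the following calls hit the cache.
 */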

/*
 * Return an unused buffer head to the pool.  The wait queue is
 * preserved across the memset: there may still be sleepers on it.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset(bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}

static void get_more_buffer_heads(void)
{
	int i;
	struct buffer_head * bh;

	if (unused_list)
		return;

	if (!(bh = (struct buffer_head*) get_free_page(GFP_BUFFER)))
		return;

	for (nr_buffer_heads += i = PAGE_SIZE/sizeof(*bh) ; i > 0; i--) {
		bh->b_next_free = unused_list;	/* only make link */
		unused_list = bh++;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	bh->b_req = 0;
	return bh;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	unsigned long offset;

	head = NULL;
	offset = PAGE_SIZE;
	/* offset is unsigned, so (offset -= size) wraps above PAGE_SIZE
	   once it passes zero, which terminates the loop. */
	while ((offset -= size) < PAGE_SIZE) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = head;
		head = bh;
		bh->b_data = (char *) (page+offset);
		bh->b_size = size;
		bh->b_dev = 0xffff;	/* flag as unused */
	}
	return head;

/* In case anything failed, we just free everything we got. */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}

static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
	int i;
	int bhnum = 0;
	struct buffer_head * bhr[MAX_BUF_PER_PAGE];

	for (i = 0 ; i < nrbuf ; i++) {
		if (bh[i] && !bh[i]->b_uptodate)
			bhr[bhnum++] = bh[i];
	}
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	for (i = nrbuf ; --i >= 0 ; ) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
		}
	}
}

/*
 * try_to_align() would rearrange buffers so that they line up within a
 * page; as implemented it simply gives up: it releases the buffers and
 * returns 0, so check_aligned()'s caller falls back to a private copy
 * of the page.
 */
static unsigned long try_to_align(struct buffer_head ** bh, int nrbuf,
				  unsigned long address)
{
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
				   dev_t dev, int *b, int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	unsigned long page;
	unsigned long offset;
	int block;
	int nrbuf;
	int aligned = 1;

	bh[0] = first;
	nrbuf = 1;
	page = (unsigned long) first->b_data;
	if (page & ~PAGE_MASK)
		aligned = 0;
	for (offset = size ; offset < PAGE_SIZE ; offset += size) {
		block = *++b;
		if (!block)
			goto no_go;
		first = get_hash_table(dev, block, size);
		if (!first)
			goto no_go;
		bh[nrbuf++] = first;
		if (page+offset != (unsigned long) first->b_data)
			aligned = 0;
	}
	if (!aligned)
		return try_to_align(bh, nrbuf, address);
	mem_map[MAP_NR(page)]++;
	read_buffers(bh,nrbuf);		/* make sure they are actually read correctly */
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	free_page(address);
	++current->mm->min_flt;
	return page;
no_go:
	while (nrbuf-- > 0)
		brelse(bh[nrbuf]);
	return 0;
}

static unsigned long try_to_load_aligned(unsigned long address,
					 dev_t dev, int b[], int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	unsigned long offset;
	int isize = BUFSIZE_INDEX(size);
	int * p;
	int block;

	bh = create_buffers(address, size);
	if (!bh)
		return 0;
	/* do any of the buffers already exist?  punt if so.. */
	p = b;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		block = *(p++);
		if (!block)
			goto not_aligned;
		if (find_buffer(dev, block, size))
			goto not_aligned;
	}
	tmp = bh;
	p = b;
	block = 0;
	while (1) {
		arr[block++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_reuse = 0;
		bh->b_flushtime = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_blocknr = *(p++);
		bh->b_list = BUF_CLEAN;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	bh->b_this_page = tmp;
	mem_map[MAP_NR(address)]++;
	buffer_pages[MAP_NR(address)] = bh;
	read_buffers(arr,block);
	while (block-- > 0)
		brelse(arr[block]);
	++current->mm->maj_flt;
	return address;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	return 0;
}

/*
 * try_to_share_buffers() tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page.  This is done
 * by (a) checking if the buffers are already aligned correctly in
 * memory and (b) if none of the buffer heads are in memory at all,
 * trying to load them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under
 * most circumstances work very well indeed (ie sharing of code pages
 * on demand-loadable executables).
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
						 dev_t dev, int *b, int size)
{
	struct buffer_head * bh;
	int block;

	block = b[0];
	if (!block)
		return 0;
	bh = get_hash_table(dev, block, size);
	if (bh)
		return check_aligned(bh, address, dev, b, size);
	return try_to_load_aligned(address, dev, b, size);
}

/*
 * bread_page reads a full page of buffers into memory at the desired
 * address.  It's a function of its own, as there is some speed to be
 * got by reading them all at the same time, not waiting for one to be
 * read, and then another, etc.  This also allows us to optimize memory
 * usage by sharing code pages and filesystem buffers.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int no_share)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	unsigned long where;
	int i, j;

	if (!no_share) {
		where = try_to_share_buffers(address, dev, b, size);
		if (where)
			return where;
	}
	++current->mm->maj_flt;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	read_buffers(bh,i);
	where = address;
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, where += size) {
		if (bh[i]) {
			if (bh[i]->b_uptodate)
				memcpy((void *) where, bh[i]->b_data, size);
			brelse(bh[i]);
		} else
			memset((void *) where, 0, size);
	}
	return address;
}
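
/*
 * bread_page() is the demand-paging entry point: the memory manager
 * hands it a free page and the block numbers backing one page of a
 * file (a zero block number means a hole, which is cleared).  With
 * no_share set the data is always copied into the private page;
 * otherwise the buffer page itself may be returned, saving both the
 * copy and the extra page.
 */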

#if 0
/*
 * bwrite_page writes a complete page of buffers: the inverse of
 * bread_page.  Currently unused.
 */
void bwrite_page(unsigned long address, dev_t dev, int b[], int size)
{
	struct buffer_head * bh[MAX_BUF_PER_PAGE];
	int i, j;

	for (i=0, j=0; j<PAGE_SIZE ; i++, j+= size) {
		bh[i] = NULL;
		if (b[i])
			bh[i] = getblk(dev, b[i], size);
	}
	for (i=0, j=0; j<PAGE_SIZE ; i++, j += size, address += size) {
		if (bh[i]) {
			memcpy(bh[i]->b_data, (void *) address, size);
			bh[i]->b_uptodate = 1;
			mark_buffer_dirty(bh[i], 0);
			brelse(bh[i]);
		} else
			memset((void *) address, 0, size);
	}
}
#endif

/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(int pri, int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);

	if (!(page = __get_free_page(pri)))
		return 0;
	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	}

	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		nr_free[isize]++;
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	free_list[isize] = bh;
	buffer_pages[MAP_NR(page)] = bh;
	tmp->b_this_page = bh;
	wake_up(&buffer_wait);
	buffermem += PAGE_SIZE;
	return 1;
}

/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
	unsigned long page;
	struct buffer_head * tmp, * p;
	int isize = BUFSIZE_INDEX(bh->b_size);

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		nr_buffers_size[isize]--;
		if (p == *bhp)
		{
			*bhp = p->b_prev_free;
			if (p == *bhp)	/* Was this the last in the list? */
				*bhp = NULL;
		}
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = NULL;
	free_page(page);
	return !mem_map[MAP_NR(page)];
}

/*
 * Consult the load average for buffers and decide whether or not
 * we should shrink the buffers of one size or not.  If we decide yes,
 * do it and return 1.  Else return 0.  Do not attempt to shrink the
 * size that is specified.
 */
static int maybe_shrink_lav_buffers(int size)
{
	int nlist;
	int isize;
	int total_lav, total_n_buffers, n_sizes;

	/* Do not consider the shared buffers.  This area of buffer cache
	   does not fluctuate with the load average. */
	total_lav = total_n_buffers = n_sizes = 0;
	for (nlist = 0; nlist < NR_SIZES; nlist++)
	{
		total_lav += buffers_lav[nlist];
		if (nr_buffers_size[nlist]) n_sizes++;
		total_n_buffers += nr_buffers_size[nlist];
		total_n_buffers -= nr_buffers_st[nlist][BUF_SHARED];
	}

	/* See if we have an excessive number of buffers of a particular
	   size - if so, victimize that bunch. */
	isize = (size ? BUFSIZE_INDEX(size) : -1);

	if (n_sizes > 1)
		for (nlist = 0; nlist < NR_SIZES; nlist++)
		{
			if (nlist == isize) continue;
			if (nr_buffers_size[nlist] &&
			    bdf_prm.b_un.lav_const * buffers_lav[nlist]*total_n_buffers <
			    total_lav * (nr_buffers_size[nlist] - nr_buffers_st[nlist][BUF_SHARED]))
				if (shrink_specific_buffers(6, bufferindex_size[nlist]))
					return 1;
		}
	return 0;
}

/*
 * Try to free up some pages by shrinking the buffer-cache.
 *
 * Priority tells the routine how hard to try to shrink the buffers:
 * 6 means "don't bother too much", while a value of 0 means "we'd
 * better get some free pages now".  The "limit" argument (for DMA
 * re-allocations) is currently ignored.
 */
int shrink_buffers(unsigned int priority, unsigned long limit)
{
	if (priority < 2) {
		sync_buffers(0,0);
	}

	if (priority == 2) wakeup_bdflush(1);

	if (maybe_shrink_lav_buffers(0)) return 1;

	/* No good candidate size - take any size we can find. */
	return shrink_specific_buffers(priority, 0);
}

static int shrink_specific_buffers(unsigned int priority, int size)
{
	struct buffer_head *bh;
	int nlist;
	int i, isize, isize1;

#ifdef DEBUG
	if (size) printk("Shrinking buffers of size %d\n", size);
#endif
	/* First try the free lists, and see if we can get a complete page
	   from here. */
	isize1 = (size ? BUFSIZE_INDEX(size) : -1);

	for (isize = 0; isize < NR_SIZES; isize++) {
		if (isize1 != -1 && isize1 != isize) continue;
		bh = free_list[isize];
		if (!bh) continue;
		for (i=0 ; !i || bh != free_list[isize]; bh = bh->b_next_free, i++) {
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (try_to_free(bh, &bh))
				return 1;
			if (!bh) break;	/* Freeing the page emptied this
					   list - keep looking elsewhere. */
		}
	}

	/* Not enough in the free lists, now try the lru list. */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
	repeat1:
		if (priority > 3 && nlist == BUF_SHARED) continue;
		bh = lru_list[nlist];
		if (!bh) continue;
		i = 2*nr_buffers_type[nlist] >> priority;
		for ( ; i-- > 0 ; bh = bh->b_next_free) {
			/* We may have stalled while waiting for I/O to complete. */
			if (bh->b_list != nlist) goto repeat1;
			if (bh->b_count || !bh->b_this_page)
				continue;
			if (size && bh->b_size != size) continue;
			if (bh->b_lock)
				if (priority)
					continue;
				else
					wait_on_buffer(bh);
			if (bh->b_dirt) {
				bh->b_count++;
				bh->b_flushtime = 0;
				ll_rw_block(WRITEA, 1, &bh);
				bh->b_count--;
				continue;
			}
			if (try_to_free(bh, &bh))
				return 1;
			if (!bh) break;
		}
	}
	return 0;
}

/* ================== Debugging =================== */
void show_buffers(void)
{
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int shared;
	int nlist, isize;

	printk("Buffer memory:   %6dkB\n",buffermem>>10);
	printk("Buffer heads:    %6d\n",nr_buffer_heads);
	printk("Buffer blocks:   %6d\n",nr_buffers);

	for (nlist = 0; nlist < NR_LIST; nlist++) {
		shared = found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if (!bh) continue;
		do {
			found++;
			if (bh->b_lock)
				locked++;
			if (bh->b_dirt)
				dirty++;
			if (mem_map[MAP_NR(((unsigned long) bh->b_data))] != 1) shared++;
			if (bh->b_count)
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		printk("Buffer[%d] mem: %d buffers, %d used (last=%d), %d locked, %d dirty %d shrd\n",
		       nlist, found, used, lastused, locked, dirty, shared);
	};
	printk("Size    [LAV]      Free   Clean  Unshar     Lck    Lck1   Dirty  Shared\n");
	for (isize = 0; isize < NR_SIZES; isize++) {
		printk("%5d [%5d]: %7d ", bufferindex_size[isize],
		       buffers_lav[isize], nr_free[isize]);
		for (nlist = 0; nlist < NR_LIST; nlist++)
			printk("%7d ", nr_buffers_st[isize][nlist]);
		printk("\n");
	}
}

/*
 * try_to_reassign() checks if all the buffers on this particular page
 * are unused, and reassigns them to a new cluster if this is true.
 */
static inline int try_to_reassign(struct buffer_head * bh, struct buffer_head ** bhp,
				  dev_t dev, unsigned int starting_block)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	*bhp = bh;
	page = (unsigned long) bh->b_data;
	page &= PAGE_MASK;
	if (mem_map[MAP_NR(page)] != 1) return 0;
	tmp = bh;
	do {
		if (!tmp)
			return 0;

		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;

	while ((unsigned long) tmp->b_data & (PAGE_SIZE - 1))
		tmp = tmp->b_this_page;

	/* This is the buffer at the head of the page. */
	bh = tmp;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		remove_from_queues(p);
		p->b_dev = dev;
		p->b_uptodate = 0;
		p->b_req = 0;
		p->b_blocknr = starting_block++;
		insert_into_queues(p);
	} while (tmp != bh);
	return 1;
}

/*
 * Scan the free list for a page whose buffers are all unused and, if
 * one is found, reassign the whole page to the requested device and
 * block range.
 */
static int reassign_cluster(dev_t dev,
			    unsigned int starting_block, int size)
{
	struct buffer_head *bh;
	int isize = BUFSIZE_INDEX(size);
	int i;

	/* Make sure there are plenty of free buffers of the right size
	   for the scan below to work with. */
	while (nr_free[isize] < 32) refill_freelist(size);

	bh = free_list[isize];
	if (bh)
		for (i=0 ; !i || bh != free_list[isize] ; bh = bh->b_next_free, i++) {
			if (!bh->b_this_page) continue;
			if (try_to_reassign(bh, &bh, dev, starting_block))
				return 4;
		}
	return 0;
}

/*
 * This function tries to generate a new cluster of buffers
 * from a new page in memory.
 */
static unsigned long try_to_generate_cluster(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp, * arr[MAX_BUF_PER_PAGE];
	int isize = BUFSIZE_INDEX(size);
	unsigned long offset;
	unsigned long page;
	int nblock;

	page = get_free_page(GFP_NOBUFFER);
	if (!page) return 0;

	bh = create_buffers(page, size);
	if (!bh) {
		free_page(page);
		return 0;
	};
	nblock = block;
	for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
		if (find_buffer(dev, nblock++, size))
			goto not_aligned;
	}
	tmp = bh;
	nblock = 0;
	while (1) {
		arr[nblock++] = bh;
		bh->b_count = 1;
		bh->b_dirt = 0;
		bh->b_flushtime = 0;
		bh->b_lock = 0;
		bh->b_uptodate = 0;
		bh->b_req = 0;
		bh->b_dev = dev;
		bh->b_list = BUF_CLEAN;
		bh->b_blocknr = block++;
		nr_buffers++;
		nr_buffers_size[isize]++;
		insert_into_queues(bh);
		if (bh->b_this_page)
			bh = bh->b_this_page;
		else
			break;
	}
	buffermem += PAGE_SIZE;
	buffer_pages[MAP_NR(page)] = bh;
	bh->b_this_page = tmp;
	while (nblock-- > 0)
		brelse(arr[nblock]);
	return 4;
not_aligned:
	while ((tmp = bh) != NULL) {
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
	return 0;
}

unsigned long generate_cluster(dev_t dev, int b[], int size)
{
	int i, offset;

	for (i = 0, offset = 0 ; offset < PAGE_SIZE ; i++, offset += size) {
		if (i && b[i]-1 != b[i-1]) return 0;	/* No need to cluster */
		if (find_buffer(dev, b[i], size)) return 0;
	};

	/* OK, we have a candidate for a new cluster. */

	/* See if one size of buffer is over-represented in the buffer
	   cache; if so, reduce the numbers of buffers. */
	if (maybe_shrink_lav_buffers(size))
	{
		int retval;
		retval = try_to_generate_cluster(dev, b[0], size);
		if (retval) return retval;
	};

	if (nr_free_pages > min_free_pages*2)
		return try_to_generate_cluster(dev, b[0], size);
	else
		return reassign_cluster(dev, b[0], size);
}

/*
 * This initializes the initial buffer free list: allocate the hash
 * table and the buffer_pages map, sized by total memory, and put a
 * first page of BLOCK_SIZE buffers on the free list.
 */
void buffer_init(void)
{
	int i;
	int isize = BUFSIZE_INDEX(BLOCK_SIZE);
	long memsize = MAP_NR(high_memory) << PAGE_SHIFT;

	if (memsize >= 4*1024*1024) {
		if (memsize >= 16*1024*1024)
			nr_hash = 16381;
		else
			nr_hash = 4093;
	} else {
		nr_hash = 997;
	};

	hash_table = (struct buffer_head **) vmalloc(nr_hash *
						     sizeof(struct buffer_head *));

	buffer_pages = (struct buffer_head **) vmalloc(MAP_NR(high_memory) *
						       sizeof(struct buffer_head *));
	for (i = 0 ; i < MAP_NR(high_memory) ; i++)
		buffer_pages[i] = NULL;

	for (i = 0 ; i < nr_hash ; i++)
		hash_table[i] = NULL;
	lru_list[BUF_CLEAN] = 0;
	grow_buffers(GFP_KERNEL, BLOCK_SIZE);
	if (!free_list[isize])
		panic("VFS: Unable to initialize buffer free list!");
	return;
}

/* ====================== bdflush support =================== */

/*
 * bdflush is a simple kernel daemon that writes back dirty buffers.
 * Once activated (by wakeup_bdflush), it writes a limited number of
 * buffers to disk and then goes back to sleep.  It never leaves kernel
 * mode and has no user memory associated with it except for the stack.
 */
struct wait_queue * bdflush_wait = NULL;
struct wait_queue * bdflush_done = NULL;

static int bdflush_running = 0;

static void wakeup_bdflush(int wait)
{
	if (!bdflush_running){
		printk("Warning - bdflush not running\n");
		sync_buffers(0,0);
		return;
	};
	wake_up(&bdflush_wait);
	if (wait) sleep_on(&bdflush_done);
}

/*
 * Here we attempt to write back old buffers.  We also try to flush
 * inodes and supers as well, since this function is essentially run
 * every 5 seconds by the update daemon.
 */
asmlinkage int sync_old_buffers(void)
{
	int i, isize;
	int ndirty, nwritten;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	sync_supers(0);
	sync_inodes(0);

	ncount = 0;
#ifdef DEBUG
	for (nlist = 0; nlist < NR_LIST; nlist++)
#else
	for (nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
	{
		ndirty = 0;
		nwritten = 0;
	repeat:
		bh = lru_list[nlist];
		if (bh)
			for (i = nr_buffers_type[nlist]; i-- > 0; bh = next) {
				/* We may have stalled while waiting for I/O to complete. */
				if (bh->b_list != nlist) goto repeat;
				next = bh->b_next_free;
				if (!lru_list[nlist]) {
					printk("Dirty list empty %d\n", i);
					break;
				}

				/* Clean buffer on dirty list?  Refile it */
				if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
				{
					refile_buffer(bh);
					continue;
				}

				if (bh->b_lock || !bh->b_dirt)
					continue;
				ndirty++;
				if (bh->b_flushtime > jiffies) continue;
				nwritten++;
				bh->b_count++;
				bh->b_flushtime = 0;
#ifdef DEBUG
				if (nlist != BUF_DIRTY) ncount++;
#endif
				ll_rw_block(WRITE, 1, &bh);
				bh->b_count--;
			}
	}
#ifdef DEBUG
	if (ncount) printk("sync_old_buffers: %d dirty buffers not on dirty list\n", ncount);
	printk("Wrote %d/%d buffers\n", nwritten, ndirty);
#endif

	/* We assume that we only come through here on a regular schedule,
	   like every 5 seconds.  Now update load averages.  Shift usage
	   counts to prevent overflow. */
	for (isize = 0; isize < NR_SIZES; isize++) {
		CALC_LOAD(buffers_lav[isize], bdf_prm.b_un.lav_const, buffer_usage[isize]);
		buffer_usage[isize] = 0;
	};
	return 0;
}

/*
 * This is the interface to bdflush:
 *   func == 1      flush old buffers once (used by the update daemon);
 *   func >= 2      read (even func) or write (odd func) one of the
 *                  N_PARAM tuning parameters;
 *   func == 0      turn the calling process into the bdflush daemon.
 */
asmlinkage int sys_bdflush(int func, long data)
{
	int i, error;
	int ndirty;
	int nlist;
	int ncount;
	struct buffer_head * bh, *next;

	if (!suser())
		return -EPERM;

	if (func == 1)
		return sync_old_buffers();

	/* func 2*n+2 reads parameter n, func 2*n+3 sets it. */
	if (func >= 2) {
		i = (func-2) >> 1;
		if (i < 0 || i >= N_PARAM)
			return -EINVAL;
		if ((func & 1) == 0) {
			error = verify_area(VERIFY_WRITE, (void *) data, sizeof(int));
			if (error)
				return error;
			put_user(bdf_prm.data[i], (int*)data);
			return 0;
		};
		if (data < bdflush_min[i] || data > bdflush_max[i])
			return -EINVAL;
		bdf_prm.data[i] = data;
		return 0;
	};

	if (bdflush_running)
		return -EBUSY;	/* Only one copy of this running at one time */
	bdflush_running++;

	/* OK, from here on is the daemon. */
	for (;;) {
#ifdef DEBUG
		printk("bdflush() activated...");
#endif

		ncount = 0;
#ifdef DEBUG
		for (nlist = 0; nlist < NR_LIST; nlist++)
#else
		for (nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
#endif
		{
			ndirty = 0;
		repeat:
			bh = lru_list[nlist];
			if (bh)
				for (i = nr_buffers_type[nlist]; i-- > 0 && ndirty < bdf_prm.b_un.ndirty;
				     bh = next) {
					/* We may have stalled while waiting for I/O to complete. */
					if (bh->b_list != nlist) goto repeat;
					next = bh->b_next_free;
					if (!lru_list[nlist]) {
						printk("Dirty list empty %d\n", i);
						break;
					}

					/* Clean buffer on dirty list?  Refile it */
					if (nlist == BUF_DIRTY && !bh->b_dirt && !bh->b_lock)
					{
						refile_buffer(bh);
						continue;
					}

					if (bh->b_lock || !bh->b_dirt)
						continue;

					bh->b_count++;
					ndirty++;
					bh->b_flushtime = 0;
					ll_rw_block(WRITE, 1, &bh);
#ifdef DEBUG
					if (nlist != BUF_DIRTY) ncount++;
#endif
					bh->b_count--;
				}
		}
#ifdef DEBUG
		if (ncount) printk("sys_bdflush: %d dirty buffers not on dirty list\n", ncount);
		printk("sleeping again.\n");
#endif
		wake_up(&bdflush_done);

		/* If there are still a lot of dirty buffers around, skip
		   the sleep and flush some more. */
		if (nr_buffers_type[BUF_DIRTY] <= (nr_buffers - nr_buffers_type[BUF_SHARED]) *
		    bdf_prm.b_un.nfract/100) {
			if (current->signal & (1 << (SIGKILL-1))) {
				bdflush_running--;
				return 0;
			}
			current->signal = 0;
			interruptible_sleep_on(&bdflush_wait);
		}
	}
}
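
/*
 * User-space usage sketch (hypothetical, mirroring the encoding above):
 * parameter n is read with func = 2*n + 2 and written with
 * func = 2*n + 3, func 1 does a one-shot flush, and func 0 turns the
 * caller into the flush daemon:
 *
 *	bdflush(1, 0);		// flush old buffers once
 *	bdflush(2*0 + 3, 60);	// set parameter 0 (nfract) to 60%
 *	bdflush(0, 0);		// become bdflush; returns only on SIGKILL
 */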