This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- bread_page
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26
27 #include <asm/system.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_SCSI
31 #ifdef CONFIG_BLK_DEV_SR
32 extern int check_cdrom_media_change(int, int);
33 #endif
34 #ifdef CONFIG_BLK_DEV_SD
35 extern int check_scsidisk_media_change(int, int);
36 extern int revalidate_scsidisk(int, int);
37 #endif
38 #endif
39
40 static struct buffer_head * hash_table[NR_HASH];
41 static struct buffer_head * free_list = NULL;
42 static struct buffer_head * unused_list = NULL;
43 static struct wait_queue * buffer_wait = NULL;
44
45 int nr_buffers = 0;
46 int buffermem = 0;
47 int nr_buffer_heads = 0;
48 static int min_free_pages = 20;
49
50
51
52
53
54
55
56
57
58
/*
 * Sleep until the buffer is unlocked.  The task is put on the buffer's
 * wait queue *before* b_lock is tested, so a wake-up arriving between
 * the test and schedule() cannot be lost: the state is set back to
 * TASK_RUNNING either by the waker or by ourselves below.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* Mark ourselves asleep first, then re-check the lock. */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
73
74 static void sync_buffers(dev_t dev)
75 {
76 int i;
77 struct buffer_head * bh;
78
79 bh = free_list;
80 for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
81 if (bh->b_lock)
82 continue;
83 if (!bh->b_dirt)
84 continue;
85 ll_rw_block(WRITE, 1, &bh);
86 }
87 }
88
/*
 * Flush everything for one device (sys_sync passes 0, meaning all
 * devices): buffers, then superblocks and inodes.  The second
 * sync_buffers() pass presumably catches buffers dirtied while the
 * supers/inodes were being written back -- TODO confirm.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
96
/*
 * The sync() system call: push all dirty buffers, superblocks and
 * inodes of every device to disk.  Always reports success.
 */
int sys_sync(void)
{
	sync_dev(0);	/* dev 0 == every device */
	return 0;
}
102
103 void invalidate_buffers(dev_t dev)
104 {
105 int i;
106 struct buffer_head * bh;
107
108 bh = free_list;
109 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
110 if (bh->b_dev != dev)
111 continue;
112 wait_on_buffer(bh);
113 if (bh->b_dev == dev)
114 bh->b_uptodate = bh->b_dirt = 0;
115 }
116 }
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
/*
 * Check whether the removable media in 'dev' has been changed and, if
 * so, invalidate everything cached for that device (superblock,
 * inodes, buffers).  Only floppy (major 2) and, when configured, SCSI
 * disk (major 8) and SCSI CD-ROM (major 11) are checked; any other
 * major is ignored.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case 2:	/* floppy: read block 0 so the driver can probe the media */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case 8:	/* SCSI disk */
		i = check_scsidisk_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for SCSI disk\n");
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case 11:	/* SCSI CD-ROM */
		i = check_cdrom_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for CDROM\n");
		break;
#endif

	default:
		return;
	};

	if (!i) return;	/* media unchanged: nothing to do */

	/* Media changed: drop any mounted superblock, then all cached
	 * inodes and buffers for the device. */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* Presumably re-reads the partition table of the new SCSI disk
	 * -- TODO confirm against revalidate_scsidisk(). */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
178
/* Hash a (device, block-number) pair to a slot in hash_table[]. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
181
182 static inline void remove_from_hash_queue(struct buffer_head * bh)
183 {
184 if (bh->b_next)
185 bh->b_next->b_prev = bh->b_prev;
186 if (bh->b_prev)
187 bh->b_prev->b_next = bh->b_next;
188 if (hash(bh->b_dev,bh->b_blocknr) == bh)
189 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
190 bh->b_next = bh->b_prev = NULL;
191 }
192
193 static inline void remove_from_free_list(struct buffer_head * bh)
194 {
195 if (!(bh->b_prev_free) || !(bh->b_next_free))
196 panic("Free block list corrupted");
197 bh->b_prev_free->b_next_free = bh->b_next_free;
198 bh->b_next_free->b_prev_free = bh->b_prev_free;
199 if (free_list == bh)
200 free_list = bh->b_next_free;
201 bh->b_next_free = bh->b_prev_free = NULL;
202 }
203
/* Fully unlink a buffer: off the hash chain and off the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
209
210 static inline void put_first_free(struct buffer_head * bh)
211 {
212 if (!bh || (bh == free_list))
213 return;
214 remove_from_free_list(bh);
215
216 bh->b_next_free = free_list;
217 bh->b_prev_free = free_list->b_prev_free;
218 free_list->b_prev_free->b_next_free = bh;
219 free_list->b_prev_free = bh;
220 free_list = bh;
221 }
222
223 static inline void put_last_free(struct buffer_head * bh)
224 {
225 if (!bh)
226 return;
227 if (bh == free_list) {
228 free_list = bh->b_next_free;
229 return;
230 }
231 remove_from_free_list(bh);
232
233 bh->b_next_free = free_list;
234 bh->b_prev_free = free_list->b_prev_free;
235 free_list->b_prev_free->b_next_free = bh;
236 free_list->b_prev_free = bh;
237 }
238
239 static inline void insert_into_queues(struct buffer_head * bh)
240 {
241
242 bh->b_next_free = free_list;
243 bh->b_prev_free = free_list->b_prev_free;
244 free_list->b_prev_free->b_next_free = bh;
245 free_list->b_prev_free = bh;
246
247 bh->b_prev = NULL;
248 bh->b_next = NULL;
249 if (!bh->b_dev)
250 return;
251 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
252 hash(bh->b_dev,bh->b_blocknr) = bh;
253 if (bh->b_next)
254 bh->b_next->b_prev = bh;
255 }
256
257 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
258 {
259 struct buffer_head * tmp;
260
261 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
262 if (tmp->b_dev==dev && tmp->b_blocknr==block)
263 if (tmp->b_size == size)
264 return tmp;
265 else {
266 printk("wrong block-size on device %04x\n",dev);
267 return NULL;
268 }
269 return NULL;
270 }
271
272
273
274
275
276
277
278
/*
 * Look up a (dev,block,size) buffer in the cache and take a reference
 * to it.  b_count is bumped *before* sleeping in wait_on_buffer() so
 * the buffer cannot be reused while we sleep; after waking we verify
 * that the buffer still describes the same block, and if it changed
 * identity we drop the reference and retry the lookup.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;	/* identity changed while asleep: retry */
	}
}
293
294
295
296
297
298
299
300
301
302
303
/*
 * Rank a reuse candidate: clean+unlocked (0) is best, locked (1)
 * worse, dirty (2) and dirty+locked (3) worst.
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a buffer for (dev,block), creating one if it is not cached.
 * The returned buffer holds a reference (b_count); release it with
 * brelse().  May sleep.  A newly recycled buffer comes back with
 * b_uptodate == 0 -- the caller must read it in if it needs the data.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;	/* byte budget before next pool growth */

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* A clean, valid buffer is cheap to recreate: park it at the
		 * end of the free list so it is reused first if unreferenced. */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* Grow the pool at most once per 4096 bytes of requests, and only
	 * while free pages and total buffer memory allow it. */
	grow_size -= size;
	if (nr_free_pages > min_free_pages &&
	buffermem < 6*1024*1024 &&
	grow_size <= 0) {
		grow_buffers(size);
		grow_size = 4096;
	}
	buffers = nr_buffers;
	bh = NULL;

	/* Scan the free ring for the least "bad" unreferenced buffer of
	 * the right size; a perfectly clean one ends the scan early. */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

	/* No candidate and no memory to grow: wait for brelse() to wake
	 * us, then start over. */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	/* wait_on_buffer() may sleep; everything below re-validates the
	 * candidate because the world can change while we do. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
	/* Someone may have created this very block while we slept. */
	if (find_buffer(dev,block,size))
		goto repeat;

	/* The buffer is ours: rebind it to (dev,block) and rehash it. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
374
375 void brelse(struct buffer_head * buf)
376 {
377 if (!buf)
378 return;
379 wait_on_buffer(buf);
380 if (buf->b_count) {
381 if (--buf->b_count)
382 return;
383 wake_up(&buffer_wait);
384 return;
385 }
386 printk("Trying to free free buffer\n");
387 }
388
389
390
391
392
393 struct buffer_head * bread(dev_t dev, int block, int size)
394 {
395 struct buffer_head * bh;
396
397 if (!(bh = getblk(dev, block, size))) {
398 printk("bread: getblk returned NULL\n");
399 return NULL;
400 }
401 if (bh->b_uptodate)
402 return bh;
403 ll_rw_block(READ, 1, &bh);
404 wait_on_buffer(bh);
405 if (bh->b_uptodate)
406 return bh;
407 brelse(bh);
408 return NULL;
409 }
410
/*
 * Copy one BLOCK_SIZE-byte block from 'from' to 'to' using rep;movsl
 * (BLOCK_SIZE/4 longword moves).  NOTE(review): the clobber list uses
 * the old register spellings ("cx","di","si") expected by the gcc of
 * this era -- confirm before building with a newer compiler.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")
417
418
419
420
421
422
423
424 void bread_page(unsigned long address, dev_t dev, int b[4])
425 {
426 struct buffer_head * bh[4];
427 struct buffer_head * bhr[4];
428 int bhnum = 0;
429 int i;
430
431 for (i=0 ; i<4 ; i++)
432 if (b[i]) {
433 bh[i] = getblk(dev, b[i], 1024);
434 if (bh[i] && !bh[i]->b_uptodate)
435 bhr[bhnum++] = bh[i];
436 } else
437 bh[i] = NULL;
438
439 if (bhnum)
440 ll_rw_block(READ, bhnum, bhr);
441
442 for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
443 if (bh[i]) {
444 wait_on_buffer(bh[i]);
445 if (bh[i]->b_uptodate)
446 COPYBLK((unsigned long) bh[i]->b_data,address);
447 brelse(bh[i]);
448 }
449 }
450
451
452
453
454
455
/*
 * Read block 'first' and start read-ahead on the following 1kB block
 * numbers passed as varargs; the list is terminated by a negative
 * value.  Only 'first' is waited for.  Read-ahead buffers are released
 * immediately after the request is queued -- presumably the driver
 * keeps them alive via b_lock until the I/O completes (TODO confirm).
 * NOTE(review): the early NULL return skips va_end() -- harmless with
 * this era's va_list, but worth tidying.
 */
struct buffer_head * breada(dev_t dev,int first, ...)
{
	va_list args;
	struct buffer_head * bh, *tmp;

	va_start(args,first);
	if (!(bh = getblk(dev, first, 1024))) {
		printk("breada: getblk returned NULL\n");
		return NULL;
	}
	if (!bh->b_uptodate)
		ll_rw_block(READ, 1, &bh);
	while ((first=va_arg(args,int))>=0) {
		tmp = getblk(dev, first, 1024);
		if (tmp) {
			if (!tmp->b_uptodate)
				ll_rw_block(READA, 1, &tmp);
			tmp->b_count--;	/* read-ahead is best-effort: drop ref now */
		}
	}
	va_end(args);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return (NULL);
}
483
484
485
486
/*
 * Return a buffer head to the unused list.  The head is wiped with
 * memset(), but b_wait is saved and restored across the wipe so that
 * tasks already queued on the buffer are not lost; the volatile casts
 * keep the compiler from reordering the load/store around the memset.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
497
498 static void get_more_buffer_heads(void)
499 {
500 unsigned long page;
501 struct buffer_head * bh;
502
503 if (unused_list)
504 return;
505 page = get_free_page(GFP_KERNEL);
506 if (!page)
507 return;
508 bh = (struct buffer_head *) page;
509 while ((unsigned long) (bh+1) <= page+4096) {
510 put_unused_buffer_head(bh);
511 bh++;
512 nr_buffer_heads++;
513 }
514 }
515
516 static struct buffer_head * get_unused_buffer_head(void)
517 {
518 struct buffer_head * bh;
519
520 get_more_buffer_heads();
521 if (!unused_list)
522 return NULL;
523 bh = unused_list;
524 unused_list = bh->b_next_free;
525 bh->b_next_free = NULL;
526 bh->b_data = NULL;
527 bh->b_size = 0;
528 return bh;
529 }
530
531
532
533
534
535
536
/*
 * Allocate one page and carve it into buffers of 'size' bytes, linking
 * them into a ring via b_this_page and splicing each into the circular
 * free list.  'size' must be a multiple of 512 and at most 4096.  On
 * buffer-head exhaustion everything allocated here is given back.
 */
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > 4096)) {
		printk("grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;	/* NOTE(review): dead store -- the for loop reinitializes i */
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;	/* chain backwards for now */
		tmp = bh;
		bh->b_data = (char * ) (page+i);
		bh->b_size = size;
	}
	tmp = bh;	/* NOTE(review): redundant -- tmp == bh after the loop */
	/* Walk the chain, pushing every head onto the front of the
	 * (possibly empty) circular free list. */
	while (1) {
		if (free_list) {
			tmp->b_next_free = free_list;
			tmp->b_prev_free = free_list->b_prev_free;
			free_list->b_prev_free->b_next_free = tmp;
			free_list->b_prev_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	tmp->b_this_page = bh;	/* close the per-page ring */
	buffermem += 4096;
	return;

/* Out of buffer heads: undo everything and release the page. */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
}
594
595
596
597
598
/*
 * Try to free the whole page that 'bh' lives on.  Succeeds (returns 1)
 * only if every buffer in the page's b_this_page ring is unreferenced,
 * clean and unlocked; otherwise returns 0 and frees nothing.
 */
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	tmp = bh;
	do {
		if (!tmp)
			return 0;	/* incomplete ring: cannot free the page */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;	/* round down to page start (assumes 4kB pages) */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;	/* advance before p is wiped */
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= 4096;
	free_page(page);
	return 1;
}
626
627
628
629
630
631
632
633
634 int shrink_buffers(unsigned int priority)
635 {
636 struct buffer_head *bh;
637 int i;
638
639 if (priority < 2)
640 sync_buffers(0);
641 bh = free_list;
642 i = nr_buffers >> priority;
643 for ( ; i-- > 0 ; bh = bh->b_next_free) {
644 if (bh->b_count || !bh->b_this_page)
645 continue;
646 if (bh->b_lock)
647 if (priority)
648 continue;
649 else
650 wait_on_buffer(bh);
651 if (bh->b_dirt) {
652 ll_rw_block(WRITEA, 1, &bh);
653 continue;
654 }
655 if (try_to_free(bh))
656 return 1;
657 }
658 return 0;
659 }
660
661
662
663
664
665
666
667
668 void buffer_init(void)
669 {
670 int i;
671
672 if (high_memory >= 4*1024*1024)
673 min_free_pages = 200;
674 else
675 min_free_pages = 20;
676 for (i = 0 ; i < NR_HASH ; i++)
677 hash_table[i] = NULL;
678 free_list = 0;
679 grow_buffers(BLOCK_SIZE);
680 if (!free_list)
681 panic("Unable to initialize buffer free list!");
682 return;
683 }