This source file includes the following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- bread_page
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26
27 #include <asm/system.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_SCSI
31 #ifdef CONFIG_BLK_DEV_SR
32 extern int check_cdrom_media_change(int, int);
33 #endif
34 #ifdef CONFIG_BLK_DEV_SD
35 extern int check_scsidisk_media_change(int, int);
36 extern int revalidate_scsidisk(int, int);
37 #endif
38 #endif
39
40 static struct buffer_head * hash_table[NR_HASH];
41 static struct buffer_head * free_list = NULL;
42 static struct buffer_head * unused_list = NULL;
43 static struct wait_queue * buffer_wait = NULL;
44
45 int nr_buffers = 0;
46 int buffermem = 0;
47 int nr_buffer_heads = 0;
48
49
50
51
52
53
54
55
56
57
/*
 * Wait for a locked buffer to become unlocked.  Called via the
 * wait_on_buffer() wrapper only after bh->b_lock was seen set.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* Set the task state BEFORE re-testing b_lock, so a wake-up
	 * arriving between the test and schedule() is not lost. */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
72
73 static void sync_buffers(dev_t dev)
74 {
75 int i;
76 struct buffer_head * bh;
77
78 bh = free_list;
79 for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
80 if (bh->b_lock)
81 continue;
82 if (!bh->b_dirt)
83 continue;
84 ll_rw_block(WRITE, 1, &bh);
85 }
86 }
87
/*
 * Flush everything for 'dev': buffers first, then superblocks and
 * inodes (whose writeback may dirty more buffers), then the buffers
 * again to pick up what the super/inode pass dirtied.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
95
/* sync() system call: flush cached state for every device. */
int sys_sync(void)
{
	sync_dev(0);	/* dev 0 == all devices */
	return 0;
}
101
102 void invalidate_buffers(dev_t dev)
103 {
104 int i;
105 struct buffer_head * bh;
106
107 bh = free_list;
108 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
109 if (bh->b_dev != dev)
110 continue;
111 wait_on_buffer(bh);
112 if (bh->b_dev == dev)
113 bh->b_uptodate = bh->b_dirt = 0;
114 }
115 }
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
/*
 * See whether the medium in a removable-media device was changed,
 * and if so throw away every cached superblock, inode and buffer
 * belonging to it.  Only majors with a change-check hook are
 * handled; everything else returns immediately.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case 2: /* floppy: read block 0 and ask the driver */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case 8: /* SCSI disk */
		i = check_scsidisk_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for SCSI disk\n");
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case 11: /* SCSI CD-ROM */
		i = check_cdrom_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for CDROM\n");
		break;
#endif

	default: /* no media-change support for this major */
		return;
	};

	if (!i) return;	/* medium unchanged: keep the cache */

	/* Drop any superblock mounted from the device... */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	/* ...and all cached inodes and buffer contents. */
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* Re-read the partition table of the (possibly new) disk. */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
177
/* Hash a (device, block) pair to a slot in the buffer hash table. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
180
181 static inline void remove_from_hash_queue(struct buffer_head * bh)
182 {
183 if (bh->b_next)
184 bh->b_next->b_prev = bh->b_prev;
185 if (bh->b_prev)
186 bh->b_prev->b_next = bh->b_next;
187 if (hash(bh->b_dev,bh->b_blocknr) == bh)
188 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
189 bh->b_next = bh->b_prev = NULL;
190 }
191
192 static inline void remove_from_free_list(struct buffer_head * bh)
193 {
194 if (!(bh->b_prev_free) || !(bh->b_next_free))
195 panic("Free block list corrupted");
196 bh->b_prev_free->b_next_free = bh->b_next_free;
197 bh->b_next_free->b_prev_free = bh->b_prev_free;
198 if (free_list == bh)
199 free_list = bh->b_next_free;
200 bh->b_next_free = bh->b_prev_free = NULL;
201 }
202
/* Detach a buffer from both the hash chain and the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
208
209 static inline void put_first_free(struct buffer_head * bh)
210 {
211 if (!bh || (bh == free_list))
212 return;
213 remove_from_free_list(bh);
214
215 bh->b_next_free = free_list;
216 bh->b_prev_free = free_list->b_prev_free;
217 free_list->b_prev_free->b_next_free = bh;
218 free_list->b_prev_free = bh;
219 free_list = bh;
220 }
221
222 static inline void put_last_free(struct buffer_head * bh)
223 {
224 if (!bh)
225 return;
226 if (bh == free_list) {
227 free_list = bh->b_next_free;
228 return;
229 }
230 remove_from_free_list(bh);
231
232 bh->b_next_free = free_list;
233 bh->b_prev_free = free_list->b_prev_free;
234 free_list->b_prev_free->b_next_free = bh;
235 free_list->b_prev_free = bh;
236 }
237
238 static inline void insert_into_queues(struct buffer_head * bh)
239 {
240
241 bh->b_next_free = free_list;
242 bh->b_prev_free = free_list->b_prev_free;
243 free_list->b_prev_free->b_next_free = bh;
244 free_list->b_prev_free = bh;
245
246 bh->b_prev = NULL;
247 bh->b_next = NULL;
248 if (!bh->b_dev)
249 return;
250 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
251 hash(bh->b_dev,bh->b_blocknr) = bh;
252 if (bh->b_next)
253 bh->b_next->b_prev = bh;
254 }
255
256 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
257 {
258 struct buffer_head * tmp;
259
260 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
261 if (tmp->b_dev==dev && tmp->b_blocknr==block)
262 if (tmp->b_size == size)
263 return tmp;
264 else {
265 printk("wrong block-size on device %04x\n",dev);
266 return NULL;
267 }
268 return NULL;
269 }
270
271
272
273
274
275
276
277
/*
 * Find a cached buffer for (dev, block, size) and return it with its
 * reference count raised, or NULL if not cached.  After waiting for
 * a locked buffer we must re-check its identity: getblk() can
 * repurpose a buffer while we sleep, so a stale hit is retried.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;	/* pin before possibly sleeping */
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;	/* identity changed under us: retry */
	}
}
292
293
294
295
296
297
298
299
300
301
302
/* Reuse cost of a buffer: 0 = clean+unlocked (ideal), 1 = locked,
 * 2 = dirty, 3 = dirty+locked (worst). */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a pinned buffer for (dev, block, size), allocating or
 * reusing one if it is not already cached.  The returned buffer is
 * NOT necessarily up to date (b_uptodate may be 0 for a fresh one).
 * May sleep; retries from 'repeat' whenever the world may have
 * changed across a sleep.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* Cache hit: a clean, valid buffer can be re-queued at the
		 * tail of the free list so it is reclaimed last. */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}

	/* Plenty of memory and modest buffer usage: just grow the cache. */
	if (nr_free_pages > 30 && buffermem < 6*1024*1024)
		grow_buffers(size);

	buffers = nr_buffers;
	bh = NULL;

	/* Scan the free list for the least-bad reusable buffer of the
	 * right size; stop early on a perfect (BADNESS 0) candidate. */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	/* Nothing reusable: try to grow, or wait for a buffer to free up. */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}


	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	/* The candidate may change while we sleep on it: re-validate. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}

	/* Someone may have cached this exact block while we slept. */
	if (find_buffer(dev,block,size))
		goto repeat;

	/* Claim the buffer and rehash it under its new identity. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
369
370 void brelse(struct buffer_head * buf)
371 {
372 if (!buf)
373 return;
374 wait_on_buffer(buf);
375 if (buf->b_count) {
376 if (--buf->b_count)
377 return;
378 wake_up(&buffer_wait);
379 return;
380 }
381 printk("Trying to free free buffer\n");
382 }
383
384
385
386
387
388 struct buffer_head * bread(dev_t dev, int block, int size)
389 {
390 struct buffer_head * bh;
391
392 if (!(bh = getblk(dev, block, size))) {
393 printk("bread: getblk returned NULL\n");
394 return NULL;
395 }
396 if (bh->b_uptodate)
397 return bh;
398 ll_rw_block(READ, 1, &bh);
399 wait_on_buffer(bh);
400 if (bh->b_uptodate)
401 return bh;
402 brelse(bh);
403 return NULL;
404 }
405
/*
 * Copy one BLOCK_SIZE block from 'from' to 'to' with rep movsl.
 * NOTE(review): the cx/si/di input operands also appear in the
 * clobber list -- accepted by compilers of this era, but modern gcc
 * rejects clobbering an input register; verify before retargeting.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")
412
413
414
415
416
417
418
/*
 * Fill the page at 'address' from up to four 1kB blocks.  b[] holds
 * the block numbers; a zero entry leaves that kilobyte of the page
 * untouched.  All reads are submitted in a single ll_rw_block() call,
 * then each buffer is waited for, copied into the page and released.
 */
void bread_page(unsigned long address, dev_t dev, int b[4])
{
	struct buffer_head * bh[4];
	struct buffer_head * bhr[4];	/* the subset that needs real I/O */
	int bhnum = 0;
	int i;

	for (i=0 ; i<4 ; i++)
		if (b[i]) {
			bh[i] = getblk(dev, b[i], 1024);
			if (bh[i] && !bh[i]->b_uptodate)
				bhr[bhnum++] = bh[i];
		} else
			bh[i] = NULL;

	if(bhnum)
		ll_rw_block(READ, bhnum, bhr);

	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			/* Only copy if the read succeeded. */
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
}
445
446
447
448
449
450
451 struct buffer_head * breada(dev_t dev,int first, ...)
452 {
453 va_list args;
454 struct buffer_head * bh, *tmp;
455
456 va_start(args,first);
457 if (!(bh = getblk(dev, first, 1024))) {
458 printk("breada: getblk returned NULL\n");
459 return NULL;
460 }
461 if (!bh->b_uptodate)
462 ll_rw_block(READ, 1, &bh);
463 while ((first=va_arg(args,int))>=0) {
464 tmp = getblk(dev, first, 1024);
465 if (tmp) {
466 if (!tmp->b_uptodate)
467 ll_rw_block(READA, 1, &tmp);
468 tmp->b_count--;
469 }
470 }
471 va_end(args);
472 wait_on_buffer(bh);
473 if (bh->b_uptodate)
474 return bh;
475 brelse(bh);
476 return (NULL);
477 }
478
479
480
481
/*
 * Return a buffer head to the unused list.  The structure is wiped,
 * but b_wait is preserved across the memset: a sleeper may still be
 * queued on it, and losing the queue head would strand that sleeper.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
492
493 static void get_more_buffer_heads(void)
494 {
495 unsigned long page;
496 struct buffer_head * bh;
497
498 if (unused_list)
499 return;
500 page = get_free_page(GFP_KERNEL);
501 if (!page)
502 return;
503 bh = (struct buffer_head *) page;
504 while ((unsigned long) (bh+1) <= page+4096) {
505 put_unused_buffer_head(bh);
506 bh++;
507 nr_buffer_heads++;
508 }
509 }
510
511 static struct buffer_head * get_unused_buffer_head(void)
512 {
513 struct buffer_head * bh;
514
515 get_more_buffer_heads();
516 if (!unused_list)
517 return NULL;
518 bh = unused_list;
519 unused_list = bh->b_next_free;
520 bh->b_next_free = NULL;
521 bh->b_data = NULL;
522 bh->b_size = 0;
523 return bh;
524 }
525
526
527
528
529
530
531
532 void grow_buffers(int size)
533 {
534 unsigned long page;
535 int i;
536 struct buffer_head *bh, *tmp;
537
538 if ((size & 511) || (size > 4096)) {
539 printk("grow_buffers: size = %d\n",size);
540 return;
541 }
542 page = get_free_page(GFP_BUFFER);
543 if (!page)
544 return;
545 tmp = NULL;
546 i = 0;
547 for (i = 0 ; i+size <= 4096 ; i += size) {
548 bh = get_unused_buffer_head();
549 if (!bh)
550 goto no_grow;
551 bh->b_this_page = tmp;
552 tmp = bh;
553 bh->b_data = (char * ) (page+i);
554 bh->b_size = size;
555 }
556 tmp = bh;
557 while (1) {
558 if (free_list) {
559 tmp->b_next_free = free_list;
560 tmp->b_prev_free = free_list->b_prev_free;
561 free_list->b_prev_free->b_next_free = tmp;
562 free_list->b_prev_free = tmp;
563 } else {
564 tmp->b_prev_free = tmp;
565 tmp->b_next_free = tmp;
566 }
567 free_list = tmp;
568 ++nr_buffers;
569 if (tmp->b_this_page)
570 tmp = tmp->b_this_page;
571 else
572 break;
573 }
574 tmp->b_this_page = bh;
575 buffermem += 4096;
576 return;
577
578
579
580 no_grow:
581 bh = tmp;
582 while (bh) {
583 tmp = bh;
584 bh = bh->b_this_page;
585 put_unused_buffer_head(tmp);
586 }
587 free_page(page);
588 }
589
590
591
592
593
/*
 * Try to free the page that 'bh' lives on.  Succeeds (returns 1)
 * only when every buffer sharing the page -- linked circularly via
 * b_this_page -- is unused, clean and unlocked; all of them are then
 * removed from the queues and the page released.  Returns 0 otherwise.
 */
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	tmp = bh;
	do {
		if (!tmp)	/* broken chain: bail out */
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* Mask down to the start of the 4kB page holding the data. */
	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= 4096;
	free_page(page);
	return 1;
}
621
622
623
624
625
626
627
628
629 int shrink_buffers(unsigned int priority)
630 {
631 struct buffer_head *bh;
632 int i;
633
634 if (priority < 2)
635 sync_buffers(0);
636 bh = free_list;
637 i = nr_buffers >> priority;
638 for ( ; i-- > 0 ; bh = bh->b_next_free) {
639 if (bh->b_count || !bh->b_this_page)
640 continue;
641 if (bh->b_lock)
642 if (priority)
643 continue;
644 else
645 wait_on_buffer(bh);
646 if (bh->b_dirt) {
647 ll_rw_block(WRITEA, 1, &bh);
648 continue;
649 }
650 if (try_to_free(bh))
651 return 1;
652 }
653 return 0;
654 }
655
656
657
658
659
660
661
662
663 void buffer_init(void)
664 {
665 int i;
666
667 for (i = 0 ; i < NR_HASH ; i++)
668 hash_table[i] = NULL;
669 free_list = 0;
670 grow_buffers(BLOCK_SIZE);
671 if (!free_list)
672 panic("Unable to initialize buffer free list!");
673 return;
674 }