This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- bread_page
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26
27 #include <asm/system.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_SCSI
31 #ifdef CONFIG_BLK_DEV_SR
32 extern int check_cdrom_media_change(int, int);
33 #endif
34 #ifdef CONFIG_BLK_DEV_SD
35 extern int check_scsidisk_media_change(int, int);
36 extern int revalidate_scsidisk(int, int);
37 #endif
38 #endif
39
40 static struct buffer_head * hash_table[NR_HASH];
41 static struct buffer_head * free_list = NULL;
42 static struct buffer_head * unused_list = NULL;
43 static struct wait_queue * buffer_wait = NULL;
44
45 int nr_buffers = 0;
46 int nr_buffer_heads = 0;
47
48
49
50
51
52
53
54
55
56
/*
 * Sleep until the buffer is unlocked.  The task marks itself
 * TASK_UNINTERRUPTIBLE *before* testing b_lock, so a wake-up arriving
 * between the test and schedule() cannot be lost: the waker sets us
 * runnable again and schedule() returns promptly.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* state change must precede the b_lock test (sleep/wake race) */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
71
72 static void sync_buffers(dev_t dev)
73 {
74 int i;
75 struct buffer_head * bh;
76
77 bh = free_list;
78 for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
79 if (bh->b_lock)
80 continue;
81 if (!bh->b_dirt)
82 continue;
83 ll_rw_block(WRITE, 1, &bh);
84 }
85 }
86
/*
 * Sync everything for 'dev' (0 = all devices).  The order matters:
 * start the already-dirty buffers writing, flush super blocks and
 * inodes (which may dirty more buffers), then start those writing
 * too.  Entirely asynchronous - no completion wait.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
94
/*
 * The sync() system call: flush dirty super blocks, inodes and
 * buffers for every device (dev 0 = all).  Always returns 0.
 */
int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
100
101 void invalidate_buffers(dev_t dev)
102 {
103 int i;
104 struct buffer_head * bh;
105
106 bh = free_list;
107 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
108 if (bh->b_dev != dev)
109 continue;
110 wait_on_buffer(bh);
111 if (bh->b_dev == dev)
112 bh->b_uptodate = bh->b_dirt = 0;
113 }
114 }
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
/*
 * Detect a media change on a removable device and, if one happened,
 * flush every cached object (super blocks, inodes, buffers) that
 * refers to the device.  Major 2 = floppy, 8 = SCSI disk,
 * 11 = SCSI CD-ROM; anything else is ignored.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case 2: /* floppy */
		/* floppy_change() needs a buffer on the drive to probe with */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case 8: /* (removable) scsi disk */
		i = check_scsidisk_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for SCSI disk\n");
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case 11: /* scsi cd-rom */
		i = check_cdrom_media_change(dev, 0);
		if (i) printk("Flushing buffers and inodes for CDROM\n");
		break;
#endif

	default: /* not a removable device we know about */
		return;
	};

	if (!i) return; /* medium unchanged - nothing to do */

	/* drop any super block cached for this device */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* re-read partition information for the new SCSI medium */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
176
/* Map a (device, block) pair to one of the NR_HASH buffer chains. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
179
180 static inline void remove_from_hash_queue(struct buffer_head * bh)
181 {
182 if (bh->b_next)
183 bh->b_next->b_prev = bh->b_prev;
184 if (bh->b_prev)
185 bh->b_prev->b_next = bh->b_next;
186 if (hash(bh->b_dev,bh->b_blocknr) == bh)
187 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
188 bh->b_next = bh->b_prev = NULL;
189 }
190
191 static inline void remove_from_free_list(struct buffer_head * bh)
192 {
193 if (!(bh->b_prev_free) || !(bh->b_next_free))
194 panic("Free block list corrupted");
195 bh->b_prev_free->b_next_free = bh->b_next_free;
196 bh->b_next_free->b_prev_free = bh->b_prev_free;
197 if (free_list == bh)
198 free_list = bh->b_next_free;
199 bh->b_next_free = bh->b_prev_free = NULL;
200 }
201
/* Take a buffer off both its hash chain and the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
207
208 static inline void put_first_free(struct buffer_head * bh)
209 {
210 if (!bh || (bh == free_list))
211 return;
212 remove_from_free_list(bh);
213
214 bh->b_next_free = free_list;
215 bh->b_prev_free = free_list->b_prev_free;
216 free_list->b_prev_free->b_next_free = bh;
217 free_list->b_prev_free = bh;
218 free_list = bh;
219 }
220
221 static inline void put_last_free(struct buffer_head * bh)
222 {
223 if (!bh)
224 return;
225 if (bh == free_list) {
226 free_list = bh->b_next_free;
227 return;
228 }
229 remove_from_free_list(bh);
230
231 bh->b_next_free = free_list;
232 bh->b_prev_free = free_list->b_prev_free;
233 free_list->b_prev_free->b_next_free = bh;
234 free_list->b_prev_free = bh;
235 }
236
237 static inline void insert_into_queues(struct buffer_head * bh)
238 {
239
240 bh->b_next_free = free_list;
241 bh->b_prev_free = free_list->b_prev_free;
242 free_list->b_prev_free->b_next_free = bh;
243 free_list->b_prev_free = bh;
244
245 bh->b_prev = NULL;
246 bh->b_next = NULL;
247 if (!bh->b_dev)
248 return;
249 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
250 hash(bh->b_dev,bh->b_blocknr) = bh;
251 if (bh->b_next)
252 bh->b_next->b_prev = bh;
253 }
254
255 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
256 {
257 struct buffer_head * tmp;
258
259 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
260 if (tmp->b_dev==dev && tmp->b_blocknr==block)
261 if (tmp->b_size == size)
262 return tmp;
263 else {
264 printk("wrong block-size on device %04x\n",dev);
265 return NULL;
266 }
267 return NULL;
268 }
269
270
271
272
273
274
275
276
/*
 * Look up the buffer for (dev,block,size) and return it with its
 * reference count raised, or NULL if it is not cached.  Because
 * wait_on_buffer() may sleep, the buffer can be reallocated under
 * us; in that case drop the reference and retry the lookup.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		/* pin the buffer before sleeping so it cannot be freed */
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size) {
			/* still the right block: make it last in line for reuse */
			put_last_free(bh);
			return bh;
		}
		/* recycled while we slept - drop it and look again */
		bh->b_count--;
	}
}
293
294
295
296
297
298
299
300
301
302
303
/*
 * Reclaim cost of a buffer: clean+unlocked (0) is best, locked (1)
 * next, dirty (2) then dirty+locked (3) worst.
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a buffer for (dev,block,size) with b_count set, creating
 * one (contents NOT read from disk - b_uptodate is 0) if the block
 * is not cached.  Several steps below may sleep; after every sleep
 * we "goto repeat" and re-validate, since the world may have changed.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if (bh = get_hash_table(dev, block, size))
		return bh;

	/* plenty of free memory: just grow the buffer cache */
	if (nr_free_pages > 30)
		grow_buffers(size);

	buffers = nr_buffers;
	bh = NULL;

	/* pick the least "bad" unused buffer of the right size */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* clean and unlocked: can't do better */
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	/* none reclaimable but some memory left: grow and retry */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

	/* nothing at all: wait for someone to release a buffer */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	/* re-validate the candidate after sleeping */
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}

	/* the block may have been cached by someone else while we slept */
	if (find_buffer(dev,block,size))
		goto repeat;

	/* it's ours: claim it and rehash under the new identity */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
366
367 void brelse(struct buffer_head * buf)
368 {
369 if (!buf)
370 return;
371 wait_on_buffer(buf);
372 if (!(buf->b_count--))
373 panic("Trying to free free buffer");
374 wake_up(&buffer_wait);
375 }
376
377
378
379
380
381 struct buffer_head * bread(dev_t dev, int block, int size)
382 {
383 struct buffer_head * bh;
384
385 if (!(bh = getblk(dev, block, size))) {
386 printk("bread: getblk returned NULL\n");
387 return NULL;
388 }
389 if (bh->b_uptodate)
390 return bh;
391 ll_rw_block(READ, 1, &bh);
392 wait_on_buffer(bh);
393 if (bh->b_uptodate)
394 return bh;
395 brelse(bh);
396 return NULL;
397 }
398
/*
 * Copy one BLOCK_SIZE block with "rep movsl" (BLOCK_SIZE/4 longs
 * after clearing the direction flag).  Clobbers cx/di/si.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")
405
406
407
408
409
410
411
/*
 * Read up to four 1kB blocks (block numbers in b[], 0 = skip) into
 * the page at 'address'.  All reads are started first and only then
 * waited for, so the four requests can be in flight simultaneously.
 * Blocks that fail to read simply leave their page quarter untouched.
 */
void bread_page(unsigned long address, dev_t dev, int b[4])
{
	struct buffer_head * bh[4];
	int i;

	/* phase 1: start every needed read */
	for (i=0 ; i<4 ; i++)
		if (b[i]) {
			if (bh[i] = getblk(dev, b[i], 1024))
				if (!bh[i]->b_uptodate)
					ll_rw_block(READ, 1, &bh[i]);
		} else
			bh[i] = NULL;
	/* phase 2: wait, copy into the page, release */
	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
}
432
433
434
435
436
437
438 struct buffer_head * breada(dev_t dev,int first, ...)
439 {
440 va_list args;
441 struct buffer_head * bh, *tmp;
442
443 va_start(args,first);
444 if (!(bh = getblk(dev, first, 1024))) {
445 printk("breada: getblk returned NULL\n");
446 return NULL;
447 }
448 if (!bh->b_uptodate)
449 ll_rw_block(READ, 1, &bh);
450 while ((first=va_arg(args,int))>=0) {
451 tmp = getblk(dev, first, 1024);
452 if (tmp) {
453 if (!tmp->b_uptodate)
454 ll_rw_block(READA, 1, &tmp);
455 tmp->b_count--;
456 }
457 }
458 va_end(args);
459 wait_on_buffer(bh);
460 if (bh->b_uptodate)
461 return bh;
462 brelse(bh);
463 return (NULL);
464 }
465
466
467
468
/*
 * Return a buffer head to the unused list.  Everything except the
 * wait queue is zeroed; the queue is preserved because tasks may
 * still be sleeping on it.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	/* volatile accesses: b_wait can be touched by asynchronous wake-ups */
	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
479
480 static void get_more_buffer_heads(void)
481 {
482 unsigned long page;
483 struct buffer_head * bh;
484
485 if (unused_list)
486 return;
487 page = get_free_page(GFP_KERNEL);
488 if (!page)
489 return;
490 bh = (struct buffer_head *) page;
491 while ((unsigned long) (bh+1) <= page+4096) {
492 put_unused_buffer_head(bh);
493 bh++;
494 nr_buffer_heads++;
495 }
496 }
497
498 static struct buffer_head * get_unused_buffer_head(void)
499 {
500 struct buffer_head * bh;
501
502 get_more_buffer_heads();
503 if (!unused_list)
504 return NULL;
505 bh = unused_list;
506 unused_list = bh->b_next_free;
507 bh->b_next_free = NULL;
508 bh->b_data = NULL;
509 bh->b_size = 0;
510 return bh;
511 }
512
513
514
515
516
517
518
/*
 * Carve one fresh page into buffers of 'size' bytes each, chain the
 * heads of that page into a ring via b_this_page, and push them all
 * onto the front of the free list.  size must be a multiple of 512
 * and no larger than one page (4096).
 */
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > 4096)) {
		printk("grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;
	/* allocate one head per 'size'-byte slice of the page; each head
	   points (b_this_page) at the previously allocated one */
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;
		tmp = bh;
		bh->b_data = (char * ) (page+i);
		bh->b_size = size;
	}
	tmp = bh;	/* no-op: tmp already equals the last head */
	/* walk from the last head down to the first, pushing each
	   onto the front of the (circular) free list */
	while (1) {
		if (free_list) {
			tmp->b_next_free = free_list;
			tmp->b_prev_free = free_list->b_prev_free;
			free_list->b_prev_free->b_next_free = tmp;
			free_list->b_prev_free = tmp;
		} else {
			/* empty list: the buffer is its own neighbour */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	/* close the per-page ring: first head points back to the last */
	tmp->b_this_page = bh;
	return;

	/* ran out of buffer heads: undo the partial chain and give
	   the page back */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
}
575
576
577
578
579
580 static int try_to_free(struct buffer_head * bh)
581 {
582 unsigned long page;
583 struct buffer_head * tmp, * p;
584
585 tmp = bh;
586 do {
587 if (!tmp)
588 return 0;
589 if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
590 return 0;
591 tmp = tmp->b_this_page;
592 } while (tmp != bh);
593 page = (unsigned long) bh->b_data;
594 page &= 0xfffff000;
595 tmp = bh;
596 do {
597 p = tmp;
598 tmp = tmp->b_this_page;
599 nr_buffers--;
600 remove_from_queues(p);
601 put_unused_buffer_head(p);
602 } while (tmp != bh);
603 free_page(page);
604 return 1;
605 }
606
607
608
609
610
611
612
613
614 int shrink_buffers(unsigned int priority)
615 {
616 struct buffer_head *bh;
617 int i;
618
619 if (priority < 2)
620 sync_buffers(0);
621 bh = free_list;
622 i = nr_buffers >> priority;
623 for ( ; i-- > 0 ; bh = bh->b_next_free) {
624 if (bh->b_count || !bh->b_this_page)
625 continue;
626 if (bh->b_lock)
627 if (priority)
628 continue;
629 else
630 wait_on_buffer(bh);
631 if (bh->b_dirt) {
632 ll_rw_block(WRITEA, 1, &bh);
633 continue;
634 }
635 if (try_to_free(bh))
636 return 1;
637 }
638 return 0;
639 }
640
641
642
643
644
645
646
647
648 void buffer_init(void)
649 {
650 int i;
651
652 for (i = 0 ; i < NR_HASH ; i++)
653 hash_table[i] = NULL;
654 free_list = 0;
655 grow_buffers(BLOCK_SIZE);
656 if (!free_list)
657 panic("Unable to initialize buffer free list!");
658 return;
659 }