This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- bread_page
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26
27 #include <asm/system.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_SCSI
31 #ifdef CONFIG_BLK_DEV_SR
32 extern int check_cdrom_media_change(int, int);
33 #endif
34 #ifdef CONFIG_BLK_DEV_SD
35 extern int check_scsidisk_media_change(int, int);
36 extern int revalidate_scsidisk(int, int);
37 #endif
38 #endif
39
40 static struct buffer_head * hash_table[NR_HASH];
41 static struct buffer_head * free_list = NULL;
42 static struct buffer_head * unused_list = NULL;
43 static struct wait_queue * buffer_wait = NULL;
44
45 int nr_buffers = 0;
46 int nr_buffer_heads = 0;
47
48
49
50
51
52
53
54
55
56
57 void __wait_on_buffer(struct buffer_head * bh)
58 {
59 add_wait_queue(&bh->b_wait,¤t->wait);
60 repeat:
61 current->state = TASK_UNINTERRUPTIBLE;
62 if (bh->b_lock) {
63 schedule();
64 goto repeat;
65 }
66 remove_wait_queue(&bh->b_wait,¤t->wait);
67 current->state = TASK_RUNNING;
68 }
69
/*
 * Start write-out of every dirty, unlocked buffer.
 *
 * NOTE(review): the 'dev' argument is accepted but never used -- this
 * writes dirty buffers for ALL devices.  sys_sync() reaches here via
 * sync_dev(0) and relies on that, so do not add per-device filtering
 * without auditing every caller.
 *
 * The free list is circular; iterating nr_buffers*2 times walks it up
 * to twice -- presumably so buffers repositioned during the walk still
 * get a chance to be seen (TODO confirm intent).
 */
static void sync_buffers(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	bh = free_list;
	for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
		if (bh->b_lock)		/* never touch a locked buffer */
			continue;
		if (!bh->b_dirt)
			continue;
		ll_rw_block(WRITE,bh);	/* asynchronous write-out */
	}
}
84
/*
 * Sync everything for one device: buffers, super-blocks, inodes.
 * Buffers are synced both before and after the super/inode pass --
 * presumably because sync_supers()/sync_inodes() can dirty buffers
 * again (TODO confirm against those routines).
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
92
/*
 * The sync(2) system call.  Passes dev 0 to sync_dev(); sync_buffers()
 * visibly ignores its dev argument, so all buffers are flushed --
 * presumably 0 selects "all devices" for the super/inode passes too.
 * Always returns 0.
 */
int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
98
99 void invalidate_buffers(dev_t dev)
100 {
101 int i;
102 struct buffer_head * bh;
103
104 bh = free_list;
105 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
106 if (bh->b_dev != dev)
107 continue;
108 wait_on_buffer(bh);
109 if (bh->b_dev == dev)
110 bh->b_uptodate = bh->b_dirt = 0;
111 }
112 }
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128 void check_disk_change(dev_t dev)
129 {
130 int i;
131 struct buffer_head * bh;
132
133 switch(MAJOR(dev)){
134 case 2:
135 if (!(bh = getblk(dev,0,1024)))
136 return;
137 i = floppy_change(bh);
138 brelse(bh);
139 break;
140
141 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
142 case 8:
143 i = check_scsidisk_media_change(dev, 0);
144 if (i) printk("Flushing buffers and inodes for SCSI disk\n");
145 break;
146 #endif
147
148 #if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
149 case 11:
150 i = check_cdrom_media_change(dev, 0);
151 if (i) printk("Flushing buffers and inodes for CDROM\n");
152 break;
153 #endif
154
155 default:
156 return;
157 };
158
159 if (!i) return;
160
161 for (i=0 ; i<NR_SUPER ; i++)
162 if (super_block[i].s_dev == dev)
163 put_super(super_block[i].s_dev);
164 invalidate_inodes(dev);
165 invalidate_buffers(dev);
166
167 #if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
168
169
170 if (MAJOR(dev) == 8)
171 revalidate_scsidisk(dev, 0);
172 #endif
173 }
174
/* Hash a (dev,block) pair to one of the NR_HASH chain heads. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
177
178 static inline void remove_from_hash_queue(struct buffer_head * bh)
179 {
180 if (bh->b_next)
181 bh->b_next->b_prev = bh->b_prev;
182 if (bh->b_prev)
183 bh->b_prev->b_next = bh->b_next;
184 if (hash(bh->b_dev,bh->b_blocknr) == bh)
185 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
186 bh->b_next = bh->b_prev = NULL;
187 }
188
189 static inline void remove_from_free_list(struct buffer_head * bh)
190 {
191 if (!(bh->b_prev_free) || !(bh->b_next_free))
192 panic("Free block list corrupted");
193 bh->b_prev_free->b_next_free = bh->b_next_free;
194 bh->b_next_free->b_prev_free = bh->b_prev_free;
195 if (free_list == bh)
196 free_list = bh->b_next_free;
197 bh->b_next_free = bh->b_prev_free = NULL;
198 }
199
/*
 * Unhook a buffer from both the hash chain and the free list.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
205
206 static inline void put_first_free(struct buffer_head * bh)
207 {
208 if (!bh || (bh == free_list))
209 return;
210 remove_from_free_list(bh);
211
212 bh->b_next_free = free_list;
213 bh->b_prev_free = free_list->b_prev_free;
214 free_list->b_prev_free->b_next_free = bh;
215 free_list->b_prev_free = bh;
216 free_list = bh;
217 }
218
/*
 * Move a buffer to the tail of the circular free list (least attractive
 * candidate for reclaim).  If the buffer is currently the list head,
 * simply advancing free_list one step makes it the tail -- no relinking
 * is needed on a circular list.
 */
static inline void put_last_free(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == free_list) {
		free_list = bh->b_next_free;	/* old head becomes the tail */
		return;
	}
	remove_from_free_list(bh);
	/* re-insert just before the head == at the tail */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
}
234
/*
 * Add a (freshly keyed) buffer to the cache: link it at the tail of
 * the circular free list and, if it maps a device block, push it on
 * the front of its hash chain.  Buffers with b_dev == 0 are not hashed.
 */
static inline void insert_into_queues(struct buffer_head * bh)
{
	/* at the end of the free list (just before the head) */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;

	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;		/* anonymous buffer: not hashable */
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}
252
253 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
254 {
255 struct buffer_head * tmp;
256
257 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
258 if (tmp->b_dev==dev && tmp->b_blocknr==block)
259 if (tmp->b_size == size)
260 return tmp;
261 else {
262 printk("wrong block-size on device %04x\n",dev);
263 return NULL;
264 }
265 return NULL;
266 }
267
268
269
270
271
272
273
274
/*
 * Look up (dev,block,size) in the hash table and return the buffer
 * with its reference count incremented, or NULL if not cached.
 *
 * The count is bumped BEFORE sleeping on the buffer lock, so the
 * buffer cannot be reclaimed underneath us; after waking we must
 * re-check identity, since the buffer may have been re-keyed while
 * it was locked.  On a lost race we drop the reference and retry.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;		/* pin before we can sleep */
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size) {
			put_last_free(bh);	/* in use: worst reclaim candidate */
			return bh;
		}
		bh->b_count--;		/* lost the race -- retry lookup */
	}
}
291
292
293
294
295
296
297
298
299
300
301
/* Reclaim cost of a buffer: dirty is worse than locked, clean+unlocked is 0. */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Return a (referenced) buffer for (dev,block) of the given size,
 * either found in the cache or built by re-keying the least "bad"
 * free buffer.  Never returns NULL from the cache-miss path: it
 * grows the cache or sleeps on buffer_wait until one is available.
 *
 * Every sleep (grow, wait_on_buffer, sync_buffers, sleep_on) is
 * followed by a 'goto repeat' or a re-check, because the world can
 * change while we sleep.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if (bh = get_hash_table(dev, block, size))
		return bh;

	/* plenty of free pages: grow the cache opportunistically */
	if (nr_free_pages > 30)
		grow_buffers(size);

	buffers = nr_buffers;
	bh = NULL;

	/* pick the cheapest-to-steal free buffer of the right size */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* clean and unlocked: can't do better */
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
#endif
	}

	/* nothing stealable but some memory left: grow and retry */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

	/* nothing at all: sleep until brelse() wakes us, then retry */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);	/* may sleep; state can change under us */
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}

	/* someone may have cached this very block while we slept */
	if (find_buffer(dev,block,size))
		goto repeat;

	/* it's ours: re-key the buffer and hash it in */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
364
/*
 * Release a reference obtained from getblk()/bread().  NULL is a
 * no-op.  Panics if the buffer was already at count 0 (double
 * release).  Wakes any getblk() sleeping on buffer_wait, since a
 * buffer may just have become stealable.
 */
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);
	if (!(buf->b_count--))
		panic("Trying to free free buffer");
	wake_up(&buffer_wait);
}
374
375
376
377
378
379 struct buffer_head * bread(dev_t dev, int block, int size)
380 {
381 struct buffer_head * bh;
382
383 if (!(bh = getblk(dev, block, size))) {
384 printk("bread: getblk returned NULL\n");
385 return NULL;
386 }
387 if (bh->b_uptodate)
388 return bh;
389 ll_rw_block(READ,bh);
390 wait_on_buffer(bh);
391 if (bh->b_uptodate)
392 return bh;
393 brelse(bh);
394 return NULL;
395 }
396
/*
 * Copy one BLOCK_SIZE block from 'from' to 'to' as BLOCK_SIZE/4
 * longword moves via rep/movsl.
 * NOTE(review): old-style i386 asm with short clobber names
 * ("cx","di","si") -- assembles only on era-appropriate compilers;
 * modern gcc expects "ecx","edi","esi" or output constraints.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")
403
404
405
406
407
408
409
/*
 * Read one page's worth of data (four 1024-byte blocks, numbers in
 * b[0..3]) into the page at 'address'.  A zero entry in b[] leaves
 * that quarter of the page untouched.  All reads are STARTED first,
 * then each is waited for and copied, so the four I/Os can overlap.
 * Buffers are released before returning; blocks whose read failed
 * (not up to date) are simply not copied.
 */
void bread_page(unsigned long address, dev_t dev, int b[4])
{
	struct buffer_head * bh[4];
	int i;

	/* pass 1: kick off the reads (note: assignment in condition) */
	for (i=0 ; i<4 ; i++)
		if (b[i]) {
			if (bh[i] = getblk(dev, b[i], 1024))
				if (!bh[i]->b_uptodate)
					ll_rw_block(READ,bh[i]);
		} else
			bh[i] = NULL;
	/* pass 2: wait, copy, release */
	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
}
430
431
432
433
434
435
436 struct buffer_head * breada(dev_t dev,int first, ...)
437 {
438 va_list args;
439 struct buffer_head * bh, *tmp;
440
441 va_start(args,first);
442 if (!(bh = getblk(dev, first, 1024))) {
443 printk("breada: getblk returned NULL\n");
444 return NULL;
445 }
446 if (!bh->b_uptodate)
447 ll_rw_block(READ,bh);
448 while ((first=va_arg(args,int))>=0) {
449 tmp = getblk(dev, first, 1024);
450 if (tmp) {
451 if (!tmp->b_uptodate)
452 ll_rw_block(READA,tmp);
453 tmp->b_count--;
454 }
455 }
456 va_end(args);
457 wait_on_buffer(bh);
458 if (bh->b_uptodate)
459 return bh;
460 brelse(bh);
461 return (NULL);
462 }
463
/*
 * Zero a buffer head and push it onto the unused_list stack for reuse.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	memset((void *) bh,0,sizeof(*bh));
	bh->b_next_free = unused_list;
	unused_list = bh;
}
470
/*
 * Refill unused_list by carving buffer heads out of one fresh page.
 * No-op if heads are still available; silently does nothing if no
 * page can be had (callers must cope with an empty unused_list).
 * NOTE(review): the page size is hard-coded as 4096 -- verify against
 * the architecture's page size if this is ever ported.
 */
static void get_more_buffer_heads(void)
{
	unsigned long page;
	struct buffer_head * bh;

	if (unused_list)
		return;
	page = get_free_page(GFP_KERNEL);
	if (!page)
		return;
	bh = (struct buffer_head *) page;
	/* pack as many whole heads as fit in the page */
	while ((unsigned long) (bh+1) <= page+4096) {
		put_unused_buffer_head(bh);
		bh++;
		nr_buffer_heads++;
	}
}
488
489 static struct buffer_head * get_unused_buffer_head(void)
490 {
491 struct buffer_head * bh;
492
493 get_more_buffer_heads();
494 if (!unused_list)
495 return NULL;
496 bh = unused_list;
497 unused_list = bh->b_next_free;
498 bh->b_next_free = NULL;
499 bh->b_data = NULL;
500 bh->b_size = 0;
501 return bh;
502 }
503
504
505
506
507
508
509
510 void grow_buffers(int size)
511 {
512 unsigned long page;
513 int i;
514 struct buffer_head *bh, *tmp;
515
516 if ((size & 511) || (size > 4096)) {
517 printk("grow_buffers: size = %d\n",size);
518 return;
519 }
520 page = get_free_page(GFP_BUFFER);
521 if (!page)
522 return;
523 tmp = NULL;
524 i = 0;
525 for (i = 0 ; i+size <= 4096 ; i += size) {
526 bh = get_unused_buffer_head();
527 if (!bh)
528 goto no_grow;
529 bh->b_this_page = tmp;
530 tmp = bh;
531 bh->b_data = (char * ) (page+i);
532 bh->b_size = size;
533 }
534 tmp = bh;
535 while (1) {
536 if (free_list) {
537 tmp->b_next_free = free_list;
538 tmp->b_prev_free = free_list->b_prev_free;
539 free_list->b_prev_free->b_next_free = tmp;
540 free_list->b_prev_free = tmp;
541 } else {
542 tmp->b_prev_free = tmp;
543 tmp->b_next_free = tmp;
544 }
545 free_list = tmp;
546 ++nr_buffers;
547 if (tmp->b_this_page)
548 tmp = tmp->b_this_page;
549 else
550 break;
551 }
552 tmp->b_this_page = bh;
553 return;
554
555
556
557 no_grow:
558 bh = tmp;
559 while (bh) {
560 tmp = bh;
561 bh = bh->b_this_page;
562 put_unused_buffer_head(tmp);
563 }
564 free_page(page);
565 }
566
567
568
569
570
/*
 * Try to free the whole page that 'bh' lives in, by releasing every
 * buffer head on that page.  Fails (returns 0) if any buffer on the
 * page is referenced, dirty, or locked; returns 1 after freeing the
 * page.  Relies on b_this_page forming a circular chain over the page.
 * NOTE(review): the 0xfffff000 mask assumes 4 KB pages and 32-bit
 * addresses -- i386-specific.
 */
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	/* pass 1: every buffer sharing the page must be idle */
	tmp = bh;
	do {
		if (!tmp)
			return 0;	/* broken chain: play safe */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;	/* round down to the page start */
	/* pass 2: unhook and recycle every head, then free the page */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	free_page(page);
	return 1;
}
597
598
599
600
601
602
603
604
605 int shrink_buffers(unsigned int priority)
606 {
607 struct buffer_head *bh;
608 int i;
609
610 if (priority < 2)
611 sync_buffers(0);
612 bh = free_list;
613 i = nr_buffers >> priority;
614 for ( ; i-- > 0 ; bh = bh->b_next_free) {
615 if (bh->b_count || !bh->b_this_page)
616 continue;
617 if (bh->b_lock)
618 if (priority)
619 continue;
620 else
621 wait_on_buffer(bh);
622 if (bh->b_dirt) {
623 ll_rw_block(WRITEA,bh);
624 continue;
625 }
626 if (try_to_free(bh))
627 return 1;
628 }
629 return 0;
630 }
631
632
633
634
635
636
637
638
639 void buffer_init(void)
640 {
641 int i;
642
643 for (i = 0 ; i < NR_HASH ; i++)
644 hash_table[i] = NULL;
645 free_list = 0;
646 grow_buffers(BLOCK_SIZE);
647 if (!free_list)
648 panic("Unable to initialize buffer free list!");
649 return;
650 }