This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26
27 #include <asm/system.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_SCSI
31 #ifdef CONFIG_BLK_DEV_SR
32 extern int check_cdrom_media_change(int, int);
33 #endif
34 #ifdef CONFIG_BLK_DEV_SD
35 extern int check_scsidisk_media_change(int, int);
36 extern int revalidate_scsidisk(int, int);
37 #endif
38 #endif
39
40 static struct buffer_head * hash_table[NR_HASH];
41 static struct buffer_head * free_list = NULL;
42 static struct buffer_head * unused_list = NULL;
43 static struct wait_queue * buffer_wait = NULL;
44
45 int nr_buffers = 0;
46 int buffermem = 0;
47 int nr_buffer_heads = 0;
48 static int min_free_pages = 20;
49
50
51
52
53
54
55
56
57
58
/*
 * Slow path of wait_on_buffer(): sleep until the buffer is unlocked.
 * The task puts itself on the buffer's wait queue and marks itself
 * asleep *before* testing b_lock, so an unlock + wake_up happening
 * between the test and schedule() cannot be missed; schedule()
 * returns and the loop re-tests the lock.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	/* set the sleep state before checking the lock: no lost wakeups */
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
73
74 static void sync_buffers(dev_t dev)
75 {
76 int i;
77 struct buffer_head * bh;
78
79 bh = free_list;
80 for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
81 if (dev && bh->b_dev != dev)
82 continue;
83 if (bh->b_lock)
84 continue;
85 if (!bh->b_dirt)
86 continue;
87 ll_rw_block(WRITE, 1, &bh);
88 }
89 }
90
/*
 * Flush all cached state of 'dev' to disk: buffers, superblocks and
 * inodes.  Buffers are synced a second time at the end —
 * NOTE(review): presumably to catch buffers dirtied by
 * sync_supers()/sync_inodes(); confirm.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
98
/*
 * The sync() system call: flush everything on every device
 * (dev 0 == all devices).  Always returns 0.
 */
int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
104
105 void invalidate_buffers(dev_t dev)
106 {
107 int i;
108 struct buffer_head * bh;
109
110 bh = free_list;
111 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
112 if (bh->b_dev != dev)
113 continue;
114 wait_on_buffer(bh);
115 if (bh->b_dev == dev)
116 bh->b_uptodate = bh->b_dirt = 0;
117 }
118 }
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/*
 * Detect a media change on a removable device and, if one occurred,
 * throw away all cached state (superblocks, inodes, buffers) for it.
 * Only floppy (major 2), SCSI disk (major 8) and SCSI CD-ROM
 * (major 11) are handled; any other major returns untouched.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case 2:		/* floppy */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case 8:		/* removable SCSI disk */
		i = check_scsidisk_media_change(dev, 0);
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case 11:	/* SCSI CD-ROM */
		i = check_cdrom_media_change(dev, 0);
		break;
#endif

	default:
		return;
	};

	if (!i) return;		/* no media change detected */

	printk("VFS: Disk change detected on device %d/%d\n",
					MAJOR(dev), MINOR(dev));
	/* drop every superblock mounted on this device... */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	/* ...and forget all cached inodes and buffer contents */
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* NOTE(review): presumably re-reads the partition table of the
	 * removable SCSI disk — confirm against the SCSI driver. */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
180
181 #define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
182 #define hash(dev,block) hash_table[_hashfn(dev,block)]
183
184 static inline void remove_from_hash_queue(struct buffer_head * bh)
185 {
186 if (bh->b_next)
187 bh->b_next->b_prev = bh->b_prev;
188 if (bh->b_prev)
189 bh->b_prev->b_next = bh->b_next;
190 if (hash(bh->b_dev,bh->b_blocknr) == bh)
191 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
192 bh->b_next = bh->b_prev = NULL;
193 }
194
195 static inline void remove_from_free_list(struct buffer_head * bh)
196 {
197 if (!(bh->b_prev_free) || !(bh->b_next_free))
198 panic("VFS: Free block list corrupted");
199 bh->b_prev_free->b_next_free = bh->b_next_free;
200 bh->b_next_free->b_prev_free = bh->b_prev_free;
201 if (free_list == bh)
202 free_list = bh->b_next_free;
203 bh->b_next_free = bh->b_prev_free = NULL;
204 }
205
/*
 * Fully detach a buffer from the cache: take it off both the hash
 * table and the free list.
 */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
211
212 static inline void put_first_free(struct buffer_head * bh)
213 {
214 if (!bh || (bh == free_list))
215 return;
216 remove_from_free_list(bh);
217
218 bh->b_next_free = free_list;
219 bh->b_prev_free = free_list->b_prev_free;
220 free_list->b_prev_free->b_next_free = bh;
221 free_list->b_prev_free = bh;
222 free_list = bh;
223 }
224
/*
 * Move 'bh' to the tail of the free list, making it the last
 * candidate for reuse ("keep this buffer around").
 */
static inline void put_last_free(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == free_list) {
		/* The list is circular, so the head's predecessor is the
		 * tail: simply advancing free_list makes 'bh' the tail
		 * without relinking anything. */
		free_list = bh->b_next_free;
		return;
	}
	remove_from_free_list(bh);
	/* re-insert just before the head, i.e. at the tail */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
}
240
241 static inline void insert_into_queues(struct buffer_head * bh)
242 {
243
244 bh->b_next_free = free_list;
245 bh->b_prev_free = free_list->b_prev_free;
246 free_list->b_prev_free->b_next_free = bh;
247 free_list->b_prev_free = bh;
248
249 bh->b_prev = NULL;
250 bh->b_next = NULL;
251 if (!bh->b_dev)
252 return;
253 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
254 hash(bh->b_dev,bh->b_blocknr) = bh;
255 if (bh->b_next)
256 bh->b_next->b_prev = bh;
257 }
258
259 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
260 {
261 struct buffer_head * tmp;
262
263 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
264 if (tmp->b_dev==dev && tmp->b_blocknr==block)
265 if (tmp->b_size == size)
266 return tmp;
267 else {
268 printk("VFS: Wrong blocksize on device %d/%d\n",
269 MAJOR(dev), MINOR(dev));
270 return NULL;
271 }
272 return NULL;
273 }
274
275
276
277
278
279
280
281
/*
 * Hash lookup with proper reference counting.  b_count is bumped
 * *before* sleeping on the buffer lock so the buffer cannot be
 * reclaimed underneath us; after the wait we must re-check that it
 * still describes the same (dev,block,size) — if it changed identity
 * while we slept, drop the reference and retry the whole lookup.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		/* identity changed while sleeping: release and retry */
		bh->b_count--;
	}
}
296
297
298
299
300
301
302
303
304
305
306
/*
 * BADNESS ranks how costly a buffer is to reclaim: clean+unlocked (0)
 * best, locked (1), dirty (2), dirty+locked (3) worst.
 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * getblk() - find or create the buffer for (dev,block,size), returned
 * with b_count raised.  The contents are NOT necessarily valid: the
 * caller must check b_uptodate.  May sleep at several points; every
 * sleep is followed by re-validation and, on failure, a full restart.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* clean cache hit: send it to the back of the free list
		 * so it is reclaimed last */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* Grow the cache at most one page per 4096 bytes requested,
	 * while free memory and the 6MB buffermem ceiling allow it. */
	grow_size -= size;
	if (nr_free_pages > min_free_pages &&
		buffermem < 6*1024*1024 &&
		grow_size <= 0) {
		grow_buffers(size);
		grow_size = 4096;
	}
	buffers = nr_buffers;
	bh = NULL;

	/* Scan the free list for the cheapest (lowest BADNESS) unused
	 * buffer of the right size whose page isn't shared. */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* clean and unlocked: ideal */
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	/* No victim found: try growing again, else sleep until a
	 * brelse() wakes us, and restart from scratch. */
	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	/* Any of the above may have slept: re-validate the victim,
	 * flushing its device first if it went dirty. */
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
	/* Someone else may have created the block while we slept. */
	if (find_buffer(dev,block,size))
		goto repeat;
	/* The buffer is unused, clean, unlocked and unique: claim it
	 * and rehash it under its new identity. */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
379
380 void brelse(struct buffer_head * buf)
381 {
382 if (!buf)
383 return;
384 wait_on_buffer(buf);
385 if (buf->b_count) {
386 if (--buf->b_count)
387 return;
388 wake_up(&buffer_wait);
389 return;
390 }
391 printk("VFS: brelse: Trying to free free buffer\n");
392 }
393
394
395
396
397
398 struct buffer_head * bread(dev_t dev, int block, int size)
399 {
400 struct buffer_head * bh;
401
402 if (!(bh = getblk(dev, block, size))) {
403 printk("VFS: bread: READ error on device %d/%d\n",
404 MAJOR(dev), MINOR(dev));
405 return NULL;
406 }
407 if (bh->b_uptodate)
408 return bh;
409 ll_rw_block(READ, 1, &bh);
410 wait_on_buffer(bh);
411 if (bh->b_uptodate)
412 return bh;
413 brelse(bh);
414 return NULL;
415 }
416
417
418
419
420
421
422 struct buffer_head * breada(dev_t dev,int first, ...)
423 {
424 va_list args;
425 struct buffer_head * bh, *tmp;
426
427 va_start(args,first);
428 if (!(bh = getblk(dev, first, 1024))) {
429 printk("VFS: breada: READ error on device %d/%d\n",
430 MAJOR(dev), MINOR(dev));
431 return NULL;
432 }
433 if (!bh->b_uptodate)
434 ll_rw_block(READ, 1, &bh);
435 while ((first=va_arg(args,int))>=0) {
436 tmp = getblk(dev, first, 1024);
437 if (tmp) {
438 if (!tmp->b_uptodate)
439 ll_rw_block(READA, 1, &tmp);
440 tmp->b_count--;
441 }
442 }
443 va_end(args);
444 wait_on_buffer(bh);
445 if (bh->b_uptodate)
446 return bh;
447 brelse(bh);
448 return (NULL);
449 }
450
451
452
453
/*
 * Return a buffer head to the unused pool.  The struct is wiped
 * except for its wait queue, which is preserved across the memset —
 * NOTE(review): the volatile casts presumably keep the compiler from
 * folding these accesses into the memset; sleepers may still be
 * queued on b_wait.  Confirm.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
464
465 static void get_more_buffer_heads(void)
466 {
467 unsigned long page;
468 struct buffer_head * bh;
469
470 if (unused_list)
471 return;
472 page = get_free_page(GFP_KERNEL);
473 if (!page)
474 return;
475 bh = (struct buffer_head *) page;
476 while ((unsigned long) (bh+1) <= page+4096) {
477 put_unused_buffer_head(bh);
478 bh++;
479 nr_buffer_heads++;
480 }
481 }
482
483 static struct buffer_head * get_unused_buffer_head(void)
484 {
485 struct buffer_head * bh;
486
487 get_more_buffer_heads();
488 if (!unused_list)
489 return NULL;
490 bh = unused_list;
491 unused_list = bh->b_next_free;
492 bh->b_next_free = NULL;
493 bh->b_data = NULL;
494 bh->b_size = 0;
495 return bh;
496 }
497
/*
 * Placeholder for sharing buffer-cache pages directly with read-only
 * user mappings: not implemented, so always report failure (0) and
 * let bread_page() fall back to copying.
 */
static inline unsigned long try_to_share_buffers(unsigned long address, dev_t dev, int b[], int size)
{
	return 0;
}
502
503 #define COPYBLK(from,to) \
504 __asm__ __volatile__("rep ; movsl" \
505 ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
506 :"cx","di","si")
507
508
509
510
511
512
513
514
/*
 * Read the four blocks listed in b[] (an entry of 0 is a hole and is
 * skipped) into the page at 'address', returning the address used.
 * All outstanding reads are submitted in a single ll_rw_block() call
 * before any waiting/copying, so the device I/O overlaps.  For
 * read-only mappings a page-sharing shortcut is tried first (it
 * currently always fails).  Assumes four BLOCK_SIZE blocks per page.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
	struct buffer_head * bh[4];
	struct buffer_head * bhr[4];
	unsigned long where;
	int bhnum = 0;
	int i;

	if (!(prot & PAGE_RW)) {
		where = try_to_share_buffers(address,dev,b,size);
		if (where)
			return where;
	}
	/* gather buffers, noting which still need to be read */
	for (i=0 ; i<4 ; i++) {
		bh[i] = NULL;
		if (b[i]) {
			bh[i] = getblk(dev, b[i], size);
			if (bh[i] && !bh[i]->b_uptodate)
				bhr[bhnum++] = bh[i];
		}
	}
	/* fire off all needed reads at once */
	if (bhnum)
		ll_rw_block(READ, bhnum, bhr);
	where = address;
	/* wait for each block, copy it into the page, release it */
	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE) {
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
	}
	return where;
}
549
550
551
552
553
554
555
/*
 * Add one page worth of buffers of the given size to the cache.
 * 'size' must be a multiple of 512 and at most 4096.  The page's
 * buffer heads are linked into a ring via b_this_page, and each is
 * pushed onto the front of the free list.  If the buffer heads run
 * out part-way, everything allocated so far is undone.
 */
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > 4096)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;
	/* Carve the page into buffers, chaining heads backwards through
	 * b_this_page; after the loop 'bh' is the last head created and
	 * 'tmp' the start of the chain (same head). */
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;
		tmp = bh;
		bh->b_data = (char * ) (page+i);
		bh->b_size = size;
	}
	tmp = bh;
	/* Walk the chain, inserting each head at the front of the
	 * (circular) free list. */
	while (1) {
		if (free_list) {
			tmp->b_next_free = free_list;
			tmp->b_prev_free = free_list->b_prev_free;
			free_list->b_prev_free->b_next_free = tmp;
			free_list->b_prev_free = tmp;
		} else {
			/* very first buffer: a ring of one */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	/* close the per-page ring: last head points back to the first */
	tmp->b_this_page = bh;
	buffermem += 4096;
	return;

	/* Ran out of buffer heads: return the ones we took and free
	 * the page. */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
}
613
614
615
616
617
/*
 * Try to give the page containing 'bh' back to the memory manager.
 * Succeeds (returns 1) only if the page is unshared and every buffer
 * in its b_this_page ring is unused, clean and unlocked; in that
 * case all those buffer heads are torn down and the page freed.
 * Returns 0 when anything on the page is still busy.
 */
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;	/* round down to the 4kB page start */
	if (mem_map[MAP_NR(page)] != 1)
		return 0;	/* page is shared: not ours to free */
	/* first pass: verify every buffer on the page is idle */
	tmp = bh;
	do {
		if (!tmp)
			return 0;	/* incomplete ring: bail out */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	/* second pass: unlink and recycle each buffer head */
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= 4096;
	free_page(page);
	return 1;
}
647
648
649
650
651
652
653
654
655 int shrink_buffers(unsigned int priority)
656 {
657 struct buffer_head *bh;
658 int i;
659
660 if (priority < 2)
661 sync_buffers(0);
662 bh = free_list;
663 i = nr_buffers >> priority;
664 for ( ; i-- > 0 ; bh = bh->b_next_free) {
665 if (bh->b_count || !bh->b_this_page)
666 continue;
667 if (bh->b_lock)
668 if (priority)
669 continue;
670 else
671 wait_on_buffer(bh);
672 if (bh->b_dirt) {
673 ll_rw_block(WRITEA, 1, &bh);
674 continue;
675 }
676 if (try_to_free(bh))
677 return 1;
678 }
679 return 0;
680 }
681
682
683
684
685
686
687
688
689 void buffer_init(void)
690 {
691 int i;
692
693 if (high_memory >= 4*1024*1024)
694 min_free_pages = 200;
695 else
696 min_free_pages = 20;
697 for (i = 0 ; i < NR_HASH ; i++)
698 hash_table[i] = NULL;
699 free_list = 0;
700 grow_buffers(BLOCK_SIZE);
701 if (!free_list)
702 panic("VFS: Unable to initialize buffer free list!");
703 return;
704 }