This source file includes following definitions.
- __wait_on_buffer
- sync_buffers
- sync_dev
- sys_sync
- sys_fsync
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- try_to_share_buffers
- bread_page
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <stdarg.h>
20
21 #include <linux/config.h>
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/locks.h>
26 #include <linux/errno.h>
27
28 #include <asm/system.h>
29 #include <asm/io.h>
30
31 #ifdef CONFIG_SCSI
32 #ifdef CONFIG_BLK_DEV_SR
33 extern int check_cdrom_media_change(int, int);
34 #endif
35 #ifdef CONFIG_BLK_DEV_SD
36 extern int check_scsidisk_media_change(int, int);
37 extern int revalidate_scsidisk(int, int);
38 #endif
39 #endif
40
41 static struct buffer_head * hash_table[NR_HASH];
42 static struct buffer_head * free_list = NULL;
43 static struct buffer_head * unused_list = NULL;
44 static struct wait_queue * buffer_wait = NULL;
45
46 int nr_buffers = 0;
47 int buffermem = 0;
48 int nr_buffer_heads = 0;
49 static int min_free_pages = 20;
50
51
52
53
54
55
56
57
58
59
/*
 * Wait until the buffer is unlocked.  Note the careful ordering: the
 * task state is set to TASK_UNINTERRUPTIBLE *before* b_lock is tested,
 * so a wake_up() arriving in between cannot be lost - schedule() will
 * simply return immediately if the lock was already released.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&bh->b_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (bh->b_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&bh->b_wait, &wait);
	current->state = TASK_RUNNING;
}
74
75 static void sync_buffers(dev_t dev)
76 {
77 int i;
78 struct buffer_head * bh;
79
80 bh = free_list;
81 for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
82 if (dev && bh->b_dev != dev)
83 continue;
84 if (bh->b_lock)
85 continue;
86 if (!bh->b_dirt)
87 continue;
88 ll_rw_block(WRITE, 1, &bh);
89 }
90 }
91
/*
 * Write out all dirty data for one device (0 = all devices).  Buffers
 * are synced twice on purpose: writing the superblocks and inodes
 * dirties buffers again, so a second pass pushes those out too.
 */
void sync_dev(dev_t dev)
{
	sync_buffers(dev);
	sync_supers(dev);
	sync_inodes(dev);
	sync_buffers(dev);
}
99
/* sync() system call: flush everything for all devices. */
int sys_sync(void)
{
	sync_dev(0);
	return 0;
}
105
/* fsync() system call: not implemented yet. */
int sys_fsync(int fd)
{
	return -ENOSYS;
}
110
111 void invalidate_buffers(dev_t dev)
112 {
113 int i;
114 struct buffer_head * bh;
115
116 bh = free_list;
117 for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
118 if (bh->b_dev != dev)
119 continue;
120 wait_on_buffer(bh);
121 if (bh->b_dev == dev)
122 bh->b_uptodate = bh->b_dirt = 0;
123 }
124 }
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
/*
 * Check whether removable media in `dev' has been changed, and if so
 * throw away all cached state (superblocks, inodes, buffers) for it.
 * Only floppies (major 2) and, when configured, SCSI disks (major 8)
 * and SCSI CD-ROMs (major 11) are handled; other majors are ignored.
 */
void check_disk_change(dev_t dev)
{
	int i;
	struct buffer_head * bh;

	switch(MAJOR(dev)){
	case 2: /* floppy */
		if (!(bh = getblk(dev,0,1024)))
			return;
		i = floppy_change(bh);
		brelse(bh);
		break;

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	case 8: /* removable SCSI disk */
		i = check_scsidisk_media_change(dev, 0);
		break;
#endif

#if defined(CONFIG_BLK_DEV_SR) && defined(CONFIG_SCSI)
	case 11: /* SCSI CD-ROM */
		i = check_cdrom_media_change(dev, 0);
		break;
#endif

	default:
		return;
	};

	if (!i) return;	/* no media change detected */

	printk("VFS: Disk change detected on device %d/%d\n",
		MAJOR(dev), MINOR(dev));
	/* drop every mounted superblock on this device */
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);

#if defined(CONFIG_BLK_DEV_SD) && defined(CONFIG_SCSI)
	/* a removable SCSI disk needs its partition table re-read
	   after the media changed */
	if (MAJOR(dev) == 8)
		revalidate_scsidisk(dev, 0);
#endif
}
186
/* Hash a (device, block) pair to one of the NR_HASH chains. */
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
189
190 static inline void remove_from_hash_queue(struct buffer_head * bh)
191 {
192 if (bh->b_next)
193 bh->b_next->b_prev = bh->b_prev;
194 if (bh->b_prev)
195 bh->b_prev->b_next = bh->b_next;
196 if (hash(bh->b_dev,bh->b_blocknr) == bh)
197 hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
198 bh->b_next = bh->b_prev = NULL;
199 }
200
201 static inline void remove_from_free_list(struct buffer_head * bh)
202 {
203 if (!(bh->b_prev_free) || !(bh->b_next_free))
204 panic("VFS: Free block list corrupted");
205 bh->b_prev_free->b_next_free = bh->b_next_free;
206 bh->b_next_free->b_prev_free = bh->b_prev_free;
207 if (free_list == bh)
208 free_list = bh->b_next_free;
209 bh->b_next_free = bh->b_prev_free = NULL;
210 }
211
/* Unlink a buffer from both the hash chain and the free list. */
static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}
217
218 static inline void put_first_free(struct buffer_head * bh)
219 {
220 if (!bh || (bh == free_list))
221 return;
222 remove_from_free_list(bh);
223
224 bh->b_next_free = free_list;
225 bh->b_prev_free = free_list->b_prev_free;
226 free_list->b_prev_free->b_next_free = bh;
227 free_list->b_prev_free = bh;
228 free_list = bh;
229 }
230
231 static inline void put_last_free(struct buffer_head * bh)
232 {
233 if (!bh)
234 return;
235 if (bh == free_list) {
236 free_list = bh->b_next_free;
237 return;
238 }
239 remove_from_free_list(bh);
240
241 bh->b_next_free = free_list;
242 bh->b_prev_free = free_list->b_prev_free;
243 free_list->b_prev_free->b_next_free = bh;
244 free_list->b_prev_free = bh;
245 }
246
247 static inline void insert_into_queues(struct buffer_head * bh)
248 {
249
250 bh->b_next_free = free_list;
251 bh->b_prev_free = free_list->b_prev_free;
252 free_list->b_prev_free->b_next_free = bh;
253 free_list->b_prev_free = bh;
254
255 bh->b_prev = NULL;
256 bh->b_next = NULL;
257 if (!bh->b_dev)
258 return;
259 bh->b_next = hash(bh->b_dev,bh->b_blocknr);
260 hash(bh->b_dev,bh->b_blocknr) = bh;
261 if (bh->b_next)
262 bh->b_next->b_prev = bh;
263 }
264
265 static struct buffer_head * find_buffer(dev_t dev, int block, int size)
266 {
267 struct buffer_head * tmp;
268
269 for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
270 if (tmp->b_dev==dev && tmp->b_blocknr==block)
271 if (tmp->b_size == size)
272 return tmp;
273 else {
274 printk("VFS: Wrong blocksize on device %d/%d\n",
275 MAJOR(dev), MINOR(dev));
276 return NULL;
277 }
278 return NULL;
279 }
280
281
282
283
284
285
286
287
/*
 * Look up a buffer in the hash table and return it with its reference
 * count raised.  b_count is incremented *before* sleeping in
 * wait_on_buffer() so the buffer cannot be reused out from under us;
 * if its identity changed while we slept, drop the reference and
 * retry the lookup.
 */
struct buffer_head * get_hash_table(dev_t dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
			return bh;
		bh->b_count--;
	}
}
302
303
304
305
306
307
308
309
310
311
312
/* eviction cost of a candidate buffer: dirty costs 2, locked costs 1 */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
/*
 * Get a buffer for (dev,block), reusing or creating one if it is not
 * already cached.  The returned buffer has b_count raised but is NOT
 * guaranteed up to date - callers that need the data must read it
 * (see bread()).  May sleep; after every sleep we "goto repeat"
 * because the world may have changed meanwhile.
 */
struct buffer_head * getblk(dev_t dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;
	static int grow_size = 0;	/* bytes handed out since last grow */

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		/* a clean, up-to-date buffer is a cheap eviction victim,
		   so park it at the tail of the free list */
		if (bh->b_uptodate && !bh->b_dirt)
			put_last_free(bh);
		return bh;
	}
	/* grow the cache roughly every 4kB of misses while free memory
	   is plentiful and buffer memory is below 6MB */
	grow_size -= size;
	if (nr_free_pages > min_free_pages &&
	    buffermem < 6*1024*1024 &&
	    grow_size <= 0) {
		grow_buffers(size);
		grow_size = 4096;
	}
	buffers = nr_buffers;
	bh = NULL;

	/* scan the free list for the least "bad" unused buffer of the
	   right size whose page is not shared with anyone else */
	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (mem_map[MAP_NR((unsigned long) tmp->b_data)] != 1)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;	/* clean and unlocked: ideal */
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA, 1, &tmp);
#endif
	}

	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

	/* nothing reusable and no memory to grow with: wait for a
	   brelse() to wake us */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;	/* stolen while we slept */
	if (bh->b_dirt) {
		sync_buffers(0);
		goto repeat;
	}
	/* someone may have loaded the same block while we slept */
	if (find_buffer(dev,block,size))
		goto repeat;

	/* OK, it is ours: re-label the buffer and rehash it */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}
385
386 void brelse(struct buffer_head * buf)
387 {
388 if (!buf)
389 return;
390 wait_on_buffer(buf);
391 if (buf->b_count) {
392 if (--buf->b_count)
393 return;
394 wake_up(&buffer_wait);
395 return;
396 }
397 printk("VFS: brelse: Trying to free free buffer\n");
398 }
399
400
401
402
403
404 struct buffer_head * bread(dev_t dev, int block, int size)
405 {
406 struct buffer_head * bh;
407
408 if (!(bh = getblk(dev, block, size))) {
409 printk("VFS: bread: READ error on device %d/%d\n",
410 MAJOR(dev), MINOR(dev));
411 return NULL;
412 }
413 if (bh->b_uptodate)
414 return bh;
415 ll_rw_block(READ, 1, &bh);
416 wait_on_buffer(bh);
417 if (bh->b_uptodate)
418 return bh;
419 brelse(bh);
420 return NULL;
421 }
422
423
424
425
426
427
428 struct buffer_head * breada(dev_t dev,int first, ...)
429 {
430 va_list args;
431 struct buffer_head * bh, *tmp;
432
433 va_start(args,first);
434 if (!(bh = getblk(dev, first, 1024))) {
435 printk("VFS: breada: READ error on device %d/%d\n",
436 MAJOR(dev), MINOR(dev));
437 return NULL;
438 }
439 if (!bh->b_uptodate)
440 ll_rw_block(READ, 1, &bh);
441 while ((first=va_arg(args,int))>=0) {
442 tmp = getblk(dev, first, 1024);
443 if (tmp) {
444 if (!tmp->b_uptodate)
445 ll_rw_block(READA, 1, &tmp);
446 tmp->b_count--;
447 }
448 }
449 va_end(args);
450 wait_on_buffer(bh);
451 if (bh->b_uptodate)
452 return bh;
453 brelse(bh);
454 return (NULL);
455 }
456
457
458
459
/*
 * Return a buffer head to the unused list.  The b_wait queue pointer
 * is preserved across the memset: someone may still be sleeping on
 * it.  The volatile-qualified accesses keep the compiler from
 * reordering or caching the load/store around the memset.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	struct wait_queue * wait;

	wait = ((volatile struct buffer_head *) bh)->b_wait;
	memset((void *) bh,0,sizeof(*bh));
	((volatile struct buffer_head *) bh)->b_wait = wait;
	bh->b_next_free = unused_list;
	unused_list = bh;
}
470
471 static void get_more_buffer_heads(void)
472 {
473 unsigned long page;
474 struct buffer_head * bh;
475
476 if (unused_list)
477 return;
478 page = get_free_page(GFP_KERNEL);
479 if (!page)
480 return;
481 bh = (struct buffer_head *) page;
482 while ((unsigned long) (bh+1) <= page+4096) {
483 put_unused_buffer_head(bh);
484 bh++;
485 nr_buffer_heads++;
486 }
487 }
488
489 static struct buffer_head * get_unused_buffer_head(void)
490 {
491 struct buffer_head * bh;
492
493 get_more_buffer_heads();
494 if (!unused_list)
495 return NULL;
496 bh = unused_list;
497 unused_list = bh->b_next_free;
498 bh->b_next_free = NULL;
499 bh->b_data = NULL;
500 bh->b_size = 0;
501 return bh;
502 }
503
/*
 * Try to map the page at `address' directly onto already-resident
 * buffers instead of copying.  Not implemented: always returns 0,
 * which makes bread_page() fall back to reading and copying.
 */
static inline unsigned long try_to_share_buffers(unsigned long address, dev_t dev, int b[], int size)
{
	return 0;
}
508
/*
 * Copy one BLOCK_SIZE block with "rep movsl" (i386 inline asm,
 * BLOCK_SIZE/4 longword moves).  Source and destination must not
 * overlap.
 */
#define COPYBLK(from,to) \
__asm__ __volatile__("rep ; movsl" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")
513
514
515
516
517
518
519
520
521 unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
522 {
523 struct buffer_head * bh[4];
524 struct buffer_head * bhr[4];
525 unsigned long where;
526 int bhnum = 0;
527 int i;
528
529 if (!(prot & PAGE_RW)) {
530 where = try_to_share_buffers(address,dev,b,size);
531 if (where)
532 return where;
533 }
534 for (i=0 ; i<4 ; i++) {
535 bh[i] = NULL;
536 if (b[i]) {
537 bh[i] = getblk(dev, b[i], size);
538 if (bh[i] && !bh[i]->b_uptodate)
539 bhr[bhnum++] = bh[i];
540 }
541 }
542 if (bhnum)
543 ll_rw_block(READ, bhnum, bhr);
544 where = address;
545 for (i=0 ; i<4 ; i++,address += BLOCK_SIZE) {
546 if (bh[i]) {
547 wait_on_buffer(bh[i]);
548 if (bh[i]->b_uptodate)
549 COPYBLK((unsigned long) bh[i]->b_data,address);
550 brelse(bh[i]);
551 }
552 }
553 return where;
554 }
555
556
557
558
559
560
561
/*
 * Carve one fresh page into buffers of `size' bytes and add them to
 * the free list.  All buffers from the page are linked into a ring
 * through b_this_page so the page can later be freed as a unit (see
 * try_to_free()).
 */
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	/* size must be a multiple of 512 and at most one page */
	if ((size & 511) || (size > 4096)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;
	/* build a NULL-terminated b_this_page chain covering the page */
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;
		tmp = bh;
		bh->b_data = (char * ) (page+i);
		bh->b_size = size;
	}
	tmp = bh;
	/* insert each head at the front of the (circular) free list */
	while (1) {
		if (free_list) {
			tmp->b_next_free = free_list;
			tmp->b_prev_free = free_list->b_prev_free;
			free_list->b_prev_free->b_next_free = tmp;
			free_list->b_prev_free = tmp;
		} else {
			/* first buffer ever: a one-element circle */
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	/* close the b_this_page chain into a ring */
	tmp->b_this_page = bh;
	buffermem += 4096;
	return;

/* ran out of buffer heads: give everything back */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
}
619
620
621
622
623
/*
 * Try to free the page a buffer lives on.  Succeeds (returns 1) only
 * if the page is unshared and every buffer in its b_this_page ring is
 * unused, clean and unlocked; all of them are then removed from the
 * queues and the page is released.  Returns 0 otherwise.
 */
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;	/* round down to the page boundary */
	if (mem_map[MAP_NR(page)] != 1)
		return 0;	/* page is shared elsewhere */
	tmp = bh;
	/* first pass: verify every buffer on the page is freeable */
	do {
		if (!tmp)
			return 0;	/* ring broken; bail out */
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	tmp = bh;
	/* second pass: actually release them all */
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	buffermem -= 4096;
	free_page(page);
	return 1;
}
653
654
655
656
657
658
659
660
661 int shrink_buffers(unsigned int priority)
662 {
663 struct buffer_head *bh;
664 int i;
665
666 if (priority < 2)
667 sync_buffers(0);
668 bh = free_list;
669 i = nr_buffers >> priority;
670 for ( ; i-- > 0 ; bh = bh->b_next_free) {
671 if (bh->b_count || !bh->b_this_page)
672 continue;
673 if (bh->b_lock)
674 if (priority)
675 continue;
676 else
677 wait_on_buffer(bh);
678 if (bh->b_dirt) {
679 ll_rw_block(WRITEA, 1, &bh);
680 continue;
681 }
682 if (try_to_free(bh))
683 return 1;
684 }
685 return 0;
686 }
687
688
689
690
691
692
693
694
695 void buffer_init(void)
696 {
697 int i;
698
699 if (high_memory >= 4*1024*1024)
700 min_free_pages = 200;
701 else
702 min_free_pages = 20;
703 for (i = 0 ; i < NR_HASH ; i++)
704 hash_table[i] = NULL;
705 free_list = 0;
706 grow_buffers(BLOCK_SIZE);
707 if (!free_list)
708 panic("VFS: Unable to initialize buffer free list!");
709 return;
710 }