This source file includes the following definitions:
- wait_on_buffer
- sync_buffers
- sys_sync
- sync_dev
- invalidate_buffers
- check_disk_change
- remove_from_hash_queue
- remove_from_free_list
- remove_from_queues
- put_first_free
- put_last_free
- insert_into_queues
- find_buffer
- get_hash_table
- getblk
- brelse
- bread
- bread_page
- breada
- put_unused_buffer_head
- get_more_buffer_heads
- get_unused_buffer_head
- grow_buffers
- try_to_free
- shrink_buffers
- buffer_init
#include <stdarg.h>

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/system.h>
#include <asm/io.h>

static struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list = NULL;
static struct buffer_head * unused_list = NULL;
static struct wait_queue * buffer_wait = NULL;

int nr_buffers = 0;
int nr_buffer_heads = 0;

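/*
 * Wait for a locked buffer to become unlocked. Interrupts are disabled
 * around the test so that the wakeup from the disk interrupt cannot be
 * missed between checking b_lock and going to sleep.
 */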
static inline void wait_on_buffer(struct buffer_head * bh)
{
	cli();
	while (bh->b_lock)
		sleep_on(&bh->b_wait);
	sti();
}

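/*
 * Start a write on every dirty, unlocked buffer. Note that the dev
 * argument is not used to filter buffers here: all dirty buffers are
 * written, whatever device they belong to.
 */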
static void sync_buffers(int dev)
{
	int i;
	struct buffer_head * bh;

	bh = free_list;
	for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
		if (bh->b_lock)
			continue;
		if (!bh->b_dirt)
			continue;
		ll_rw_block(WRITE,bh);
	}
}

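/*
 * The sync() system call: write back every dirty superblock that has a
 * write_super operation, then the inodes, then all dirty buffers.
 */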
int sys_sync(void)
{
	int i;

	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev
		    && super_block[i].s_op
		    && super_block[i].s_op->write_super
		    && super_block[i].s_dirt)
			super_block[i].s_op->write_super(&super_block[i]);
	sync_inodes();
	sync_buffers(0);
	return 0;
}

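/*
 * Sync a single device: write its superblock if dirty, then flush
 * buffers, sync the inodes, and flush buffers once more to pick up
 * anything dirtied by sync_inodes().
 */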
int sync_dev(int dev)
{
	struct super_block * sb;

	if ((sb = get_super(dev)))
		if (sb->s_op && sb->s_op->write_super && sb->s_dirt)
			sb->s_op->write_super(sb);
	sync_buffers(dev);
	sync_inodes();
	sync_buffers(dev);
	return 0;
}

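/*
 * Forget the cached contents of a device: every buffer belonging to dev
 * is marked not uptodate and clean, so stale data cannot be handed out
 * after a media change.
 */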
void inline invalidate_buffers(int dev)
{
	int i;
	struct buffer_head * bh;

	bh = free_list;
	for (i = nr_buffers*2 ; --i > 0 ; bh = bh->b_next_free) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dev == dev)
			bh->b_uptodate = bh->b_dirt = 0;
	}
}

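/*
 * check_disk_change() only does something for floppies (major number 2).
 * It asks the floppy driver whether the disk has been changed and, if so,
 * drops any superblock mounted on the device and invalidates its inodes
 * and buffers.
 */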
void check_disk_change(int dev)
{
	int i;
	struct buffer_head * bh;

	if (MAJOR(dev) != 2)
		return;
	if (!(bh = getblk(dev,0,1024)))
		return;
	i = floppy_change(bh);
	brelse(bh);
	if (!i)
		return;
	for (i=0 ; i<NR_SUPER ; i++)
		if (super_block[i].s_dev == dev)
			put_super(super_block[i].s_dev);
	invalidate_inodes(dev);
	invalidate_buffers(dev);
}

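/*
 * Buffers are hashed on (dev ^ block) so a cached block can be found
 * quickly; hash() gives the head of the chain for a given block.
 */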
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

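/*
 * Every buffer lives on two sets of lists: the circular, doubly linked
 * free/LRU list (b_next_free/b_prev_free) and a hash chain
 * (b_next/b_prev). The helpers below unlink and relink buffer heads on
 * those lists.
 */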
static inline void remove_from_hash_queue(struct buffer_head * bh)
{
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
	bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("Free block list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;
	if (free_list == bh)
		free_list = bh->b_next_free;
	bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
	remove_from_hash_queue(bh);
	remove_from_free_list(bh);
}

static inline void put_first_free(struct buffer_head * bh)
{
	if (!bh || (bh == free_list))
		return;
	remove_from_free_list(bh);

	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
	free_list = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
	if (!bh)
		return;
	if (bh == free_list) {
		free_list = bh->b_next_free;
		return;
	}
	remove_from_free_list(bh);

	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;

	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	if (bh->b_next)
		bh->b_next->b_prev = bh;
}

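/*
 * Look for the buffer holding (dev,block) on its hash chain. A cached
 * block with a different size is an error: complain and pretend it is
 * not there.
 */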
static struct buffer_head * find_buffer(int dev, int block, int size)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev==dev && tmp->b_blocknr==block) {
			if (tmp->b_size == size)
				return tmp;
			else {
				printk("wrong block-size on device %04x\n",dev);
				return NULL;
			}
		}
	return NULL;
}

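/*
 * Look a block up in the cache and return it with its reference count
 * raised. Because taking the reference may involve sleeping (the buffer
 * can be locked for I/O), the identity of the buffer is re-checked
 * afterwards and the lookup is retried if it changed underneath us.
 */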
struct buffer_head * get_hash_table(int dev, int block, int size)
{
	struct buffer_head * bh;

	for (;;) {
		if (!(bh=find_buffer(dev,block,size)))
			return NULL;
		bh->b_count++;
		wait_on_buffer(bh);
		if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size) {
			put_last_free(bh);
			return bh;
		}
		bh->b_count--;
	}
}

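/*
 * getblk() returns a buffer for (dev,block), creating one if the block
 * is not yet cached. When a buffer has to be recycled, BADNESS() is used
 * to prefer clean, unlocked buffers (a dirty buffer counts twice as bad
 * as a locked one). Every time the routine may have slept it re-checks
 * its assumptions and starts over.
 */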
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
struct buffer_head * getblk(int dev, int block, int size)
{
	struct buffer_head * bh, * tmp;
	int buffers;

repeat:
	if ((bh = get_hash_table(dev, block, size)))
		return bh;

	if (nr_free_pages > 30)
		grow_buffers(size);

	buffers = nr_buffers;
	bh = NULL;

	for (tmp = free_list; buffers-- > 0 ; tmp = tmp->b_next_free) {
		if (tmp->b_count || tmp->b_size != size)
			continue;
		if (!bh || BADNESS(tmp)<BADNESS(bh)) {
			bh = tmp;
			if (!BADNESS(tmp))
				break;
		}
#if 0
		if (tmp->b_dirt)
			ll_rw_block(WRITEA,tmp);
#endif
	}

	if (!bh && nr_free_pages > 5) {
		grow_buffers(size);
		goto repeat;
	}

/* no candidate at all: wait until somebody releases a buffer */
	if (!bh) {
		sleep_on(&buffer_wait);
		goto repeat;
	}
	wait_on_buffer(bh);
	if (bh->b_count || bh->b_size != size)
		goto repeat;
	if (bh->b_dirt) {
		sync_buffers(bh->b_dev);
		goto repeat;
	}
/* while we slept, somebody else may have added this block to the cache */
	if (find_buffer(dev,block,size))
		goto repeat;
/* the buffer is ours: unused, unlocked and clean */
	bh->b_count=1;
	bh->b_dirt=0;
	bh->b_uptodate=0;
	remove_from_queues(bh);
	bh->b_dev=dev;
	bh->b_blocknr=block;
	insert_into_queues(bh);
	return bh;
}

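/*
 * Release a buffer: drop its reference count and wake up anyone waiting
 * in getblk() for a buffer to become available.
 */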
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);
	if (!(buf->b_count--))
		panic("Trying to free free buffer");
	wake_up(&buffer_wait);
}

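/*
 * bread() reads the specified block and returns a buffer containing it,
 * or NULL if the block could not be read.
 */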
struct buffer_head * bread(int dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("bread: getblk returned NULL\n");
		return NULL;
	}
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ,bh);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return NULL;
}

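/*
 * Typical use by a filesystem (a sketch only; "dev" and "nr" stand for a
 * device number and a block number supplied by the caller):
 *
 *	struct buffer_head * bh;
 *
 *	if ((bh = bread(dev, nr, 1024))) {
 *		... look at bh->b_data ...
 *		brelse(bh);
 *	}
 */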
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
	"rep\n\t" \
	"movsl\n\t" \
	::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
	:"cx","di","si")

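/*
 * bread_page() reads up to four 1024-byte blocks into the page at
 * "address". All reads are started first and only then waited for, so
 * the requests can be serviced together; a zero entry in b[] leaves the
 * corresponding part of the page untouched.
 */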
void bread_page(unsigned long address,int dev,int b[4])
{
	struct buffer_head * bh[4];
	int i;

	for (i=0 ; i<4 ; i++)
		if (b[i]) {
			if ((bh[i] = getblk(dev, b[i], 1024)))
				if (!bh[i]->b_uptodate)
					ll_rw_block(READ,bh[i]);
		} else
			bh[i] = NULL;
	for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
		if (bh[i]) {
			wait_on_buffer(bh[i]);
			if (bh[i]->b_uptodate)
				COPYBLK((unsigned long) bh[i]->b_data,address);
			brelse(bh[i]);
		}
}

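/*
 * breada() works like bread(), but also starts read-ahead on the extra
 * block numbers given in the argument list. The list must be terminated
 * with a negative number, e.g. breada(dev, nr, nr+1, nr+2, -1).
 */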
struct buffer_head * breada(int dev,int first, ...)
{
	va_list args;
	struct buffer_head * bh, *tmp;

	va_start(args,first);
	if (!(bh = getblk(dev, first, 1024))) {
		printk("breada: getblk returned NULL\n");
		return NULL;
	}
	if (!bh->b_uptodate)
		ll_rw_block(READ,bh);
	while ((first=va_arg(args,int))>=0) {
		tmp = getblk(dev, first, 1024);
		if (tmp) {
			if (!tmp->b_uptodate)
				ll_rw_block(READA,tmp);
			tmp->b_count--;
		}
	}
	va_end(args);
	wait_on_buffer(bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return (NULL);
}

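/*
 * Buffer heads are allocated a whole page at a time and kept on
 * unused_list when they are not attached to any data.
 */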
static void put_unused_buffer_head(struct buffer_head * bh)
{
	memset((void *) bh,0,sizeof(*bh));
	bh->b_next_free = unused_list;
	unused_list = bh;
}

static void get_more_buffer_heads(void)
{
	unsigned long page;
	struct buffer_head * bh;

	if (unused_list)
		return;
	page = get_free_page(GFP_KERNEL);
	if (!page)
		return;
	bh = (struct buffer_head *) page;
	while ((unsigned long) (bh+1) <= page+4096) {
		put_unused_buffer_head(bh);
		bh++;
		nr_buffer_heads++;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	bh->b_next_free = NULL;
	bh->b_data = NULL;
	bh->b_size = 0;
	return bh;
}

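/*
 * Try to add buffers of the given size to the cache: one free page is
 * taken and carved into size-byte buffers, which are linked into a ring
 * through b_this_page and put at the head of the free list.
 */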
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > 4096)) {
		printk("grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;
		tmp = bh;
		bh->b_data = (char *) (page+i);
		bh->b_size = size;
	}
	tmp = bh;
	while (1) {
		tmp->b_next_free = free_list;
		tmp->b_prev_free = free_list->b_prev_free;
		free_list->b_prev_free->b_next_free = tmp;
		free_list->b_prev_free = tmp;
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	tmp->b_this_page = bh;
	return;

/* if a buffer head could not be had, give back everything collected so far */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}
	free_page(page);
}

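/*
 * try_to_free() checks whether every buffer on the page that bh belongs
 * to is unused, clean and unlocked; if so, all of them are released and
 * the page itself is freed. Returns 1 on success, 0 otherwise.
 */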
static int try_to_free(struct buffer_head * bh)
{
	unsigned long page;
	struct buffer_head * tmp, * p;

	tmp = bh;
	do {
		if (!tmp)
			return 0;
		if (tmp->b_count || tmp->b_dirt || tmp->b_lock)
			return 0;
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	page = (unsigned long) bh->b_data;
	page &= 0xfffff000;
	tmp = bh;
	do {
		p = tmp;
		tmp = tmp->b_this_page;
		nr_buffers--;
		remove_from_queues(p);
		put_unused_buffer_head(p);
	} while (tmp != bh);
	free_page(page);
	return 1;
}

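/*
 * Try to give a page back to the memory manager by freeing a page worth
 * of buffers. Dirty buffers met along the way are scheduled for
 * write-ahead rather than freed. Returns 1 if a page was freed.
 */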
int shrink_buffers(void)
{
	struct buffer_head *bh;
	int i;

	bh = free_list;
	for (i = nr_buffers*2 ; i-- > 0 ; bh = bh->b_next_free) {
		wait_on_buffer(bh);
		if (bh->b_count || !bh->b_this_page)
			continue;
		if (bh->b_dirt) {
			ll_rw_block(WRITEA,bh);
			continue;
		}
		if (try_to_free(bh))
			return 1;
	}
	return 0;
}

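/*
 * buffer_init() clears the hash table and builds the initial free list
 * from the memory between the end of the kernel image and 640K (0xA0000).
 * Note that the very first buffer head is put on the free list before
 * nr_buffers is ever incremented, so nr_buffers ends up one less than
 * the number of buffers actually created.
 */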
void buffer_init(void)
{
	struct buffer_head * bh;
	extern int end;
	unsigned long mem;
	int i;

	for (i = 0 ; i < NR_HASH ; i++)
		hash_table[i] = NULL;
	mem = (unsigned long) & end;
	mem += BLOCK_SIZE-1;
	mem &= ~(BLOCK_SIZE-1);
	free_list = get_unused_buffer_head();
	if (!free_list)
		panic("unable to get a single buffer-head");
	free_list->b_prev_free = free_list;
	free_list->b_next_free = free_list;
	free_list->b_data = (char *) mem;
	free_list->b_size = BLOCK_SIZE;
	mem += BLOCK_SIZE;
	while (mem + 1024 < 0xA0000) {
		bh = get_unused_buffer_head();
		if (!bh)
			break;
		bh->b_data = (char *) mem;
		bh->b_size = BLOCK_SIZE;
		mem += BLOCK_SIZE;
		bh->b_next_free = free_list;
		bh->b_prev_free = free_list->b_prev_free;
		free_list->b_prev_free->b_next_free = bh;
		free_list->b_prev_free = bh;
		free_list = bh;
		++nr_buffers;
	}
	return;
}