This source file includes following definitions.
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include <linux/blk.h>
23
24
25
26
27
28 static struct request all_requests[NR_REQUEST];
29
30
31
32
33
34 DECLARE_TASK_QUEUE(tq_disk);
35
36
37
38
39 struct wait_queue * wait_for_request = NULL;
40
41
42
43 int read_ahead[MAX_BLKDEV] = {0, };
44
45
46
47
48
49 struct blk_dev_struct blk_dev[MAX_BLKDEV];
50
51
52
53
54
55
56
57
58
59 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
60
61
62
63
64
65
66
67
68 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
69
70
71
72
73
74
75
76
77
78
79
80
81
82 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
83
84
85
86
/*
 * Move a plugged device's accumulated requests onto its live queue and
 * kick the driver's request function.  Runs from the tq_disk task
 * queue (scheduled by plug_device()); "data" is the blk_dev_struct.
 */
static void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	unsigned long flags;

	save_flags(flags);
	cli();	/* queue manipulation must not race interrupt handlers */
	dev->current_request = dev->plug.next;	/* unhook the dummy plug head */
	dev->plug.next = NULL;
	(dev->request_fn)();	/* start real I/O on the gathered requests */
	restore_flags(flags);
}
99
100
101
102
103
104
105
106
/*
 * "Plug" an idle device: park the dummy plug request at the head of
 * the queue so that new requests can accumulate (and be merged/sorted)
 * before the driver starts working, and schedule unplug_device() on
 * tq_disk to release them later.
 * NOTE(review): queue_task_irq_off suggests this is meant to run with
 * interrupts already disabled — confirm against callers.
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
	dev->current_request = &dev->plug;
	queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
112
113
114
115
116
117
/*
 * Find a free (RQ_INACTIVE) slot among the first n entries of
 * all_requests[] and claim it for the given device, or return NULL
 * when all of them are busy.  Callers disable interrupts around the
 * call (see get_request_wait / __get_request_wait / make_request).
 *
 * prev_found caches where the previous search succeeded so that the
 * scan proceeds round-robin (downwards, wrapping from the first entry
 * back to limit-1) instead of always favouring the same slots; the
 * cache is reset whenever a different limit n is used.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step backwards, wrapping from the first entry to limit-1 */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)	/* full circle: nothing free */
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
144
145
146
147
/*
 * Sleep until get_request() succeeds for one of the first n request
 * slots.  Uses the classic sleep/wakeup protocol: the task state is
 * set to TASK_UNINTERRUPTIBLE *before* the slot check, so a wakeup
 * arriving between the check and schedule() is not lost.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		cli();		/* get_request() expects interrupts off */
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		/* Unplug any plugged devices so slots drain, then wait. */
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
168
169 static inline struct request * get_request_wait(int n, kdev_t dev)
170 {
171 register struct request *req;
172
173 cli();
174 req = get_request(n, dev);
175 sti();
176 if (req)
177 return req;
178 return __get_request_wait(n, dev);
179 }
180
181
182
183 static long ro_bits[MAX_BLKDEV][8];
184
185 int is_read_only(kdev_t dev)
186 {
187 int minor,major;
188
189 major = MAJOR(dev);
190 minor = MINOR(dev);
191 if (major < 0 || major >= MAX_BLKDEV) return 0;
192 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
193 }
194
195 void set_device_ro(kdev_t dev,int flag)
196 {
197 int minor,major;
198
199 major = MAJOR(dev);
200 minor = MINOR(dev);
201 if (major < 0 || major >= MAX_BLKDEV) return;
202 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
203 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
204 }
205
206 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
207 {
208 kstat.dk_drive[disk_index]++;
209 if (cmd == READ) {
210 kstat.dk_drive_rio[disk_index]++;
211 kstat.dk_drive_rblk[disk_index] += nr_sectors;
212 }
213 else if (cmd == WRITE) {
214 kstat.dk_drive_wio[disk_index]++;
215 kstat.dk_drive_wblk[disk_index] += nr_sectors;
216 } else
217 printk("drive_stat_acct: cmd not R/W?\n");
218 }
219
220
221
222
223
224
225
226
227
228
229 struct semaphore request_lock = MUTEX;
230
/*
 * add_request - insert a request into a device's queue in elevator
 * order (IN_ORDER) and make sure it gets processed.
 *
 * Per-disk statistics are updated first for the majors we track.
 * If the queue is empty the request becomes current and the driver's
 * request_fn is called at once; otherwise the request is linked into
 * the queue at the position chosen by the one-way elevator.  SCSI
 * drivers (except MD) also get request_fn called so they can look at
 * the newly extended queue.  May sleep on request_lock.
 */
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	down (&request_lock);
	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			/* fall through */
		default:
			break;
	}

	req->next = NULL;
	cli();	/* protect the queue against the irq completion path */
	/* The buffer now belongs to the request layer; clean it unless
	   it has been remapped to another device (b_dev != b_rdev). */
	if (req->bh && req->bh->b_dev==req->bh->b_rdev)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* Queue was empty: this request becomes current and the
		   driver is started immediately. */
		dev->current_request = req;
		up (&request_lock);
		(dev->request_fn)();
		sti();
		return;
	}
	/* One-way elevator: find the insertion point behind the head
	   (which the driver may already be working on). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	up (&request_lock);

	/* SCSI drivers want to see the queue grow even while busy. */
	if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
		(dev->request_fn)();

	sti();
}
282
/*
 * make_request - turn a buffer_head into block I/O: either merge it
 * into an existing contiguous request on the device's queue, or
 * allocate a new request and hand it to add_request().
 *
 * READA/WRITEA behave like READ/WRITE except that they are simply
 * dropped (instead of sleeping) when no free request slot exists.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;

	/* Reject transfers past the end of the device (blk_size[] is
	   in 1K units, hence the >>1 on the sector count). */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
			 rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
			return;
		}

	/* Buffer already in flight elsewhere: nothing to do. */
	if (buffer_locked(bh))
		return;

	lock_buffer(bh);

	rw_ahead = 0;	/* normal requests block when no slot is free */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* fall through */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);	/* nothing to read */
				return;
			}
			kstat.pgpgin++;
			max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST;
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* fall through */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);	/* already clean */
				return;
			}
			/* Writers get only a subset of the slots so that a
			   flood of dirty buffers cannot starve readers. */
			kstat.pgpgout++;
			max_req = (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

	down (&request_lock);

	/* Look for a pending request this buffer can be merged into.
	   Interrupts off: the queue must not change under us. */
	cli();
	req = blk_dev[major].current_request;
	if (!req) {
		/* Queue empty: plug the device so further requests can
		   gather before the driver starts seeking. */
		if (major != MD_MAJOR && major != LOOP_MAJOR)
			plug_device(blk_dev + major);
	} else switch (major) {
	     case IDE0_MAJOR:
	     case IDE1_MAJOR:
	     case FLOPPY_MAJOR:
	     case IDE2_MAJOR:
	     case IDE3_MAJOR:
		/* For these drivers the head request may already be in
		   progress, so only try to merge with the ones behind it. */
		req = req->next;
		if (!req)
			break;
		/* fall through */
	     case SCSI_DISK_MAJOR:
	     case SCSI_CDROM_MAJOR:
	     case MD_MAJOR:
		do {
			if (req->sem)		/* synchronous (swap) request */
				continue;
			if (req->cmd != rw)	/* must go the same direction */
				continue;
			if (req->nr_sectors >= 244)	/* request already full */
				continue;
			if (req->rq_dev != bh->b_dev)
				continue;
			/* Contiguous at the tail of this request? */
			if (req->sector + req->nr_sectors == sector) {
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
			/* ... or at its front? */
			} else if (req->sector - count == sector) {
				bh->b_reqnext = req->bh;
				req->bh = bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
			} else
				continue;

			req->nr_sectors += count;
			mark_buffer_clean(bh);
			up (&request_lock);
			sti();
			return;
		} while ((req = req->next) != NULL);
	}

	up (&request_lock);

	/* No merge possible: grab a free request slot... */
	req = get_request(max_req, bh->b_dev);
	sti();

	/* ...sleeping for one if none was free — unless this is
	   read-ahead/write-ahead, which is silently dropped. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	/* Fill in the request and queue it. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
435
#ifdef CONFIG_BLK_DEV_MD

/*
 * Exported helper for the MD (multiple-device) driver: blocking
 * request-slot allocation.
 */
struct request *get_md_request (int max_req, kdev_t dev)
{
	return get_request_wait(max_req, dev);
}

#endif
444
445
446
447
448
449
/*
 * ll_rw_page - read or write one page-sized unit (given as a page
 * number on the device) into/out of the given buffer via brw_page().
 * Only READ and WRITE are accepted; anything else panics.
 */
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	unsigned int major = MAJOR(dev);
	int block = page;	/* brw_page() wants an int block number */

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), page);
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't page to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_page: bad block dev cmd, must be R/W");
	}
	/* Lock the page; it must not already be in flight. */
	if (set_bit(PG_locked, &mem_map[MAP_NR(buffer)].flags))
		panic ("ll_rw_page: page already locked");
	brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
}
477
478
479
480
481
/*
 * ll_rw_block - queue block I/O for an array of nr buffer heads, all
 * on the same device and all matching the device's block size.  NULL
 * entries in bh[] are skipped.  On error every buffer is marked
 * neither dirty nor up to date so the failure is visible to callers.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; nothing to do if all are NULL. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the device's block size (default BLOCK_SIZE). */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match that size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);

			/* b_rdev may later be remapped (add_request checks
			   b_dev == b_rdev); start out identical. */
			bh[i]->b_rdev = bh[i]->b_dev;

			make_request(major, rw, bh[i]);
		}
	}
	return;

sorry:
	/* Mark the buffers neither dirty nor valid. */
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
552
/*
 * ll_rw_swap_file - synchronous I/O on nb swap blocks.  b[] holds the
 * block numbers (in units of PAGE_SIZE/nb bytes) and buf the memory,
 * advanced one block per request.  Requests are issued in batches of
 * up to 8; each carries &sem, which the driver is expected to up()
 * on completion, and the whole batch is waited for before the next
 * one is issued.
 */
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;	/* bytes per swap block */

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* first of a batch: may sleep, always succeeds */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;	/* no more slots: close batch */
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;	/* completion is signalled here */
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		/* Unplug devices, then wait for every request in the batch. */
		run_task_queue(&tq_disk);
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
611
612 int blk_dev_init(void)
613 {
614 struct request * req;
615 struct blk_dev_struct *dev;
616
617 for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
618 dev->request_fn = NULL;
619 dev->current_request = NULL;
620 dev->plug.rq_status = RQ_INACTIVE;
621 dev->plug.cmd = -1;
622 dev->plug.next = NULL;
623 dev->plug_tq.routine = &unplug_device;
624 dev->plug_tq.data = dev;
625 }
626
627 req = all_requests + NR_REQUEST;
628 while (--req >= all_requests) {
629 req->rq_status = RQ_INACTIVE;
630 req->next = NULL;
631 }
632 memset(ro_bits,0,sizeof(ro_bits));
633 #ifdef CONFIG_BLK_DEV_RAM
634 rd_init();
635 #endif
636 #ifdef CONFIG_BLK_DEV_LOOP
637 loop_init();
638 #endif
639 #ifdef CONFIG_BLK_DEV_IDE
640 ide_init();
641 #endif
642 #ifdef CONFIG_BLK_DEV_HD
643 hd_init();
644 #endif
645 #ifdef CONFIG_BLK_DEV_XD
646 xd_init();
647 #endif
648 #ifdef CONFIG_BLK_DEV_FD
649 floppy_init();
650 #else
651 outb_p(0xc, 0x3f2);
652 #endif
653 #ifdef CONFIG_CDI_INIT
654 cdi_init();
655 #endif CONFIG_CDI_INIT
656 #ifdef CONFIG_CDU31A
657 cdu31a_init();
658 #endif CONFIG_CDU31A
659 #ifdef CONFIG_MCD
660 mcd_init();
661 #endif CONFIG_MCD
662 #ifdef CONFIG_MCDX
663 mcdx_init();
664 #endif CONFIG_MCDX
665 #ifdef CONFIG_SBPCD
666 sbpcd_init();
667 #endif CONFIG_SBPCD
668 #ifdef CONFIG_AZTCD
669 aztcd_init();
670 #endif CONFIG_AZTCD
671 #ifdef CONFIG_CDU535
672 sony535_init();
673 #endif CONFIG_CDU535
674 #ifdef CONFIG_GSCD
675 gscd_init();
676 #endif CONFIG_GSCD
677 #ifdef CONFIG_CM206
678 cm206_init();
679 #endif
680 #ifdef CONFIG_OPTCD
681 optcd_init();
682 #endif CONFIG_OPTCD
683 #ifdef CONFIG_SJCD
684 sjcd_init();
685 #endif CONFIG_SJCD
686 #ifdef CONFIG_BLK_DEV_MD
687 md_init();
688 #endif CONFIG_BLK_DEV_MD
689 return 0;
690 }