This source file includes the following definitions:
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
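For orientation, a typical caller such as the buffer cache submits I/O through ll_rw_block() and then sleeps on the buffer lock. A minimal sketch of such a caller (not part of this file; getblk() and wait_on_buffer() are the fs/buffer.c helpers assumed here):

	struct buffer_head * bh;

	bh = getblk(dev, block, size);		/* look up or allocate the buffer */
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);	/* queue one read request */
		wait_on_buffer(bh);		/* driver unlocks bh on I/O completion */
	}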
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
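/*
 * The request-struct contains all necessary data to load a nr of
 * sectors into memory.
 */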
static struct request all_requests[NR_REQUEST];
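/*
 * The "disk" task queue is used to start the actual requests
 * after a plug.
 */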
DECLARE_TASK_QUEUE(tq_disk);
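/*
 * Used to wait on when there are no free requests.
 */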
struct wait_queue * wait_for_request = NULL;
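/*
 * Per-major read-ahead amount, in 512-byte sectors.
 */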
int read_ahead[MAX_BLKDEV] = {0, };
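/*
 * One entry per major: the driver's request function and the head
 * of its request queue.  Initialized by blk_dev_init().
 */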
struct blk_dev_struct blk_dev[MAX_BLKDEV];
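/*
 * blk_size contains the size of all block devices in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */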
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
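/*
 * blksize_size contains the software block size of all block
 * devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */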
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
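/*
 * hardsect_size contains the hardware sector size of a device:
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR]) then 512 bytes is assumed.
 */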
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
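/*
 * Remove the plug and let it rip: if the queue still holds only the
 * plug, splice the deferred requests back in and start the driver's
 * request function.  Runs from the tq_disk task queue.
 */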
void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	unsigned long flags;

	save_flags(flags);
	cli();
	if (dev->current_request == &dev->plug) {
		dev->current_request = dev->plug.next;
		dev->plug.next = NULL;
		if (dev->current_request)
			(dev->request_fn)();
	}
	restore_flags(flags);
}
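/*
 * "Plug" the device: insert a dummy request so the driver is not
 * started until unplug_device runs from tq_disk.  This gives
 * make_request a chance to merge adjacent buffers before the driver
 * sees the queue.  Must be called with interrupts disabled.
 */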
static inline void plug_device(struct blk_dev_struct * dev)
{
	dev->current_request = &dev->plug;
	queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
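/*
 * Look for a free request among the first n entries of all_requests
 * and mark it active, or return NULL if none is free.  The scan
 * resumes where the previous one stopped.  Must be called with
 * interrupts disabled.
 */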
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
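/*
 * Wait until a free request becomes available and return it.
 * get_request_wait first tries the fast path without sleeping.
 */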
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
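/*
 * One bit per minor device: set if the device is read-only.
 */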
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
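/*
 * Update the per-drive I/O statistics in kstat for a request of
 * nr_sectors sectors.
 */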
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors,
				   short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	} else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
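/*
 * add-request adds a request to the linked list.  It disables
 * interrupts so that it can muck with the request-lists in peace.
 * The request is inserted at the first position the IN_ORDER()
 * elevator predicate allows.
 *
 * By this point, rw is always either READ or WRITE, never READA or
 * WRITEA, which matters for drive_stat_acct() above.
 */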
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}
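/*
 * make_request queues I/O for a single buffer: it checks the request
 * against the device size, tries to merge it into a pending request
 * for the same device, and otherwise fills in a fresh request and
 * hands it to add_request().
 */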
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
			bh->b_state = 0;
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			return;
		}
	/* Uhhuh.. Nasty dead-lock possible here.. */
	if (buffer_locked(bh))
		return;
	lock_buffer(bh);
	rw_ahead = 0;	/* normal case; gets changed below for READA/WRITEA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);
				return;
			}
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads take precedence */
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* drop into WRITE */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);
				return;
			}
			/* We don't allow the write-requests to fill up the
			 * queue completely:  we want some room for reads,
			 * as they take precedence. The last third of the
			 * requests are only for reads.
			 */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk(KERN_ERR "make_request: bad block dev cmd,"
			       " must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}
	/* look for a free request. */
	cli();
	req = blk_dev[major].current_request;
	if (!req) {
		/* MD and loop can't handle plugging without deadlocking */
		if (major != MD_MAJOR && major != LOOP_MAJOR)
			plug_device(blk_dev + major);
	} else switch (major) {
	     case IDE0_MAJOR:	/* same as HD_MAJOR */
	     case IDE1_MAJOR:
	     case FLOPPY_MAJOR:
	     case IDE2_MAJOR:
	     case IDE3_MAJOR:
		/*
		 * The scsi disk and cdrom drivers completely remove the
		 * request from the queue when they start processing an
		 * entry.  For this reason it is safe to continue to add
		 * links to the top entry for those devices.
		 *
		 * All other drivers need to jump over the first entry, as
		 * that entry may be busy being processed and we thus can't
		 * change it.
		 */
		req = req->next;
		if (!req)
			break;
		/* fall through */
	     case SCSI_DISK_MAJOR:
	     case SCSI_CDROM_MAJOR:
		do {
			if (req->sem)
				continue;
			if (req->cmd != rw)
				continue;
			if (req->nr_sectors >= 244)
				continue;
			if (req->rq_dev != bh->b_rdev)
				continue;
			/* Can we add it to the end of this request? */
			if (req->sector + req->nr_sectors == sector) {
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
			/* or to the beginning? */
			} else if (req->sector - count == sector) {
				bh->b_reqnext = req->bh;
				req->bh = bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
			} else
				continue;

			req->nr_sectors += count;
			mark_buffer_clean(bh);
			sti();
			return;
		} while ((req = req->next) != NULL);
	}
	/* find an unused request. */
	req = get_request(max_req, bh->b_rdev);
	sti();
	/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_rdev);
	}
	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(blk_dev + major, req);
}
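/*
 * This function can be used to request a number of buffers from a
 * block device.  Currently the only restriction is that all buffers
 * must belong to the same device.
 */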
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;
	/* Make sure the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}
	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(KERN_ERR
		       "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}
	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}
	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
		/* Md remaps blocks now */
		bh[i]->b_rdev = bh[i]->b_dev;
		bh[i]->b_rsector = bh[i]->b_blocknr * (bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
		if (major == MD_MAJOR &&
		    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
			    &bh[i]->b_rsector, bh[i]->b_size >> 9))
			goto sorry;
#endif
	}
	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);
			make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
		}
	}
	return;
sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
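/*
 * Read or write one page of swap space, split into nb blocks whose
 * block numbers are given in b[].  Up to eight requests are kept in
 * flight at a time; completion is awaited on a shared semaphore.
 */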
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	unsigned long rsector;
	kdev_t rdev;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk(KERN_NOTICE "ll_rw_swap_file: trying to swap to"
		       " nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk(KERN_NOTICE
				       "Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			rdev = dev;
			rsector = (b[i] * buffersize) >> 9;
#ifdef CONFIG_BLK_DEV_MD
			if (major==MD_MAJOR &&
			    md_map (MINOR(dev), &rdev,
				    &rsector, buffersize >> 9)) {
				printk (KERN_ERR
					"Bad md_map in ll_rw_swap_file\n");
				return;
			}
#endif
			if (j == 0) {
				req[j] = get_request_wait(NR_REQUEST, rdev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, rdev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = rsector;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(blk_dev + MAJOR(rdev), req[j]);
		}
		run_task_queue(&tq_disk);
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
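/*
 * Initialize the request table and the per-major device structures,
 * then call the init routine of every configured block driver.
 */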
int blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->request_fn = NULL;
		dev->current_request = NULL;
		dev->plug.rq_status = RQ_INACTIVE;
		dev->plug.cmd = -1;
		dev->plug.next = NULL;
		dev->plug_tq.routine = &unplug_device;
		dev->plug_tq.data = dev;
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();		/* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);	/* no floppy driver: switch floppy motors off */
#endif
#ifdef CONFIG_CDI_INIT
	cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
	return 0;
}