This source file includes the following definitions:
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
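/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */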
static struct request all_requests[NR_REQUEST];
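/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */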
DECLARE_TASK_QUEUE(tq_disk);
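/*
 * used to wait on when there are no free requests
 */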
struct wait_queue * wait_for_request = NULL;
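/* This specifies how many sectors to read ahead on the disk. */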
int read_ahead[MAX_BLKDEV] = {0, };
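/*
 * blk_dev holds, per major number, the driver's request function,
 * the head of its request queue, and the plug machinery used below.
 * It is initialized by blk_dev_init().
 */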
struct blk_dev_struct blk_dev[MAX_BLKDEV];
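/*
 * blk_size contains the size of all block-devices in units of 1024 bytes:
 *
 *	blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */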
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
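/*
 * blksize_size contains the block size of all block-devices:
 *
 *	blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */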
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
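/*
 * hardsect_size contains the size of the hardware sector of a device:
 *
 *	hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR]) then 512 bytes is assumed,
 * else the sector size is hardsect_size[MAJOR][MINOR].
 */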
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
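/*
 * "unplug" a device: remove the plug from the head of the queue and
 * start the request function, so that the requests accumulated while
 * the device was plugged finally get processed.  Run from tq_disk.
 */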
void unplug_device(void * data)
{
        struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
        unsigned long flags;

        save_flags(flags);
        cli();
        if (dev->current_request == &dev->plug) {
                dev->current_request = dev->plug.next;
                dev->plug.next = NULL;
                if (dev->current_request)
                        (dev->request_fn)();
        }
        restore_flags(flags);
}
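/*
 * "plug" the device: insert the dummy plug request at the head of the
 * queue, so that new requests only accumulate and get sorted; the
 * actual I/O is started later by unplug_device() via tq_disk.
 */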
static inline void plug_device(struct blk_dev_struct * dev)
{
        dev->current_request = &dev->plug;
        queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
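/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */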
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}
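/*
 * wait until a free request in the first N entries is available.
 */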
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                run_task_queue(&tq_disk);
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}
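/*
 * One bit per minor device: a set bit means the device is read-only.
 */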
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        } else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk("drive_stat_acct: cmd not R/W?\n");
}
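/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA.
 */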
void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        /* elevator-style insertion: keep the queue sorted by IN_ORDER */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

        /* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}
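/*
 * make_request: queue a single buffer head for I/O on "major",
 * merging it into an existing request when the sectors are adjacent.
 */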
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        count = bh->b_size >> 9;
        sector = bh->b_rsector;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_rdev),
                               rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_rdev)]);
                        return;
                }

        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh))
                return;
        lock_buffer(bh);

        rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
                                unlock_buffer(bh);      /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
                                unlock_buffer(bh);      /* Hmmph! Nothing to write */
                                return;
                        }
                        /*
                         * We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

        /* look for a free request, and try to coalesce with old requests. */
        cli();
        req = blk_dev[major].current_request;
        if (!req) {
                /* MD and loop can't handle plugging without deadlocking */
                if (major != MD_MAJOR && major != LOOP_MAJOR)
                        plug_device(blk_dev + major);
        } else switch (major) {
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case IDE1_MAJOR:
                case FLOPPY_MAJOR:
                case IDE2_MAJOR:
                case IDE3_MAJOR:
                        /*
                         * The scsi disk and cdrom drivers completely remove the
                         * request from the queue when they start processing an
                         * entry, so it is safe to keep adding links to the top
                         * entry for those devices.
                         *
                         * All other drivers need to jump over the first entry,
                         * as that entry may be busy being processed and we thus
                         * can't change it.
                         */
                        req = req->next;
                        if (!req)
                                break;
                        /* fall through */

                case SCSI_DISK_MAJOR:
                case SCSI_CDROM_MAJOR:
                        /* Try to merge the buffer into an existing request. */
                        do {
                                if (req->sem)
                                        continue;
                                if (req->cmd != rw)
                                        continue;
                                if (req->nr_sectors >= 244)
                                        continue;
                                if (req->rq_dev != bh->b_rdev)
                                        continue;
                                /* contiguous at the end of this request? */
                                if (req->sector + req->nr_sectors == sector) {
                                        req->bhtail->b_reqnext = bh;
                                        req->bhtail = bh;
                                /* or at the front? */
                                } else if (req->sector - count == sector) {
                                        bh->b_reqnext = req->bh;
                                        req->bh = bh;
                                        req->buffer = bh->b_data;
                                        req->current_nr_sectors = count;
                                        req->sector = sector;
                                } else
                                        continue;

                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        } while ((req = req->next) != NULL);
        }

        /* find an unused request. */
        req = get_request(max_req, bh->b_rdev);
        sti();

        /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_rdev);
        }

        /* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
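/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device */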
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                        kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device. */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes. */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk("ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }

                /* Md remaps blocks now */
                bh[i]->b_rdev = bh[i]->b_dev;
                bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
                if (major==MD_MAJOR &&
                    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
                            &bh[i]->b_rsector, bh[i]->b_size >> 9))
                        goto sorry;
#endif
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);

                        make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
                }
        }
        return;

sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
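/*
 * A sketch of typical use (the bread()-style pattern in fs/buffer.c):
 * submit the buffer, then sleep until the completion handler unlocks it:
 *
 *	struct buffer_head * bh = getblk(dev, block, size);
 *	if (!buffer_uptodate(bh)) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 */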
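/*
 * ll_rw_swap_file: do the I/O for a swap file directly, without buffer
 * heads.  Up to eight requests are submitted per batch; the shared
 * semaphore is downed once per request, so the loop sleeps until the
 * whole batch has completed before issuing the next one.
 */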
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        unsigned long rsector;
        kdev_t rdev;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't swap to read-only device %s\n",
                                       kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        rdev = dev;
                        rsector = (b[i] * buffersize) >> 9;
#ifdef CONFIG_BLK_DEV_MD
                        if (major==MD_MAJOR &&
                            md_map (MINOR(dev), &rdev,
                                    &rsector, buffersize >> 9)) {
                                printk ("Bad md_map in ll_rw_swap_file\n");
                                return;
                        }
#endif

                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, rdev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, rdev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = rsector;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(MAJOR(rdev)+blk_dev,req[j]);
                }
                run_task_queue(&tq_disk);
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}
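/*
 * blk_dev_init: clear all request-queue heads, mark every request slot
 * inactive, and call the init function of each configured driver.
 */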
int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn = NULL;
                dev->current_request = NULL;
                dev->plug.rq_status = RQ_INACTIVE;
                dev->plug.cmd = -1;
                dev->plug.next = NULL;
                dev->plug_tq.routine = &unplug_device;
                dev->plug_tq.data = dev;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: turn the motors off */
#endif
#ifdef CONFIG_CDI_INIT
        cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif /* CONFIG_CM206 */
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}