This source file includes the following definitions:
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
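/*
 * The static request table: every pending block I/O operation is
 * described by one of these entries.
 */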
static struct request all_requests[NR_REQUEST];
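/*
 * Tasks sleep on this queue while waiting for a free request slot.
 */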
struct wait_queue * wait_for_request = NULL;
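/*
 * How many sectors to read ahead, per major device.
 */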
int read_ahead[MAX_BLKDEV] = {0, };
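/*
 * One entry per major device: the driver's request function and the
 * head of its request queue.
 */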
struct blk_dev_struct blk_dev[MAX_BLKDEV];
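/*
 * blk_size contains the size of each device in units of 1024 bytes,
 * indexed by major and then minor number.  If the pointer for a major
 * is NULL, the end-of-device check in make_request() is skipped.
 */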
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
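/*
 * blksize_size contains the current block size in bytes per major and
 * minor device.  ll_rw_block() rejects buffers whose b_size does not
 * match; BLOCK_SIZE is assumed where the entry is NULL or zero.
 */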
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
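/*
 * hardsect_size contains the hardware sector size of each device, per
 * major and minor number.
 */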
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
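/*
 * Take the dummy "plug" request off the head of the queue and start
 * the driver on whatever has queued up behind it.  This runs from the
 * tq_scheduler task queue, so interrupts must be disabled by hand
 * while the queue is edited.
 */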
static void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	unsigned long flags;

	save_flags(flags);
	cli();
	dev->current_request = dev->plug.next;
	dev->plug.next = NULL;
	(dev->request_fn)();
	restore_flags(flags);
}
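/*
 * "Plug" the device: park the dummy plug request at the head of the
 * queue so that the driver stays idle while new requests accumulate
 * behind it, then schedule unplug_device() via tq_scheduler.  This
 * gives make_request() a chance to merge and sort requests before the
 * driver sees them.
 */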
static inline void plug_device(struct blk_dev_struct * dev)
{
	dev->current_request = &dev->plug;
	queue_task_irq_off(&dev->plug_tq, &tq_scheduler);
}
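/*
 * Find a free slot among the first n entries of all_requests[] and
 * mark it active.  The search resumes where the previous one left
 * off, so consecutive allocations do not rescan the whole table.
 * Must be called with interrupts disabled; returns NULL if no slot
 * is free.
 */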
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
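/*
 * Sleep (uninterruptibly) until a request slot becomes free, then
 * claim it.
 */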
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
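/*
 * Try the fast path first and fall back to sleeping.
 */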
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
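/*
 * One bit per minor device: set when the device is read-only.
 */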
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV)
		return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev, int flag)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV)
		return;
	if (flag)
		ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else
		ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
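/*
 * Account one request to the per-disk statistics in kstat: the total
 * count, plus read/write request and sector counters.
 */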
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	} else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk("drive_stat_acct: cmd not R/W?\n");
}
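/*
 * Serializes sorting new requests into the device queues.
 */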
struct semaphore request_lock = MUTEX;
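/*
 * add_request() accounts the disk statistics and inserts the request
 * into the device queue in elevator order (IN_ORDER).  If the queue
 * was empty, the driver is started at once.
 */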
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	down (&request_lock);
	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh && req->bh->b_dev == req->bh->b_rdev)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* The queue was empty: this request goes first, start the driver. */
		dev->current_request = req;
		up (&request_lock);
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: find the first place that keeps the queue
	 * in IN_ORDER order, or the point where the order already breaks. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	up (&request_lock);

	/* SCSI drivers dequeue requests as they start them, so the driver
	 * must be kicked for every request added to its queue. */
	if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev) != MD_MAJOR)
		(dev->request_fn)();

	sti();
}
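/*
 * make_request() turns one buffer head into block I/O: it validates
 * the buffer against the device size, tries to merge it into a
 * request already on the queue, and otherwise allocates a fresh
 * request slot and passes it to add_request().
 */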
static void make_request(int major, int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* blk_size[] is in 1024-byte units, so compare against sectors/2. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
			       rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
			return;
		}

	if (buffer_locked(bh))
		return;

	lock_buffer(bh);

	rw_ahead = 0;
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* fall through */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);
				return;
			}
			kstat.pgpgin++;
			max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST;
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* fall through */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);
				return;
			}
			/* Writes get fewer request slots than reads, so that
			 * heavy write-out cannot starve reads of slots. */
			kstat.pgpgout++;
			max_req = (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

	down (&request_lock);

	/* Try to merge the buffer into a request already on the queue. */
	cli();
	req = blk_dev[major].current_request;
	if (!req) {
		/* The queue is empty: plug the device and let requests gather. */
		plug_device(blk_dev + major);
	} else switch (major) {
		case IDE0_MAJOR:
		case IDE1_MAJOR:
		case FLOPPY_MAJOR:
		case IDE2_MAJOR:
		case IDE3_MAJOR:
			/*
			 * These drivers may be busy with the request at the
			 * head of the queue, so start merging at the second
			 * entry.
			 */
			req = req->next;
			if (!req)
				break;
			/* fall through */
		case SCSI_DISK_MAJOR:
		case SCSI_CDROM_MAJOR:
		case MD_MAJOR:
			do {
				if (req->sem)
					continue;
				if (req->cmd != rw)
					continue;
				if (req->nr_sectors >= 244)
					continue;
				if (req->rq_dev != bh->b_dev)
					continue;

				if (req->sector + req->nr_sectors == sector) {
					/* The buffer extends the request at the back. */
					req->bhtail->b_reqnext = bh;
					req->bhtail = bh;
				} else if (req->sector - count == sector) {
					/* The buffer extends the request at the front. */
					bh->b_reqnext = req->bh;
					req->bh = bh;
					req->buffer = bh->b_data;
					req->current_nr_sectors = count;
					req->sector = sector;
				} else
					continue;

				req->nr_sectors += count;
				mark_buffer_clean(bh);
				up (&request_lock);
				sti();
				return;
			} while ((req = req->next) != NULL);
	}

	up (&request_lock);

	/* No merge was possible: get a free request slot. */
	req = get_request(max_req, bh->b_dev);
	sti();

	if (!req) {
		/* Read-ahead and write-ahead are optional: just give up. */
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(blk_dev + major, req);
}
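/*
 * The md driver allocates its request slots through this wrapper so
 * that it, too, can sleep until one is free.
 */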
#ifdef CONFIG_BLK_DEV_MD

struct request *get_md_request(int max_req, kdev_t dev)
{
	return get_request_wait(max_req, dev);
}

#endif
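/*
 * ll_rw_page: start I/O on a single page, given as a page number on
 * the device.  The page is marked locked and brw_page() performs the
 * actual transfer.
 */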
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	unsigned int major = MAJOR(dev);
	int block = page;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), page);
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't page to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_page: bad block dev cmd, must be R/W");
	}
	if (mem_map[MAP_NR(buffer)].locked)
		panic("ll_rw_page: page already locked");
	mem_map[MAP_NR(buffer)].locked = 1;
	brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
}
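/*
 * ll_rw_block: queue any number of buffer heads for I/O.  This is the
 * entry point used by the buffer cache and the filesystems.  The call
 * only starts the transfer; a caller that needs the data must wait on
 * the buffer afterwards.  A minimal sketch of the usual read pattern
 * (essentially what bread() does):
 *
 *	struct buffer_head * bh = getblk(dev, block, BLOCK_SIZE);
 *	if (!buffer_uptodate(bh)) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 */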
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; nothing to do if they all are. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk("ll_rw_block: Trying to read nonexistent block-device "
		       "%s (%ld)\n", kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Every buffer must match the device's current block size. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);

			/* Remember the intended device: drivers such as md
			 * may remap b_rdev to a different physical device. */
			bh[i]->b_rdev = bh[i]->b_dev;

			make_request(major, rw, bh[i]);
		}
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
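/*
 * ll_rw_swap_file: read or write one page of a swap file that is laid
 * out as nb blocks, with the block numbers given in b[].  Requests are
 * issued in batches of at most eight, and the caller sleeps on a
 * semaphore until the whole batch has completed.
 */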
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;

	for (j = 0, i = 0; i < nb;) {
		for (; j < 8 && i < nb; j++, i++, buf += buffersize) {
			if (j == 0) {
				/* Always sleep for the first slot of a batch. */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(blk_dev + major, req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
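/*
 * Called at boot: reset every device queue and request slot, then run
 * the init routine of each configured block driver.
 */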
int blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->request_fn = NULL;
		dev->current_request = NULL;
		dev->plug.rq_status = RQ_INACTIVE;
		dev->plug.cmd = -1;
		dev->plug.next = NULL;
		dev->plug_tq.routine = &unplug_device;
		dev->plug_tq.data = dev;
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits, 0, sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	/* No floppy driver: turn the floppy motors off via the FDC's
	 * digital output register. */
	outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDI_INIT
	cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
	return 0;
}