This source file includes the following definitions:
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
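/*
 * Low-level handling of read/write requests for block devices: the shared
 * request pool, per-device request queues, request merging and sorting,
 * and the ll_rw_block/ll_rw_page/ll_rw_swap_file entry points.
 */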
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

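/* The static pool of request structures shared by all block devices. */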
static struct request all_requests[NR_REQUEST];

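/* Processes sleep here while waiting for a free request slot. */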
struct wait_queue * wait_for_request = NULL;

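/* Per-major read-ahead amount. */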
int read_ahead[MAX_BLKDEV] = {0, };

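/* One request queue per major device; initialized by blk_dev_init(). */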
struct blk_dev_struct blk_dev[MAX_BLKDEV];

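/*
 * Device sizes in 1024-byte blocks, indexed as blk_size[major][minor];
 * a NULL major entry disables the end-of-device check in make_request().
 */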
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

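/*
 * Per-device software block size, blksize_size[major][minor];
 * a NULL or zero entry means the default BLOCK_SIZE is assumed.
 */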
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

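/* Per-device hardware sector size, hardsect_size[major][minor]. */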
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

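/*
 * Taskqueue routine: remove the plug and let the device's request_fn
 * process the requests that were queued behind it.
 */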
static void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	unsigned long flags;

	save_flags(flags);
	cli();
	dev->current_request = dev->plug.next;
	dev->plug.next = NULL;
	(dev->request_fn)();
	restore_flags(flags);
}

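/*
 * "Plug" an idle queue so that requests can be gathered before
 * unplug_device() (run from tq_scheduler) starts the transfer.
 */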
static inline void plug_device(struct blk_dev_struct * dev)
{
	if (!dev->current_request) {
		unsigned long flags;

		save_flags(flags);
		cli();
		dev->current_request = &dev->plug;
		queue_task_irq_off(&dev->plug_tq, &tq_scheduler);
		restore_flags(flags);
	}
}

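/*
 * Find a free slot among the first n entries of all_requests and mark it
 * active; returns NULL if none is free.  Must be called with interrupts off.
 */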
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}

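/* Sleep (uninterruptibly) on wait_for_request until get_request() succeeds. */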
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

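/* Try get_request() once, then fall back to sleeping in __get_request_wait(). */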
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}

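/*
 * One read-only bit per minor device, set by set_device_ro() and
 * tested by is_read_only().
 */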
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

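/* Account a read or write of nr_sectors against the per-disk statistics in kstat. */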
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	}
	else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk("drive_stat_acct: cmd not R/W?\n");
}

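/*
 * add_request() links a request into the device queue, keeping the queue
 * sorted with IN_ORDER() (the elevator), and starts request_fn if the queue
 * was empty; request_lock serialises the queue manipulation.
 */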
struct semaphore request_lock = MUTEX;

void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	down (&request_lock);
	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh && req->bh->b_dev==req->bh->b_rdev)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		up (&request_lock);
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	up (&request_lock);

	if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
		(dev->request_fn)();

	sti();
}

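/*
 * Build a request from a buffer head: check the device size, try to merge
 * the buffer into an existing request (at its tail or its head), and
 * otherwise allocate a free request slot and hand it to add_request().
 */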
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
			       rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
			return;
		}

	if (buffer_locked(bh))
		return;

	lock_buffer(bh);

	rw_ahead = 0;
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);
				return;
			}
			kstat.pgpgin++;
			max_req = NR_REQUEST;
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);
				return;
			}
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

	down (&request_lock);
	cli();

	if ((major == IDE0_MAJOR
	     || major == IDE1_MAJOR
	     || major == MD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR
	     || major == IDE2_MAJOR
	     || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major != SCSI_DISK_MAJOR &&
		    major != SCSI_CDROM_MAJOR &&
		    major != MD_MAJOR)
			req = req->next;

		while (req) {
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				up (&request_lock);
				sti();
				return;
			}

			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				up (&request_lock);
				sti();
				return;
			}

			req = req->next;
		}
	}

	up (&request_lock);

	req = get_request(max_req, bh->b_dev);
	sti();

	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}

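/* Request allocation wrapper used by the RAID (md) driver. */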
#ifdef CONFIG_BLK_DEV_MD

struct request *get_md_request (int max_req, kdev_t dev)
{
	return (get_request_wait (max_req, dev));
}

#endif

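/*
 * Read or write one page-sized block on behalf of the paging code;
 * the page is marked locked here before brw_page() submits the I/O.
 */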
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	unsigned int major = MAJOR(dev);
	int block = page;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), page);
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't page to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_page: bad block dev cmd, must be R/W");
	}
	if (mem_map[MAP_NR(buffer)].locked)
		panic ("ll_rw_page: page already locked");
	mem_map[MAP_NR(buffer)].locked = 1;
	brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
}

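/*
 * Main entry point for buffer-cache I/O: validate the buffers, plug the
 * device, and feed each buffer head to make_request().
 *
 * A minimal usage sketch (not part of this file; assumes the buffer head
 * came from getblk()) is the read-and-wait sequence a caller such as
 * bread() performs:
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 */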
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk("ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	plug_device(dev);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);
			bh[i]->b_rdev = bh[i]->b_dev;
			make_request(major, rw, bh[i]);
		}
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}

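/*
 * Synchronous swap-file I/O: build up to eight requests at a time from the
 * array of block numbers and wait for each batch on a local semaphore.
 */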
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}

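/*
 * Initialise the per-major queues and the request pool, then call the
 * init routine of every block driver configured into the kernel.
 */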
int blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->request_fn = NULL;
		dev->current_request = NULL;
		dev->plug.rq_status = RQ_INACTIVE;
		dev->plug.cmd = -1;
		dev->plug.next = NULL;
		dev->plug_tq.routine = &unplug_device;
		dev->plug_tq.data = dev;
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDI_INIT
	cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
	return 0;
}