This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
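
For orientation, a minimal usage sketch (not part of this file): this is how
callers typically drive ll_rw_block, assuming the 2.0-era buffer-cache helpers
getblk, wait_on_buffer and brelse, plus a valid kdev_t dev and block number:

	struct buffer_head *bh = getblk(dev, block_nr, BLOCK_SIZE);
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);	/* queue the read request */
		wait_on_buffer(bh);		/* sleep until the I/O completes */
	}
	/* ... examine bh->b_data ... */
	brelse(bh);
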
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

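/*
 * The static pool of request structures; free slots are handed out
 * by get_request() below.
 */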
static struct request all_requests[NR_REQUEST];

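/* Used to wait on when there are no free request slots. */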
struct wait_queue * wait_for_request = NULL;

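/* How many sectors to read ahead, per major device. */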
int read_ahead[MAX_BLKDEV] = {0, };

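/* One entry per major: the driver's request_fn and its request queue. */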
struct blk_dev_struct blk_dev[MAX_BLKDEV];

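/*
 * blk_size contains the size of all block devices, indexed
 * [MAJOR][MINOR], in units of 1024 bytes; a NULL major entry
 * disables size checking in make_request().
 */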
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

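/*
 * blksize_size contains the block size of the devices, indexed
 * [MAJOR][MINOR]; a NULL or zero entry means BLOCK_SIZE.
 */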
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

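/* hardsect_size contains the hardware sector size, indexed [MAJOR][MINOR]. */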
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

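/*
 * "Plug" the device if there are no outstanding requests: this forces
 * the transfer to start only after all requests have been queued.
 */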
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->rq_status = RQ_INACTIVE;
	plug->cmd = -1;
	plug->next = NULL;
	save_flags(flags);
	cli();
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}

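/* Remove the plug, if any, and start the driver's request function. */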
static inline void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}

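/*
 * Look for a free request slot in the first n entries of all_requests.
 * Interrupts must be disabled on the way in and stay disabled on the
 * way out.  Returns NULL if no slot is free.
 */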
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}

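/* Sleep until a free request slot in the first n entries is available. */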
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

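/* Grab a free request slot, sleeping in __get_request_wait() if necessary. */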
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}

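/* One bit per minor device: set if the device is read-only. */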
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

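/* Per-drive I/O accounting for the kernel statistics (kstat). */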
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	}
	else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk("drive_stat_acct: cmd not R/W?\n");
}

struct semaphore request_lock = MUTEX;

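/*
 * add_request() queues a request on a device, keeping the list sorted
 * by IN_ORDER() (elevator ordering), and starts the driver's request
 * function if the queue was idle.  By this point req->cmd is always
 * READ or WRITE, which drive_stat_acct() relies on.
 */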
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	down (&request_lock);
	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh && req->bh->b_dev==req->bh->b_rdev)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* The queue is empty: start the driver at once. */
		dev->current_request = req;
		up (&request_lock);
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: keep the list sorted by IN_ORDER(). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	up (&request_lock);

	/* For SCSI devices, call the request function unconditionally. */
	if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
		(dev->request_fn)();

	sti();
}

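/*
 * make_request() turns a buffer head into a block request: it checks
 * that the access lies within the device, tries to merge the buffer
 * into an already queued request, and otherwise allocates a fresh
 * request slot and hands it to add_request().
 */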
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* blk_size[] is in 1024-byte blocks; sector and count are 512-byte units. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
			       rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
			return;
		}

	/* A locked buffer is already being serviced. */
	if (buffer_locked(bh))
		return;

	lock_buffer(bh);

	rw_ahead = 0;	/* normal case; gets changed below for READA/WRITEA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* fall through */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);	/* buffer already up to date */
				return;
			}
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads may use the whole pool */
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* fall through */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);	/* buffer is clean, nothing to do */
				return;
			}
			/*
			 * Don't let write requests fill the queue completely:
			 * reserve the last third of the slots for reads.
			 */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

	/* Look for a queued request we can extend with this buffer. */
	cli();
	down (&request_lock);

	if (( major == IDE0_MAJOR
	     || major == IDE1_MAJOR
	     || major == MD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR
	     || major == IDE2_MAJOR
	     || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/*
		 * For drivers that keep the active request on the queue
		 * while working on it, skip that request; the others may
		 * be merged into at the head.
		 */
		if (major != SCSI_DISK_MAJOR &&
		    major != SCSI_CDROM_MAJOR &&
		    major != MD_MAJOR)
			req = req->next;

		while (req) {
			/* Can we append this buffer to the request? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				up (&request_lock);
				sti();
				return;
			}

			/* ...or prepend it? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				up (&request_lock);
				sti();
				return;
			}

			req = req->next;
		}
	}

	up (&request_lock);

	/* No merge possible: grab a fresh request slot... */
	req = get_request(max_req, bh->b_dev);
	sti();

	/* ...and if none is free, sleep for one (unless this is read-ahead). */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	/* Fill in the request and add it to the queue. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}

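/* Helper for the MD driver: wait for a free request slot. */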
#ifdef CONFIG_BLK_DEV_MD

struct request *get_md_request (int max_req, kdev_t dev)
{
	return (get_request_wait (max_req, dev));
}

#endif

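/*
 * ll_rw_page() reads or writes one page to/from a block device,
 * locking the page for the duration of the transfer.
 */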
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	unsigned int major = MAJOR(dev);
	int block = page;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), page);
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't page to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_page: bad block dev cmd, must be R/W");
	}
	if (mem_map[MAP_NR(buffer)].locked)
		panic ("ll_rw_page: page already locked");
	mem_map[MAP_NR(buffer)].locked = 1;
	brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
}

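/*
 * ll_rw_block() is the main entry point for buffer-based block I/O.
 * All buffers must belong to the same device; the device is plugged
 * when several buffers are submitted at once so the driver starts
 * only after all of them have been queued.
 */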
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL buffer heads. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
		"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify the requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/*
	 * If there is more than one buffer, plug the device so the
	 * driver is started only after all requests are on the queue.
	 */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);

			/* Initialize the 'real' device field; the MD driver may remap it. */
			bh[i]->b_rdev = bh[i]->b_dev;

			make_request(major, rw, bh[i]);
		}
	}
	unplug_device(dev);
	return;

sorry:
	/* Mark the failed buffers clean and not up to date. */
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}

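/*
 * ll_rw_swap_file() does synchronous swap I/O: it queues up to eight
 * requests at a time and waits for each batch on a semaphore.
 */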
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;

	/* Queue up to eight requests, then wait for them all to finish. */
	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* Always wait for the first slot of a batch. */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}

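/*
 * blk_dev_init() clears the device table and the request pool, then
 * calls the init routine of every configured block-device driver.
 */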
int blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->request_fn = NULL;
		dev->current_request = NULL;
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDI_INIT
	cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
	return 0;
}