This source file includes the following definitions:
- unplug_device
- plug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
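
/*
 * Low-level block I/O (the kernel's ll_rw_blk.c): this code queues
 * block device read/write requests, merges and sorts them per device,
 * and dispatches them to the drivers' request functions.
 */
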
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
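
/*
 * The static pool of request structures: each pending block I/O
 * operation is described by one of these NR_REQUEST entries.
 */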
static struct request all_requests[NR_REQUEST];
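
/*
 * The "disk" task queue: plugged devices queue an unplug_device()
 * call here so that their request functions get started later.
 */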
DECLARE_TASK_QUEUE(tq_disk);
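
/* Processes sleep here when no free request slot is available. */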
struct wait_queue * wait_for_request = NULL;
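
/* Per-major read-ahead setting (how much read-ahead is done on the device). */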
int read_ahead[MAX_BLKDEV] = {0, };
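
/*
 * One entry per major number: the driver's request function and the
 * head of its pending request queue.
 */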
struct blk_dev_struct blk_dev[MAX_BLKDEV];
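
/*
 * blk_size[major][minor] gives the device size in 1024-byte blocks;
 * a NULL major entry means size checking is skipped for that major.
 */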
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
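
/*
 * blksize_size[major][minor] gives the block size in bytes; a NULL or
 * zero entry means the default of BLOCK_SIZE (1024) is used.
 */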
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
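
/*
 * hardsect_size[major][minor] gives the hardware sector size in bytes,
 * filled in by drivers whose devices use non-default sectors.
 */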
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
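
/*
 * Remove the "plug" from a plugged device and start its request
 * function; runs from the tq_disk task queue.
 */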
static void unplug_device(void * data)
{
        struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
        unsigned long flags;

        save_flags(flags);
        cli();
        dev->current_request = dev->plug.next;
        dev->plug.next = NULL;
        (dev->request_fn)();
        restore_flags(flags);
}
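
/*
 * "Plug" an idle device: point current_request at the dummy plug
 * request and queue an unplug via tq_disk, so that new requests can
 * accumulate and be sorted before the device actually starts working.
 */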
static inline void plug_device(struct blk_dev_struct * dev)
{
        dev->current_request = &dev->plug;
        queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
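
/*
 * Find a free request slot among the first n entries of all_requests
 * and mark it active. Returns NULL if none is free. Interrupts must
 * be disabled by the caller.
 */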
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}
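
/*
 * Like get_request(), but sleep on wait_for_request until a slot
 * becomes free instead of returning NULL.
 */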
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                run_task_queue(&tq_disk);
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}
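
/* Per-device read-only bitmap, indexed by major and minor number. */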
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
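
/* Accumulate per-disk I/O statistics in kstat. */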
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk("drive_stat_acct: cmd not R/W?\n");
}
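
/* Protects manipulation of the per-device request lists. */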
struct semaphore request_lock = MUTEX;
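
/*
 * Insert a request into a device's queue, keeping the list in
 * elevator (IN_ORDER) order, and start the request function if the
 * queue was empty; SCSI devices get their request function called
 * even when the queue was not empty.
 */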
void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short disk_index;

        down (&request_lock);
        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh && req->bh->b_dev==req->bh->b_rdev)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                up (&request_lock);
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

        up (&request_lock);

        if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
                (dev->request_fn)();

        sti();
}
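
/*
 * Build a request from a buffer head: try to merge it into an
 * existing request on the device's queue (at the front or the back),
 * otherwise grab a free request slot and hand it to add_request().
 */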
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
                         rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
                        return;
                }

        if (buffer_locked(bh))
                return;

        lock_buffer(bh);

        rw_ahead = 0;
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;
                case READ:
                        if (buffer_uptodate(bh)) {
                                unlock_buffer(bh);
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST;
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;
                case WRITE:
                        if (!buffer_dirty(bh)) {
                                unlock_buffer(bh);
                                return;
                        }

                        kstat.pgpgout++;
                        max_req = (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

        down (&request_lock);

        cli();
        req = blk_dev[major].current_request;
        if (!req) {
                if (major != MD_MAJOR && major != LOOP_MAJOR)
                        plug_device(blk_dev + major);
        } else switch (major) {
                case IDE0_MAJOR:
                case IDE1_MAJOR:
                case FLOPPY_MAJOR:
                case IDE2_MAJOR:
                case IDE3_MAJOR:

                        req = req->next;
                        if (!req)
                                break;

                case SCSI_DISK_MAJOR:
                case SCSI_CDROM_MAJOR:
                case MD_MAJOR:

                        do {
                                if (req->sem)
                                        continue;
                                if (req->cmd != rw)
                                        continue;
                                if (req->nr_sectors >= 244)
                                        continue;
                                if (req->rq_dev != bh->b_dev)
                                        continue;

                                if (req->sector + req->nr_sectors == sector) {
                                        req->bhtail->b_reqnext = bh;
                                        req->bhtail = bh;

                                } else if (req->sector - count == sector) {
                                        bh->b_reqnext = req->bh;
                                        req->bh = bh;
                                        req->buffer = bh->b_data;
                                        req->current_nr_sectors = count;
                                        req->sector = sector;
                                } else
                                        continue;

                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                up (&request_lock);
                                sti();
                                return;
                        } while ((req = req->next) != NULL);
        }

        up (&request_lock);

        req = get_request(max_req, bh->b_dev);
        sti();

        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
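
/* Used by the MD (multiple devices) driver to allocate a request directly. */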
#ifdef CONFIG_BLK_DEV_MD

struct request *get_md_request (int max_req, kdev_t dev)
{
        return (get_request_wait (max_req, dev));
}

#endif
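
/*
 * Main entry point for block I/O: queue nr buffers for reading or
 * writing after checking that the device exists, the block sizes
 * match, and the device is not read-only for writes.
 */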
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        };

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk("ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);

                        bh[i]->b_rdev = bh[i]->b_dev;

                        make_request(major, rw, bh[i]);
                }
        }
        return;

sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
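
/*
 * Read or write swap pages directly, bypassing the buffer cache.
 * Requests are issued in batches of up to eight and the caller sleeps
 * on a semaphore until each batch completes.
 */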
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                run_task_queue(&tq_disk);
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}
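
/*
 * Set up the blk_dev table and the request pool, then call the init
 * routines of all configured block device drivers.
 */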
int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn = NULL;
                dev->current_request = NULL;
                dev->plug.rq_status = RQ_INACTIVE;
                dev->plug.cmd = -1;
                dev->plug.next = NULL;
                dev->plug_tq.routine = &unplug_device;
                dev->plug_tq.data = dev;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDI_INIT
        cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}