This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 *  This handles all read/write requests to block devices.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];
/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;
/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV] = {0, };
/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};
/*
 * blk_size contains the size of all block-devices in units of
 * 1024 byte sectors:
 *
 *		blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * blksize_size contains the size of all block-devices:
 *
 *		blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 *		hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR]) then 512 bytes is assumed.
 * Otherwise the sector size is hardsect_size[MAJOR][MINOR].
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all of the
 * requests on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->rq_status = RQ_INACTIVE;
	plug->cmd = -1;
	plug->next = NULL;
	save_flags(flags);
	cli();
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}
/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
/*
 * RO fail safe mechanism
 */
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
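
/*
 * Illustrative only -- not part of the original file.  A minimal sketch
 * of flipping the per-minor read-only bit and testing it again;
 * example_write_protect() is a hypothetical helper, MKDEV() comes from
 * <linux/kdev_t.h>, and kdevname() is used as elsewhere in this file.
 */
static void example_write_protect(int major, int minor)
{
	kdev_t dev = MKDEV(major, minor);

	set_device_ro(dev, 1);		/* set the ro bit for this minor */
	if (is_read_only(dev))
		printk("%s is now read-only\n", kdevname(dev));
}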

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	}
	else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk("drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:	/* same as HD_MAJOR */
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}
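
/*
 * Added commentary, not in the original file: the insertion loop above
 * is the classic elevator.  IN_ORDER(s1,s2), defined in <linux/blk.h>,
 * orders two requests by command, then device, then sector; a new
 * request is dropped in at the first point where the queue would stay
 * sorted, with the existing "wrap" point (!IN_ORDER(tmp,tmp->next))
 * treated as the end of the sweep.
 */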

/*
 * make_request() hands one buffer head to the device: it fills in a
 * request, possibly merging it with an adjacent pending request for the
 * same device, and queues it with add_request().
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			return;
		}

	/* if the buffer is already locked, it is already under I/O */
	if (buffer_locked(bh))
		return;

	lock_buffer(bh);

	rw_ahead = 0;	/* normal case; gets changed below for READA/WRITEA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);	/* already have it */
				return;
			}
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads take precedence */
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* drop into WRITE */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh);	/* nothing to write */
				return;
			}
			/* We don't allow the write-requests to fill up the
			 * queue completely:  we want some room for reads,
			 * as they take precedence. The last third of the
			 * requests are only for reads.
			 */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

/* look for a free request. */
	cli();

/* The scsi disk drivers and the IDE driver completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
	if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
	     || major == IDE1_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR
	     || major == IDE2_MAJOR
	     || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
			req = req->next;
		while (req) {
			/* merge at the tail of an existing request? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* or at the front? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);
	sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	unsigned long sector = page * (PAGE_SIZE / 512);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), sector);
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't page to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_page: bad block dev cmd, must be R/W");
	}
	req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = PAGE_SIZE / 512;
	req->current_nr_sectors = PAGE_SIZE / 512;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);	/* sleep until the driver ups the semaphore */
}
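
/*
 * Illustrative only -- not part of the original file.  Because
 * ll_rw_page() sleeps on its private semaphore until the transfer
 * completes, a caller that may sleep simply does this
 * (example_read_page() is a hypothetical helper; buf must point at
 * PAGE_SIZE bytes):
 */
static void example_read_page(kdev_t dev, unsigned long page, char * buf)
{
	ll_rw_page(READ, dev, page, buf);	/* returns only when the page is in */
}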

/*
 * This function can be used to request a number of buffers from a block
 * device. Currently the only restriction is that all buffers must belong
 * to the same device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);
			make_request(major, rw, bh[i]);
		}
	}
	unplug_device(dev);
	return;

      sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
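
/*
 * Illustrative only -- not part of the original file.  A minimal sketch
 * of the bread()-style pattern used by the buffer cache: start the read
 * with ll_rw_block(), then sleep until the buffer is unlocked.  Assumes
 * the buffer-cache helpers getblk(), wait_on_buffer() and brelse().
 */
static struct buffer_head * example_bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh = getblk(dev, block, size);

	if (buffer_uptodate(bh))
		return bh;			/* already valid in the cache */
	ll_rw_block(READ, 1, &bh);		/* queue the read */
	wait_on_buffer(bh);			/* sleep until the I/O completes */
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);				/* I/O failed: drop the buffer */
	return NULL;
}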

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* always block for the first request of a batch */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
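
/*
 * Illustrative only -- not part of the original file.  Writing out one
 * page that is stored in four 1K blocks on the swap device might look
 * like this (example_swap_out() is a hypothetical helper and the block
 * numbers are made up; buffersize = PAGE_SIZE/4 = 1024 on a 4K page):
 */
static void example_swap_out(kdev_t swap_dev, char * page_addr)
{
	unsigned int zones[4] = { 120, 121, 122, 123 };

	/* returns after all four requests have completed */
	ll_rw_swap_file(WRITE, swap_dev, zones, 4, page_addr);
}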

int blk_dev_init(void)
{
	struct request * req;

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);	/* no floppy driver: quiet the floppy controller (DOR: motors off) */
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif /* CONFIG_CM206 */
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
	return 0;
}