This source file includes the following definitions.
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- get_md_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include <linux/blk.h>
23
24
25
26
27
28 static struct request all_requests[NR_REQUEST];
29
30
31
32
33 struct wait_queue * wait_for_request = NULL;
34
35
36
37 int read_ahead[MAX_BLKDEV] = {0, };
38
39
40
41
42
43 struct blk_dev_struct blk_dev[MAX_BLKDEV];
44
45
46
47
48
49
50
51
52
53 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
54
55
56
57
58
59
60
61
62 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
63
64
65
66
67
68
69
70
71
72
73
74
75
76 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
77
78
79
80
81
82
83 static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
84 {
85 unsigned long flags;
86
87 plug->rq_status = RQ_INACTIVE;
88 plug->cmd = -1;
89 plug->next = NULL;
90 save_flags(flags);
91 cli();
92 if (!dev->current_request)
93 dev->current_request = plug;
94 restore_flags(flags);
95 }
96
97
98
99
100 static inline void unplug_device(struct blk_dev_struct * dev)
101 {
102 struct request * req;
103 unsigned long flags;
104
105 save_flags(flags);
106 cli();
107 req = dev->current_request;
108 if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
109 dev->current_request = req->next;
110 (dev->request_fn)();
111 }
112 restore_flags(flags);
113 }
114
115
116
117
118
119
120 static inline struct request * get_request(int n, kdev_t dev)
121 {
122 static struct request *prev_found = NULL, *prev_limit = NULL;
123 register struct request *req, *limit;
124
125 if (n <= 0)
126 panic("get_request(%d): impossible!\n", n);
127
128 limit = all_requests + n;
129 if (limit != prev_limit) {
130 prev_limit = limit;
131 prev_found = all_requests;
132 }
133 req = prev_found;
134 for (;;) {
135 req = ((req > all_requests) ? req : limit) - 1;
136 if (req->rq_status == RQ_INACTIVE)
137 break;
138 if (req == prev_found)
139 return NULL;
140 }
141 prev_found = req;
142 req->rq_status = RQ_ACTIVE;
143 req->rq_dev = dev;
144 return req;
145 }
146
147
148
149
150 static struct request * __get_request_wait(int n, kdev_t dev)
151 {
152 register struct request *req;
153 struct wait_queue wait = { current, NULL };
154
155 add_wait_queue(&wait_for_request, &wait);
156 for (;;) {
157 unplug_device(MAJOR(dev)+blk_dev);
158 current->state = TASK_UNINTERRUPTIBLE;
159 cli();
160 req = get_request(n, dev);
161 sti();
162 if (req)
163 break;
164 schedule();
165 }
166 remove_wait_queue(&wait_for_request, &wait);
167 current->state = TASK_RUNNING;
168 return req;
169 }
170
171 static inline struct request * get_request_wait(int n, kdev_t dev)
172 {
173 register struct request *req;
174
175 cli();
176 req = get_request(n, dev);
177 sti();
178 if (req)
179 return req;
180 return __get_request_wait(n, dev);
181 }
182
183
184
185 static long ro_bits[MAX_BLKDEV][8];
186
187 int is_read_only(kdev_t dev)
188 {
189 int minor,major;
190
191 major = MAJOR(dev);
192 minor = MINOR(dev);
193 if (major < 0 || major >= MAX_BLKDEV) return 0;
194 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
195 }
196
197 void set_device_ro(kdev_t dev,int flag)
198 {
199 int minor,major;
200
201 major = MAJOR(dev);
202 minor = MINOR(dev);
203 if (major < 0 || major >= MAX_BLKDEV) return;
204 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
205 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
206 }
207
208 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
209 {
210 kstat.dk_drive[disk_index]++;
211 if (cmd == READ) {
212 kstat.dk_drive_rio[disk_index]++;
213 kstat.dk_drive_rblk[disk_index] += nr_sectors;
214 }
215 else if (cmd == WRITE) {
216 kstat.dk_drive_wio[disk_index]++;
217 kstat.dk_drive_wblk[disk_index] += nr_sectors;
218 } else
219 printk("drive_stat_acct: cmd not R/W?\n");
220 }
221
222
223
224
225
226
227
228
229
230
231 struct semaphore request_lock = MUTEX;
232
233 void add_request(struct blk_dev_struct * dev, struct request * req)
234 {
235 struct request * tmp;
236 short disk_index;
237
238 down (&request_lock);
239 switch (MAJOR(req->rq_dev)) {
240 case SCSI_DISK_MAJOR:
241 disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
242 if (disk_index < 4)
243 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
244 break;
245 case IDE0_MAJOR:
246 case XT_DISK_MAJOR:
247 disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
248 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
249 break;
250 case IDE1_MAJOR:
251 disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
252 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
253 default:
254 break;
255 }
256
257 req->next = NULL;
258 cli();
259 if (req->bh && req->bh->b_dev==req->bh->b_rdev)
260 mark_buffer_clean(req->bh);
261 if (!(tmp = dev->current_request)) {
262 dev->current_request = req;
263 up (&request_lock);
264 (dev->request_fn)();
265 sti();
266 return;
267 }
268 for ( ; tmp->next ; tmp = tmp->next) {
269 if ((IN_ORDER(tmp,req) ||
270 !IN_ORDER(tmp,tmp->next)) &&
271 IN_ORDER(req,tmp->next))
272 break;
273 }
274 req->next = tmp->next;
275 tmp->next = req;
276
277 up (&request_lock);
278
279 if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
280 (dev->request_fn)();
281
282 sti();
283 }
284
285 static void make_request(int major,int rw, struct buffer_head * bh)
286 {
287 unsigned int sector, count;
288 struct request * req;
289 int rw_ahead, max_req;
290
291 count = bh->b_size >> 9;
292 sector = bh->b_blocknr * count;
293 if (blk_size[major])
294 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
295 bh->b_state = 0;
296 printk("attempt to access beyond end of device\n");
297 return;
298 }
299
300 if (buffer_locked(bh))
301 return;
302
303 lock_buffer(bh);
304
305 rw_ahead = 0;
306 switch (rw) {
307 case READA:
308 rw_ahead = 1;
309 rw = READ;
310 case READ:
311 if (buffer_uptodate(bh)) {
312 unlock_buffer(bh);
313 return;
314 }
315 kstat.pgpgin++;
316 max_req = NR_REQUEST;
317 break;
318 case WRITEA:
319 rw_ahead = 1;
320 rw = WRITE;
321 case WRITE:
322 if (!buffer_dirty(bh)) {
323 unlock_buffer(bh);
324 return;
325 }
326
327
328
329
330
331 kstat.pgpgout++;
332 max_req = (NR_REQUEST * 2) / 3;
333 break;
334 default:
335 printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
336 unlock_buffer(bh);
337 return;
338 }
339
340
341 cli();
342 down (&request_lock);
343
344
345
346
347
348 if (( major == IDE0_MAJOR
349 || major == IDE1_MAJOR
350 || major == MD_MAJOR
351 || major == FLOPPY_MAJOR
352 || major == SCSI_DISK_MAJOR
353 || major == SCSI_CDROM_MAJOR
354 || major == IDE2_MAJOR
355 || major == IDE3_MAJOR)
356 && (req = blk_dev[major].current_request))
357 {
358 if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
359 req = req->next;
360
361 while (req) {
362 if (req->rq_dev == bh->b_dev &&
363 !req->sem &&
364 req->cmd == rw &&
365 req->sector + req->nr_sectors == sector &&
366 req->nr_sectors < 244)
367 {
368 req->bhtail->b_reqnext = bh;
369 req->bhtail = bh;
370 req->nr_sectors += count;
371 mark_buffer_clean(bh);
372 up (&request_lock);
373 sti();
374 return;
375 }
376
377 if (req->rq_dev == bh->b_dev &&
378 !req->sem &&
379 req->cmd == rw &&
380 req->sector - count == sector &&
381 req->nr_sectors < 244)
382 {
383 req->nr_sectors += count;
384 bh->b_reqnext = req->bh;
385 req->buffer = bh->b_data;
386 req->current_nr_sectors = count;
387 req->sector = sector;
388 mark_buffer_clean(bh);
389 req->bh = bh;
390 up (&request_lock);
391 sti();
392 return;
393 }
394
395 req = req->next;
396 }
397 }
398
399 up (&request_lock);
400
401
402 req = get_request(max_req, bh->b_dev);
403 sti();
404
405
406 if (!req) {
407 if (rw_ahead) {
408 unlock_buffer(bh);
409 return;
410 }
411 req = __get_request_wait(max_req, bh->b_dev);
412 }
413
414
415 req->cmd = rw;
416 req->errors = 0;
417 req->sector = sector;
418 req->nr_sectors = count;
419 req->current_nr_sectors = count;
420 req->buffer = bh->b_data;
421 req->sem = NULL;
422 req->bh = bh;
423 req->bhtail = bh;
424 req->next = NULL;
425 add_request(major+blk_dev,req);
426 }
427
428 #ifdef CONFIG_BLK_DEV_MD
429
430 struct request *get_md_request (int max_req, kdev_t dev)
431 {
432 return (get_request_wait (max_req, dev));
433 }
434
435 #endif
436
437
438
439
440
441
442 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
443 {
444 unsigned int major = MAJOR(dev);
445 int block = page;
446
447 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
448 printk("Trying to read nonexistent block-device %s (%ld)\n",
449 kdevname(dev), page);
450 return;
451 }
452 switch (rw) {
453 case READ:
454 break;
455 case WRITE:
456 if (is_read_only(dev)) {
457 printk("Can't page to read-only device %s\n",
458 kdevname(dev));
459 return;
460 }
461 break;
462 default:
463 panic("ll_rw_page: bad block dev cmd, must be R/W");
464 }
465 if (mem_map[MAP_NR(buffer)].locked)
466 panic ("ll_rw_page: page already locked");
467 mem_map[MAP_NR(buffer)].locked = 1;
468 brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
469 }
470
471
472
473
474
475 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
476 {
477 unsigned int major;
478 struct request plug;
479 int correct_size;
480 struct blk_dev_struct * dev;
481 int i;
482
483
484 while (!*bh) {
485 bh++;
486 if (--nr <= 0)
487 return;
488 };
489
490 dev = NULL;
491 if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
492 dev = blk_dev + major;
493 if (!dev || !dev->request_fn) {
494 printk(
495 "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
496 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
497 goto sorry;
498 }
499
500
501 correct_size = BLOCK_SIZE;
502 if (blksize_size[major]) {
503 i = blksize_size[major][MINOR(bh[0]->b_dev)];
504 if (i)
505 correct_size = i;
506 }
507
508
509 for (i = 0; i < nr; i++) {
510 if (bh[i] && bh[i]->b_size != correct_size) {
511 printk("ll_rw_block: device %s: "
512 "only %d-char blocks implemented (%lu)\n",
513 kdevname(bh[0]->b_dev),
514 correct_size, bh[i]->b_size);
515 goto sorry;
516 }
517 }
518
519 if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
520 printk("Can't write to read-only device %s\n",
521 kdevname(bh[0]->b_dev));
522 goto sorry;
523 }
524
525
526
527
528
529
530 if (nr > 1)
531 plug_device(dev, &plug);
532 for (i = 0; i < nr; i++) {
533 if (bh[i]) {
534 set_bit(BH_Req, &bh[i]->b_state);
535
536
537 bh[i]->b_rdev = bh[i]->b_dev;
538
539 make_request(major, rw, bh[i]);
540 }
541 }
542 unplug_device(dev);
543 return;
544
545 sorry:
546 for (i = 0; i < nr; i++) {
547 if (bh[i]) {
548 clear_bit(BH_Dirty, &bh[i]->b_state);
549 clear_bit(BH_Uptodate, &bh[i]->b_state);
550 }
551 }
552 return;
553 }
554
555 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
556 {
557 int i, j;
558 int buffersize;
559 struct request * req[8];
560 unsigned int major = MAJOR(dev);
561 struct semaphore sem = MUTEX_LOCKED;
562
563 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
564 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
565 return;
566 }
567 switch (rw) {
568 case READ:
569 break;
570 case WRITE:
571 if (is_read_only(dev)) {
572 printk("Can't swap to read-only device %s\n",
573 kdevname(dev));
574 return;
575 }
576 break;
577 default:
578 panic("ll_rw_swap: bad block dev cmd, must be R/W");
579 }
580 buffersize = PAGE_SIZE / nb;
581
582 for (j=0, i=0; i<nb;)
583 {
584 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
585 {
586 if (j == 0) {
587 req[j] = get_request_wait(NR_REQUEST, dev);
588 } else {
589 cli();
590 req[j] = get_request(NR_REQUEST, dev);
591 sti();
592 if (req[j] == NULL)
593 break;
594 }
595 req[j]->cmd = rw;
596 req[j]->errors = 0;
597 req[j]->sector = (b[i] * buffersize) >> 9;
598 req[j]->nr_sectors = buffersize >> 9;
599 req[j]->current_nr_sectors = buffersize >> 9;
600 req[j]->buffer = buf;
601 req[j]->sem = &sem;
602 req[j]->bh = NULL;
603 req[j]->next = NULL;
604 add_request(major+blk_dev,req[j]);
605 }
606 while (j > 0) {
607 j--;
608 down(&sem);
609 }
610 }
611 }
612
613 int blk_dev_init(void)
614 {
615 struct request * req;
616 struct blk_dev_struct *dev;
617
618 for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
619 dev->request_fn = NULL;
620 dev->current_request = NULL;
621 }
622
623 req = all_requests + NR_REQUEST;
624 while (--req >= all_requests) {
625 req->rq_status = RQ_INACTIVE;
626 req->next = NULL;
627 }
628 memset(ro_bits,0,sizeof(ro_bits));
629 #ifdef CONFIG_BLK_DEV_RAM
630 rd_init();
631 #endif
632 #ifdef CONFIG_BLK_DEV_LOOP
633 loop_init();
634 #endif
635 #ifdef CONFIG_BLK_DEV_IDE
636 ide_init();
637 #endif
638 #ifdef CONFIG_BLK_DEV_HD
639 hd_init();
640 #endif
641 #ifdef CONFIG_BLK_DEV_XD
642 xd_init();
643 #endif
644 #ifdef CONFIG_BLK_DEV_FD
645 floppy_init();
646 #else
647 outb_p(0xc, 0x3f2);
648 #endif
649 #ifdef CONFIG_CDI_INIT
650 cdi_init();
651 #endif CONFIG_CDI_INIT
652 #ifdef CONFIG_CDU31A
653 cdu31a_init();
654 #endif CONFIG_CDU31A
655 #ifdef CONFIG_MCD
656 mcd_init();
657 #endif CONFIG_MCD
658 #ifdef CONFIG_MCDX
659 mcdx_init();
660 #endif CONFIG_MCDX
661 #ifdef CONFIG_SBPCD
662 sbpcd_init();
663 #endif CONFIG_SBPCD
664 #ifdef CONFIG_AZTCD
665 aztcd_init();
666 #endif CONFIG_AZTCD
667 #ifdef CONFIG_CDU535
668 sony535_init();
669 #endif CONFIG_CDU535
670 #ifdef CONFIG_GSCD
671 gscd_init();
672 #endif CONFIG_GSCD
673 #ifdef CONFIG_CM206
674 cm206_init();
675 #endif
676 #ifdef CONFIG_OPTCD
677 optcd_init();
678 #endif CONFIG_OPTCD
679 #ifdef CONFIG_SJCD
680 sjcd_init();
681 #endif CONFIG_SJCD
682 #ifdef CONFIG_BLK_DEV_MD
683 md_init();
684 #endif CONFIG_BLK_DEV_MD
685 return 0;
686 }