This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include <linux/blk.h>
23
24
25
26
27
28
29
30
31
/*
 * The static request pool shared by all block devices.  Slots are
 * claimed in get_request() and recycled by marking them RQ_INACTIVE.
 */
#define NR_REQUEST 64
static struct request all_requests[NR_REQUEST];

/*
 * Tasks sleep here while waiting for a free slot in all_requests[];
 * see __get_request_wait().
 */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead setting (presumably in sectors -- TODO confirm). */
int read_ahead[MAX_BLKDEV] = {0, };

/* Per-major driver entry: request function plus head of its request queue. */
struct blk_dev_struct blk_dev[MAX_BLKDEV];

/*
 * blk_size[major] points to an array indexed by minor giving the device
 * size in units of 1024 bytes (make_request() compares it against
 * sectors>>1), or NULL if the driver does not provide sizes.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size[major] points to an array indexed by minor giving the
 * block size in bytes for that device (used by ll_rw_block() to check
 * buffer sizes), or NULL to accept the BLOCK_SIZE default.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size[major] points to an array indexed by minor giving the
 * hardware sector size, or NULL.  Not referenced in this file --
 * presumably consumed by the individual drivers (TODO confirm).
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
82
83
84
85
86
87
/*
 * "Plug" the device: insert a dummy inactive request at the head of an
 * empty queue so the driver's request function is not started while a
 * batch of requests is still being queued; unplug_device() removes the
 * plug and kicks the driver.  Interrupts are disabled around the
 * test-and-set because the queue head is also touched from interrupt
 * context.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->rq_status = RQ_INACTIVE;	/* marks this as a dummy request */
	plug->cmd = -1;			/* -1 identifies the plug in unplug_device() */
	plug->next = NULL;
	save_flags(flags);
	cli();
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}
101
102
103
104
/*
 * Remove the plug (if one is at the head of the queue) and start the
 * driver's request function so the queued requests get processed.
 * A plug is recognised by rq_status == RQ_INACTIVE && cmd == -1,
 * exactly as set up in plug_device().
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();	/* start real I/O now that the plug is gone */
	}
	restore_flags(flags);
}
119
120
121
122
123
124
/*
 * Look for a free request slot among the first n entries of
 * all_requests[] and claim it for 'dev'.  Returns NULL if none is
 * free.  Callers pass different n for reads and writes (see
 * make_request()), so reads can always find a slot even when writes
 * have filled their share of the table.
 *
 * A static rotor (prev_found) remembers where the last search ended so
 * successive calls spread over the table instead of always rescanning
 * from the same end.  Caller must have interrupts disabled so the
 * RQ_INACTIVE test and the RQ_ACTIVE claim are atomic.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		/* window size changed: restart the rotor */
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step backwards, wrapping from all_requests up to limit-1 */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)	/* scanned the whole window: none free */
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
151
152
153
154
/*
 * Sleep until a request slot becomes free, then return it.
 *
 * The task state is set to TASK_UNINTERRUPTIBLE *before* the slot is
 * tested, so a wakeup arriving between the failed get_request() and
 * schedule() is not lost.  The device queue is unplugged each time
 * round so pending I/O drains and frees up slots.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);	/* let queued I/O complete */
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
175
176 static inline struct request * get_request_wait(int n, kdev_t dev)
177 {
178 register struct request *req;
179
180 cli();
181 req = get_request(n, dev);
182 sti();
183 if (req)
184 return req;
185 return __get_request_wait(n, dev);
186 }
187
188
189
190 static long ro_bits[MAX_BLKDEV][8];
191
192 int is_read_only(kdev_t dev)
193 {
194 int minor,major;
195
196 major = MAJOR(dev);
197 minor = MINOR(dev);
198 if (major < 0 || major >= MAX_BLKDEV) return 0;
199 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
200 }
201
202 void set_device_ro(kdev_t dev,int flag)
203 {
204 int minor,major;
205
206 major = MAJOR(dev);
207 minor = MINOR(dev);
208 if (major < 0 || major >= MAX_BLKDEV) return;
209 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
210 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
211 }
212
213 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
214 {
215 kstat.dk_drive[disk_index]++;
216 if (cmd == READ) {
217 kstat.dk_drive_rio[disk_index]++;
218 kstat.dk_drive_rblk[disk_index] += nr_sectors;
219 }
220 else if (cmd == WRITE) {
221 kstat.dk_drive_wio[disk_index]++;
222 kstat.dk_drive_wblk[disk_index] += nr_sectors;
223 } else
224 printk("drive_stat_acct: cmd not R/W?\n");
225 }
226
227
228
229
230
231
232
233
234
/*
 * add_request() inserts a request into a device's queue.  Interrupts
 * are disabled while the list is manipulated because completion
 * handlers walk the same list.  By this point req->cmd is READ or
 * WRITE, so the transfer can be accounted before queueing.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* Per-drive statistics: map (major,minor) to a kstat drive slot. */
	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)	/* only 4 kstat slots available */
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			/* fall through (to default: break) */
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);	/* buffer is now owned by the request */
	if (!(tmp = dev->current_request)) {
		/* Queue was idle: this request becomes the head; start the driver. */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: find the spot that keeps the queue IN_ORDER(),
	   so head movement stays roughly monotonic. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI drivers queue internally, so kick them on every new request. */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}
283
/*
 * make_request() turns one buffer head into block I/O on device
 * 'major'.  Where possible the buffer is merged onto an existing
 * queued request (back- or front-merge) instead of consuming a new
 * request slot.  The buffer is locked here; the driver unlocks it on
 * completion.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;		/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;		/* first sector covered by bh */
	/* Bounds check: blk_size[] is in 1kB units, hence the >>1. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			return;
		}

	/* A locked buffer already has I/O in flight; nothing to do. */
	if (buffer_locked(bh))
		return;
	/* Lock it now so nobody else starts I/O on it while we work. */
	lock_buffer(bh);

	rw_ahead = 0;	/* normal requests block when the table is full */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* fall through: handled as READ below */
		case READ:
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh); /* data already present */
				return;
			}
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads may use the whole table */
			break;
		case WRITEA:
			rw_ahead = 1;
			rw = WRITE;	/* fall through: handled as WRITE below */
		case WRITE:
			if (!buffer_dirty(bh)) {
				unlock_buffer(bh); /* nothing to write back */
				return;
			}
			/* Writes get only 2/3 of the table so reads are
			   never starved by a flood of writes. */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
			unlock_buffer(bh);
			return;
	}

	/* Try to merge into a request already queued for this device.
	   Interrupts stay disabled until the request is safely queued. */
	cli();
	if (( major == IDE0_MAJOR
	    || major == IDE1_MAJOR
	    || major == FLOPPY_MAJOR
	    || major == SCSI_DISK_MAJOR
	    || major == SCSI_CDROM_MAJOR
	    || major == IDE2_MAJOR
	    || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* On non-SCSI devices the head request may already be in
		   progress, so never merge into it. */
		if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
			req = req->next;
		while (req) {
			/* Back-merge: buffer directly follows the request. */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&	/* skip synchronous (swap) requests */
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)	/* size cap -- TODO confirm where 244 comes from */
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* Front-merge: buffer directly precedes the request. */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* No merge possible: grab a request slot of our own. */
	req = get_request(max_req, bh->b_dev);
	sti();

	/* Table full: read-/write-ahead is simply dropped, ordinary
	   requests sleep until a slot frees up. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	/* Fill in the request and hand it to the elevator. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
419
420
421
422
423
424
425 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
426 {
427 unsigned int major = MAJOR(dev);
428 int block = page;
429
430 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
431 printk("Trying to read nonexistent block-device %s (%ld)\n",
432 kdevname(dev), page);
433 return;
434 }
435 switch (rw) {
436 case READ:
437 break;
438 case WRITE:
439 if (is_read_only(dev)) {
440 printk("Can't page to read-only device %s\n",
441 kdevname(dev));
442 return;
443 }
444 break;
445 default:
446 panic("ll_rw_page: bad block dev cmd, must be R/W");
447 }
448 if (mem_map[MAP_NR(buffer)].locked)
449 panic ("ll_rw_page: page already locked");
450 mem_map[MAP_NR(buffer)].locked = 1;
451 brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
452 }
453
454
455
456
457
/*
 * ll_rw_block() queues block I/O for 'nr' buffer heads.  All buffers
 * are expected to belong to the same device and match its block size.
 * On error the buffers are marked clean and not-uptodate so waiters
 * see a failure instead of stale data.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request used to plug the queue */
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; return if the list is all NULLs. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine this device's block size (default BLOCK_SIZE). */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%lu)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/* For multiple buffers, plug the queue first so the driver does
	   not start until all of them are queued, then unplug to kick
	   off the I/O in one go. */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state); /* buffer has seen I/O */
			make_request(major, rw, bh[i]);
		}
	}
	unplug_device(dev);
	return;

sorry:
	/* Error path: invalidate the buffers so readers will retry. */
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
533
/*
 * Read or write swap blocks directly, bypassing the buffer cache.
 * 'b' is an array of 'nb' block numbers; each block is PAGE_SIZE/nb
 * bytes and maps to successive chunks of 'buf'.  Requests are issued
 * in batches of up to 8 and completion is awaited via a shared
 * semaphore (one down() per issued request) before the next batch.
 */
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}
	switch (rw) {
		case READ:
			break;
		case WRITE:
			if (is_read_only(dev)) {
				printk("Can't swap to read-only device %s\n",
				       kdevname(dev));
				return;
			}
			break;
		default:
			panic("ll_rw_swap: bad block dev cmd, must be R/W");
	}
	buffersize = PAGE_SIZE / nb;	/* bytes per swap block */

	for (j=0, i=0; i<nb;)
	{
		/* Build a batch: the first request may sleep for a slot,
		   the rest are opportunistic -- stop the batch early if
		   the table runs dry. */
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;	/* completion does up(&sem) -- TODO confirm in end_request */
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		/* Wait for every request issued in this batch. */
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
591
592 int blk_dev_init(void)
593 {
594 struct request * req;
595 struct blk_dev_struct *dev;
596
597 for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
598 dev->request_fn = NULL;
599 dev->current_request = NULL;
600 }
601
602 req = all_requests + NR_REQUEST;
603 while (--req >= all_requests) {
604 req->rq_status = RQ_INACTIVE;
605 req->next = NULL;
606 }
607 memset(ro_bits,0,sizeof(ro_bits));
608 #ifdef CONFIG_BLK_DEV_RAM
609 rd_init();
610 #endif
611 #ifdef CONFIG_BLK_DEV_IDE
612 ide_init();
613 #endif
614 #ifdef CONFIG_BLK_DEV_HD
615 hd_init();
616 #endif
617 #ifdef CONFIG_BLK_DEV_XD
618 xd_init();
619 #endif
620 #ifdef CONFIG_BLK_DEV_FD
621 floppy_init();
622 #else
623 outb_p(0xc, 0x3f2);
624 #endif
625 #ifdef CONFIG_CDI_INIT
626 cdi_init();
627 #endif CONFIG_CDI_INIT
628 #ifdef CONFIG_CDU31A
629 cdu31a_init();
630 #endif CONFIG_CDU31A
631 #ifdef CONFIG_MCD
632 mcd_init();
633 #endif CONFIG_MCD
634 #ifdef CONFIG_MCDX
635 mcdx_init();
636 #endif CONFIG_MCDX
637 #ifdef CONFIG_SBPCD
638 sbpcd_init();
639 #endif CONFIG_SBPCD
640 #ifdef CONFIG_AZTCD
641 aztcd_init();
642 #endif CONFIG_AZTCD
643 #ifdef CONFIG_CDU535
644 sony535_init();
645 #endif CONFIG_CDU535
646 #ifdef CONFIG_GSCD
647 gscd_init();
648 #endif CONFIG_GSCD
649 #ifdef CONFIG_CM206
650 cm206_init();
651 #endif
652 #ifdef CONFIG_OPTCD
653 optcd_init();
654 #endif CONFIG_OPTCD
655 #ifdef CONFIG_SJCD
656 sjcd_init();
657 #endif CONFIG_SJCD
658 return 0;
659 }