This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * This handles all read/write requests to block devices.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/*
 * blk_size contains the size of all block-devices in units of 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->rq_status = RQ_INACTIVE;
	plug->cmd = -1;
	plug->next = NULL;
	save_flags(flags);
	cli();
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}

/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}
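
/*
 * A minimal sketch of the plugging idiom, as ll_rw_block() below uses it
 * (illustrative only; all identifiers are the ones defined in this file):
 *
 *	struct request plug;
 *
 *	plug_device(dev, &plug);	-- hold back dev->request_fn
 *	... queue several requests via make_request() ...
 *	unplug_device(dev);		-- pop the plug, start the transfer
 *
 * The dummy "plug" request lives on the caller's stack; it is recognised
 * by rq_status == RQ_INACTIVE and cmd == -1 and is never handed to the
 * driver.
 */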

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}


static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
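
/*
 * A hedged sketch of how a block driver's ioctl handler typically flips
 * the read-only bit.  BLKROSET is the generic block ioctl of this kernel,
 * but the surrounding handler shape is illustrative, not copied from any
 * particular driver:
 *
 *	case BLKROSET:
 *		if (!suser())
 *			return -EACCES;
 *		set_device_ro(inode->i_rdev, arg != 0);
 *		return 0;
 */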

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ || cmd == READA) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	}
	else if (cmd == WRITE || cmd == WRITEA) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	}
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}
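
/*
 * For orientation, a hedged sketch of the consumer side: a driver's
 * request_fn drains dev->current_request using the helpers from
 * <linux/blk.h>.  "mydev" is hypothetical; INIT_REQUEST, CURRENT and
 * end_request() are the blk.h helpers a driver gets after defining
 * MAJOR_NR:
 *
 *	static void do_mydev_request(void)
 *	{
 *	repeat:
 *		INIT_REQUEST;		-- validates CURRENT, returns if empty
 *		... program the hardware from CURRENT->sector,
 *		    CURRENT->buffer, CURRENT->current_nr_sectors ...
 *		end_request(1);		-- success: complete and dequeue
 *		goto repeat;
 *	}
 */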

static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/* WRITEA/READA is a special case - it is not really needed, so if
	   the buffer is locked, we just forget about it, else it's a
	   normal read/write. */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (buffer_locked(bh))
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < ((sector + count)>>1)) {
			bh->b_state = 0;
			printk("attempt to access beyond end of device\n");
			return;
		}
	/* if the buffer is already locked, someone else is handling it */
	if (buffer_locked(bh))
		return;

	lock_buffer(bh);
	if ((rw == WRITE && !buffer_dirty(bh)) || (rw == READ && buffer_uptodate(bh))) {
		unlock_buffer(bh);
		return;
	}

	/* we don't allow the write-requests to fill up the queue completely:
	 * we want some room for reads: they take precedence. The last third
	 * of the requests are only for reads.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

	/* look for a free request. */
	cli();

	/* The scsi disk drivers and the IDE driver completely remove the
	 * request from the queue when they start processing an entry.  For
	 * this reason it is safe to continue to add links to the top entry
	 * for those devices.
	 */
	if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
	     || major == IDE1_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR
	     || major == IDE2_MAJOR
	     || major == IDE3_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
#ifdef CONFIG_BLK_DEV_HD
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
		if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
			req = req->next;
		while (req) {
			/* can we append this buffer to an existing request? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* or can we prepend it? */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* find an unused request. */
	req = get_request(max_req, bh->b_dev);
	sti();

	/* if no request available: if rw_ahead, forget it; otherwise block */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	unsigned long sector = page * (PAGE_SIZE / 512);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(dev), sector);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device %s\n",
		       kdevname(dev));
		return;
	}
	req = get_request_wait(NR_REQUEST, dev);
	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = PAGE_SIZE / 512;
	req->current_nr_sectors = PAGE_SIZE / 512;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);	/* block until the driver ups req->sem on completion */
}
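
/*
 * A hedged usage sketch for ll_rw_page(): the caller reads or writes one
 * PAGE_SIZE chunk synchronously (the semaphore above makes the call block
 * until the transfer completes).  Variable names are illustrative:
 *
 *	char *page_address = ...;	-- page-sized, page-aligned buffer
 *	unsigned long offset = ...;	-- page index on the device
 *
 *	ll_rw_page(READ, swap_device, offset, page_address);
 *	-- on return the page contents are valid, or the driver has
 *	   already reported an I/O error
 */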

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk("ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk("ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			set_bit(BH_Req, &bh[i]->b_state);
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			clear_bit(BH_Dirty, &bh[i]->b_state);
			clear_bit(BH_Uptodate, &bh[i]->b_state);
		}
	}
	return;
}
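
/*
 * A hedged usage sketch for ll_rw_block(): this is the normal entry point
 * for the buffer cache, e.g. a bread()-style read.  getblk(), buffer_uptodate(),
 * wait_on_buffer() and brelse() are the standard buffer-cache helpers:
 *
 *	struct buffer_head * bh;
 *
 *	bh = getblk(dev, block, size);
 *	if (!buffer_uptodate(bh)) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 *	... use bh->b_data ...
 *	brelse(bh);
 */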

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw != READ && rw != WRITE) {
		printk("ll_rw_swap_file: bad block dev command, must be R/W\n");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device %s\n",
		       kdevname(dev));
		return;
	}

	buffersize = PAGE_SIZE / nb;

	/* submit up to 8 requests at a time, then wait for them all */
	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* always block for the first request */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}

int blk_dev_init(void)
{
	struct request * req;

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);	/* no floppy driver: motors off, DMA gate on */
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
	return 0;
}