This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- drive_stat_acct
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
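
/*
 * Low-level block-device request handling: the global request pool, the
 * per-major request queues with their plugging/unplugging and elevator
 * ordering, and the ll_rw_page / ll_rw_block / ll_rw_swap_file entry
 * points used by the paging, buffer-cache and swap code.
 */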
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
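
/*
 * The global pool of request structures, the wait queue that tasks sleep
 * on while the pool is exhausted, and the per-major read-ahead size.
 */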
static struct request all_requests[NR_REQUEST];

struct wait_queue * wait_for_request = NULL;

int read_ahead[MAX_BLKDEV] = {0, };
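
/*
 * One entry per major number: the driver's request function and the head
 * of its pending request queue.  The request function stays NULL until
 * the driver's init routine installs one.
 */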
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL }
};
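
/*
 * Per-device size tables, indexed by major and then by minor number:
 * blk_size[] gives the device size in 1024-byte blocks (used for the
 * end-of-device check in make_request), blksize_size[] the soft block
 * size in bytes (checked by ll_rw_block), and hardsect_size[] the
 * hardware sector size.
 */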
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
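
/*
 * "Plug" an idle device by parking a dummy inactive request at the head
 * of its queue.  The driver is not started while the plug is in place,
 * so several requests can be queued, merged and sorted before the device
 * begins working on them (see ll_rw_block).
 */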
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
        unsigned long flags;

        plug->rq_status = RQ_INACTIVE;
        plug->cmd = -1;
        plug->next = NULL;
        save_flags(flags);
        cli();
        if (!dev->current_request)
                dev->current_request = plug;
        restore_flags(flags);
}
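
/*
 * Remove the plug and call the driver's request function so it can start
 * on whatever has been queued up behind the plug.
 */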
static inline void unplug_device(struct blk_dev_struct * dev)
{
        struct request * req;
        unsigned long flags;

        save_flags(flags);
        cli();
        req = dev->current_request;
        if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
                dev->current_request = req->next;
                (dev->request_fn)();
        }
        restore_flags(flags);
}
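
/*
 * Look for a free slot among the first n entries of all_requests and mark
 * it active for the given device; returns NULL if none is free.  Must be
 * called with interrupts disabled.  prev_found/prev_limit merely remember
 * where the previous search stopped.
 */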
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}
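
/*
 * Sleep on wait_for_request until a request slot becomes free.  The device
 * is unplugged on every iteration so outstanding requests can complete and
 * release their slots.
 */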
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                unplug_device(MAJOR(dev)+blk_dev);
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}
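
/*
 * One bit per minor number, set when the device has been marked read-only
 * with set_device_ro().
 */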
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ || cmd == READA) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE || cmd == WRITEA) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        }
}
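
/*
 * Account the transfer in the per-drive statistics, then link the request
 * into the driver's queue in elevator order (IN_ORDER) and start the
 * driver's request function if the queue was idle.
 */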
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        /* fall through */
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

        /* SCSI devices get their request function kicked even when the queue was busy. */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}
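
/*
 * Turn one buffer_head into block I/O: validate the command, check the
 * transfer against the device size, and try to merge the buffer into an
 * existing request on the queue (at its tail or its head, for adjacent
 * sectors).  If no merge is possible a free request is taken from the
 * pool; read-ahead is silently dropped when the buffer is locked or no
 * request is available.
 */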
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        /* Read-ahead and write-ahead are optional: drop them if the buffer is busy. */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        printk("attempt to access beyond end of device\n");
                        return;
                }

        if (bh->b_lock)
                return;

        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

        /* Writes may use at most two thirds of the request pool; reads can use it all. */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

        /* Look for a queued request on this device that the buffer can be merged into. */
        cli();

        if (( major == IDE0_MAJOR       /* same as HD_MAJOR */
            || major == IDE1_MAJOR
            || major == FLOPPY_MAJOR
            || major == SCSI_DISK_MAJOR
            || major == SCSI_CDROM_MAJOR
            || major == IDE2_MAJOR
            || major == IDE3_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                /* The old hd driver and the floppy driver leave the active request on
                   the queue while the hardware works on it, so never merge into it. */
#ifdef CONFIG_BLK_DEV_HD
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
                if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
                        req = req->next;
                while (req) {
                        /* Merge at the tail of a request for the immediately preceding sectors. */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* Merge at the head of a request for the immediately following sectors. */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

        /* No merge was possible: allocate a fresh request. */
        req = get_request(max_req, bh->b_dev);
        sti();

        /* If the pool is empty, read-ahead is simply dropped; otherwise wait for a slot. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

        /* Fill in the request and queue it for the driver. */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
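
/*
 * Synchronous page I/O: build a single request covering PAGE_SIZE/512
 * sectors and sleep on a private semaphore until the driver completes it.
 */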
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        unsigned long sector = page * (PAGE_SIZE / 512);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %s (%ld)\n",
                       kdevname(dev), sector);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device %s\n",
                       kdevname(dev));
                return;
        }
        req = get_request_wait(NR_REQUEST, dev);

        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = PAGE_SIZE / 512;
        req->current_nr_sectors = PAGE_SIZE / 512;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}
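
/*
 * Main entry point for the buffer cache.  Every buffer must use the block
 * size configured for the device; when more than one buffer is passed the
 * device is plugged first, so the individual requests can be merged and
 * sorted before the driver starts.
 */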
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Skip leading NULL buffer pointers. */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine the correct block size for this device. */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify that every buffer has that block size. */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                        correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        /* Plug the device when several buffers are queued, so the requests
           can be merged and sorted before the driver sees them. */
        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        unplug_device(dev);
        return;

sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
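
/*
 * Read or write one page of a swap file that is split into nb pieces of
 * PAGE_SIZE/nb bytes; b[] holds the block number of each piece.  Up to
 * eight requests are queued at a time, all completed through the shared
 * semaphore, before the next batch is built.
 */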
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw != READ && rw != WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device %s\n",
                       kdevname(dev));
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {   /* may block only on the first request of a batch */
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}
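
/*
 * Boot-time initialization: mark every entry of the request pool inactive,
 * clear the read-only bitmap, and call the init routine of every block
 * driver that is configured in.  Without a floppy driver the floppy
 * controller's digital output register is poked directly, presumably to
 * make sure the drive motors are off.
 */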
int blk_dev_init(void)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
        return 0;
}