This source file includes the following definitions:
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
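
/*
 * ll_rw_blk.c -- low-level handling of read/write requests to block
 * devices: request queueing, merging, and dispatch to the drivers.
 */
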
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
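
/* The global pool of request structures shared by all block devices. */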
static struct request all_requests[NR_REQUEST];
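
/* Processes sleep here when no free request slot is available. */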
struct wait_queue * wait_for_request = NULL;
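
/* Per-major read-ahead setting, in sectors. */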
int read_ahead[MAX_BLKDEV] = {0, };
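
/*
 * One blk_dev_struct per major device: the driver's strategy routine
 * (request_fn) and the head of its request queue (current_request).
 */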
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};
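
/*
 * blk_size contains the size of all block devices in units of 1024
 * bytes: blk_size[MAJOR][MINOR].  A NULL major entry means no size
 * checking is done for that major.
 */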
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
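
/*
 * blksize_size contains the soft block size of each device in bytes:
 * blksize_size[MAJOR][MINOR].  A NULL major entry means BLOCK_SIZE
 * (1024 bytes) is assumed.
 */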
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
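
/*
 * hardsect_size contains the hardware sector size of each device in
 * bytes: hardsect_size[MAJOR][MINOR].  A NULL major entry means 512
 * bytes is assumed.
 */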
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
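
/*
 * "Plug" a device: park a dummy inactive request at the head of an
 * empty queue so the strategy routine is not started while requests
 * are still being queued; unplug_device() later lets the queue rip.
 */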
static void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->rq_status = RQ_INACTIVE;
	plug->cmd = -1;
	plug->next = NULL;
	save_flags(flags);
	cli();
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}
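
/* Remove the plug and start the strategy routine if work is queued. */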
static void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}
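
/*
 * Look for a free request among the first n entries.  NOTE: interrupts
 * must be disabled on the way in, and will still be disabled on the
 * way out.
 */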
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
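
/*
 * Sleep until a free request among the first n entries becomes
 * available, unplugging the device while waiting.
 */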
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
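
/* One bit per minor device: set if the device is read-only. */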
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
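
/*
 * add_request() adds a request to the linked list.  It disables
 * interrupts so that it can muck with the request lists in peace.
 * It also accounts the request in the per-disk statistics.
 */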
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case IDE0_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			kstat.dk_drive[disk_index]++;
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			kstat.dk_drive[disk_index]++;
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator sort: keep the queue ordered to minimize seeks. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* For SCSI devices, call the strategy routine unconditionally. */
	if (scsi_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}

static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/*
	 * WRITEA/READA map to ordinary write/read, except that a locked
	 * buffer makes them give up early instead of blocking.
	 */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* blk_size[] is in kB; >>1 converts a 512-byte sector count to kB. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			printk("attempt to access beyond end of device\n");
			return;
		}

	/* Buffer already locked: someone else is already servicing it. */
	if (bh->b_lock)
		return;

	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/*
	 * Don't let write requests fill the queue completely: reads take
	 * precedence, so the last third of the slots are kept for reads.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

	/* Look for a free request slot. */
	cli();

	/*
	 * Try to merge this buffer into a pending request on the same
	 * device.  The SCSI disk and IDE drivers remove a request from
	 * the queue before processing it, so it is safe to keep adding
	 * to the queue's top entry for those majors.
	 */
	if (( major == IDE0_MAJOR
	   || major == IDE1_MAJOR
	   || major == FLOPPY_MAJOR
	   || major == SCSI_DISK_MAJOR
	   || major == SCSI_CDROM_MAJOR
	   || major == IDE2_MAJOR
	   || major == IDE3_MAJOR)
	   && (req = blk_dev[major].current_request))
	{
#ifdef CONFIG_BLK_DEV_HD
		/* The hd and floppy drivers keep working on the queue head,
		   so skip the entry that is currently being processed. */
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
		if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
			req = req->next;
		while (req) {
			/* Back merge: bh extends the tail of this request. */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* Front merge: bh ends where this request begins. */
			if (req->rq_dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* No merge possible: grab a free request slot. */
	req = get_request(max_req, bh->b_dev);
	sti();

	/* If none is available: read-ahead gives up, otherwise we block. */
	if (!req) {
		if (rw_ahead) {
			unlock_buffer(bh);
			return;
		}
		req = __get_request_wait(max_req, bh->b_dev);
	}

	/* Fill in the request and add it to the queue. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}

/*
 * Read or write a single page, synchronously: build one request and
 * sleep on its semaphore until the driver completes it.
 */
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	unsigned long sector = page * (PAGE_SIZE / 512);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %s (%lu)\n",
		       kdevname(dev), sector);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device %s\n",
		       kdevname(dev));
		return;
	}
	req = get_request_wait(NR_REQUEST, dev);

	/* Fill up the request structure. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = PAGE_SIZE / 512;
	req->current_nr_sectors = PAGE_SIZE / 512;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);	/* wait for completion */
}
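
/*
 * Request a number of buffers from a block device.  All buffers must
 * belong to the same device.
 */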
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure the first block contains something reasonable. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	/*
	 * If several buffers are queued, plug the device so the strategy
	 * routine does not start until all blocks have been shoved into
	 * the queue; then let it rip.
	 */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);
	return;

sorry:
	/* On error, mark the buffers clean and not uptodate. */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}

/*
 * Read or write a swap area: queue up to eight requests at a time and
 * wait for them all to complete before starting the next batch.
 */
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw != READ && rw != WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W\n");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device %s\n",
		       kdevname(dev));
		return;
	}

	buffersize = PAGE_SIZE / nb;

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* Always wait for the first slot of a batch. */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				/* For the rest, take what is free right now. */
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		/* Wait for every request of this batch to complete. */
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}

int blk_dev_init(void)
{
	struct request * req;

	/* Mark every request slot inactive. */
	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
	outb_p(0xc, 0x3f2);	/* no floppy driver: make sure the motors are off */
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
	return 0;
}