This source file includes following definitions.
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include "blk.h"
23
24
25
26
27
/*
 * The static request table: all in-flight block I/O requests are
 * allocated from here.  A free slot is marked by dev == -1 (see
 * get_request() and blk_dev_init()).
 */
static struct request all_requests[NR_REQUEST];

/*
 * Tasks sleeping here are waiting for a free slot in all_requests[]
 * (see __get_request_wait()).
 */
struct wait_queue * wait_for_request = NULL;
34
35
36
/*
 * Per-major read-ahead amount, 0 = driver default.  NOTE(review): units
 * are not visible in this file (presumably 512-byte sectors) -- confirm
 * against the drivers that set it.
 */
int read_ahead[MAX_BLKDEV] = {0, };
38
39
40
41
42
/*
 * One entry per major device number: { request_fn, current_request }.
 * Each block driver installs its request function in its own slot at
 * init time; a NULL request_fn means "no such device" (checked in
 * ll_rw_page/ll_rw_block/ll_rw_swap_file).  NOTE(review): initializer
 * has 23 explicit entries; any remaining slots up to MAX_BLKDEV are
 * zero-filled by the compiler, which is equivalent.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};
68
69
70
71
72
73
74
75
76
/*
 * blk_size[major][minor]: device size.  make_request() compares it
 * against (sector + count) >> 1 where sectors are 512 bytes, so the
 * size is in 1K units.  NULL for a major means "no bounds check".
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size[major][minor]: soft block size in bytes; used by
 * ll_rw_block() to validate buffer sizes.  NULL (or a 0 entry) means
 * the default BLOCK_SIZE.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size[major][minor]: hardware sector size.  Not referenced
 * in this file; presumably consumed by the individual drivers --
 * confirm at their call sites.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
101
102
103
104
105
106
107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
108 {
109 unsigned long flags;
110
111 plug->dev = -1;
112 plug->cmd = -1;
113 plug->next = NULL;
114 save_flags(flags);
115 cli();
116 if (!dev->current_request)
117 dev->current_request = plug;
118 restore_flags(flags);
119 }
120
121
122
123
124 static void unplug_device(struct blk_dev_struct * dev)
125 {
126 struct request * req;
127 unsigned long flags;
128
129 save_flags(flags);
130 cli();
131 req = dev->current_request;
132 if (req && req->dev == -1 && req->cmd == -1) {
133 dev->current_request = req->next;
134 (dev->request_fn)();
135 }
136 restore_flags(flags);
137 }
138
139
140
141
142
143
/*
 * Look for a free request slot among the first n entries of
 * all_requests[] and claim it for 'dev'; return NULL if none is free.
 * A free slot is marked by req->dev < 0.
 *
 * NOTE: the caller must have interrupts disabled (cli) around this
 * call -- the scan and the claim (req->dev = dev) are not otherwise
 * atomic against completion handlers freeing slots.
 *
 * A static cursor (prev_found) lets successive calls resume the
 * circular scan where the last success left off; prev_limit detects a
 * change in 'n' between calls and resets the cursor.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step backwards, wrapping from the table front to 'limit' */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;			/* found a free slot */
		if (req == prev_found)
			return NULL;		/* scanned full circle: none free */
	}
	prev_found = req;
	req->dev = dev;				/* claim the slot */
	return req;
}
169
170
171
172
/*
 * Sleep until a request slot (among the first n) becomes free, then
 * return it claimed for 'dev'.  Each iteration unplugs the target
 * device so queued I/O makes progress and frees slots.
 */
static struct request * __get_request_wait(int n, int dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		unplug_device(MAJOR(dev)+blk_dev);
		/* go non-runnable BEFORE testing, so a wakeup between the
		 * test and schedule() is not lost */
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
193
/*
 * Get a free request slot, sleeping if necessary.  Fast path: one
 * attempt with interrupts disabled; only when that fails do we take
 * the slow, sleeping path in __get_request_wait().
 */
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();

	return req ? req : __get_request_wait(n, dev);
}
205
206
207
/*
 * One read-only bit per minor device: 32 minors per long, 8 longs per
 * major = 256 minors.  Indexed as ro_bits[major][minor >> 5], bit
 * (minor & 31).
 */
static long ro_bits[MAX_BLKDEV][8];
209
210 int is_read_only(int dev)
211 {
212 int minor,major;
213
214 major = MAJOR(dev);
215 minor = MINOR(dev);
216 if (major < 0 || major >= MAX_BLKDEV) return 0;
217 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
218 }
219
220 void set_device_ro(int dev,int flag)
221 {
222 int minor,major;
223
224 major = MAJOR(dev);
225 minor = MINOR(dev);
226 if (major < 0 || major >= MAX_BLKDEV) return;
227 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
228 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
229 }
230
231
232
233
234
235
/*
 * add_request adds a request to the device's linked list.  It disables
 * interrupts so it can muck with the request list in peace.
 *
 * If the queue is empty the request becomes the head and the driver's
 * request_fn is started immediately; otherwise the request is inserted
 * elevator-style at the first position that keeps the list IN_ORDER.
 * Per-disk statistics (kstat.dk_drive) are updated for the majors this
 * code knows about.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* account the request to a per-disk statistics slot */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		case IDE0_MAJOR:	/* shares stat slots 0/1 with XT disks */
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0040) >> 6;
					kstat.dk_drive[disk_index]++;
					break;
		case IDE1_MAJOR:	disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
					kstat.dk_drive[disk_index]++;
					/* falls through into default -- harmless, it only breaks */
		default:		break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* empty queue: this request becomes the head, start the device */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insert: after the first element that is "before" req,
	 * or at a discontinuity in the existing ordering */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
280
281 static void make_request(int major,int rw, struct buffer_head * bh)
282 {
283 unsigned int sector, count;
284 struct request * req;
285 int rw_ahead, max_req;
286
287
288
289 rw_ahead = (rw == READA || rw == WRITEA);
290 if (rw_ahead) {
291 if (bh->b_lock)
292 return;
293 if (rw == READA)
294 rw = READ;
295 else
296 rw = WRITE;
297 }
298 if (rw!=READ && rw!=WRITE) {
299 printk("Bad block dev command, must be R/W/RA/WA\n");
300 return;
301 }
302 count = bh->b_size >> 9;
303 sector = bh->b_blocknr * count;
304 if (blk_size[major])
305 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
306 bh->b_dirt = bh->b_uptodate = 0;
307 bh->b_req = 0;
308 printk("attempt to access beyond end of device\n");
309 return;
310 }
311
312 if (bh->b_lock)
313 return;
314
315 lock_buffer(bh);
316 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
317 unlock_buffer(bh);
318 return;
319 }
320
321
322
323
324
325 max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
326
327
328 cli();
329
330
331
332
333
334 if (( major == IDE0_MAJOR
335 || major == IDE1_MAJOR
336 || major == FLOPPY_MAJOR
337 || major == SCSI_DISK_MAJOR
338 || major == SCSI_CDROM_MAJOR
339 || major == IDE2_MAJOR
340 || major == IDE3_MAJOR)
341 && (req = blk_dev[major].current_request))
342 {
343 #ifdef CONFIG_BLK_DEV_HD
344 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
345 #else
346 if (major == FLOPPY_MAJOR)
347 #endif CONFIG_BLK_DEV_HD
348 req = req->next;
349 while (req) {
350 if (req->dev == bh->b_dev &&
351 !req->sem &&
352 req->cmd == rw &&
353 req->sector + req->nr_sectors == sector &&
354 req->nr_sectors < 244)
355 {
356 req->bhtail->b_reqnext = bh;
357 req->bhtail = bh;
358 req->nr_sectors += count;
359 mark_buffer_clean(bh);
360 sti();
361 return;
362 }
363
364 if (req->dev == bh->b_dev &&
365 !req->sem &&
366 req->cmd == rw &&
367 req->sector - count == sector &&
368 req->nr_sectors < 244)
369 {
370 req->nr_sectors += count;
371 bh->b_reqnext = req->bh;
372 req->buffer = bh->b_data;
373 req->current_nr_sectors = count;
374 req->sector = sector;
375 mark_buffer_clean(bh);
376 req->bh = bh;
377 sti();
378 return;
379 }
380
381 req = req->next;
382 }
383 }
384
385
386 req = get_request(max_req, bh->b_dev);
387 sti();
388
389
390 if (!req) {
391 if (rw_ahead) {
392 unlock_buffer(bh);
393 return;
394 }
395 req = __get_request_wait(max_req, bh->b_dev);
396 }
397
398
399 req->cmd = rw;
400 req->errors = 0;
401 req->sector = sector;
402 req->nr_sectors = count;
403 req->current_nr_sectors = count;
404 req->buffer = bh->b_data;
405 req->sem = NULL;
406 req->bh = bh;
407 req->bhtail = bh;
408 req->next = NULL;
409 add_request(major+blk_dev,req);
410 }
411
/*
 * Synchronous transfer of one page (PAGE_SIZE bytes) to or from the
 * given device, starting at page number 'page'.  Builds a single
 * request carrying a private semaphore and sleeps (down) until the
 * driver signals completion by upping it.
 */
void ll_rw_page(int rw, int dev, unsigned long page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	unsigned long sector = page * (PAGE_SIZE / 512);
	struct semaphore sem = MUTEX_LOCKED;

	/* sanity checks: device must exist, command must be R/W,
	 * and writes are refused on read-only devices */
	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%ld)\n",dev,sector);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* a slot will eventually free up: wait for one */
	req = get_request_wait(NR_REQUEST, dev);

	/* fill in the request info and queue it */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = PAGE_SIZE / 512;
	req->current_nr_sectors = PAGE_SIZE / 512;
	req->buffer = buffer;
	req->sem = &sem;	/* completion handler ups this */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);		/* block until the I/O completes */
}
443
444
445
446
447
/*
 * Queue read/write I/O for an array of buffer heads.  NULL entries in
 * bh[] are skipped.  All buffers must belong to the same device and
 * match that device's block size; any violation fails the whole batch
 * via the "sorry" path, which marks the buffers clean and not uptodate
 * so callers notice the error.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* skip leading NULL entries; return if they were all NULL */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	/* the device must exist and have a request function installed */
	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* determine the device's block size (BLOCK_SIZE by default) */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* every buffer must match the device block size */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* for multi-buffer batches, plug the queue so the driver only
	 * starts once the whole (elevator-sorted) batch is queued */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer is now part of a request */
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);
	return;

sorry:
	/* fail the batch: clean + not uptodate signals the error upward */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
523
/*
 * Read or write swap data: nb chunks of PAGE_SIZE/nb bytes each, chunk
 * i located at device block b[i], transferred to/from buf.  Up to 8
 * requests are queued at a time, all sharing one semaphore; we down()
 * once per queued request, so the function is fully synchronous.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	/* same sanity checks as ll_rw_page */
	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;	/* bytes per chunk */

	for (j=0, i=0; i<nb;)
	{
		/* queue up to 8 requests: only the first in a batch may
		 * sleep for a slot, the rest are opportunistic and stop
		 * the batch early when no slot is free */
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;	/* block -> 512-byte sector */
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		/* wait for this whole batch before queueing more */
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
578
579 long blk_dev_init(long mem_start, long mem_end)
580 {
581 struct request * req;
582
583 req = all_requests + NR_REQUEST;
584 while (--req >= all_requests) {
585 req->dev = -1;
586 req->next = NULL;
587 }
588 memset(ro_bits,0,sizeof(ro_bits));
589 #ifdef CONFIG_BLK_DEV_IDE
590 mem_start = ide_init(mem_start,mem_end);
591 #endif
592 #ifdef CONFIG_BLK_DEV_HD
593 mem_start = hd_init(mem_start,mem_end);
594 #endif
595 #ifdef CONFIG_BLK_DEV_XD
596 mem_start = xd_init(mem_start,mem_end);
597 #endif
598 #ifdef CONFIG_BLK_DEV_FD
599 floppy_init();
600 #else
601 outb_p(0xc, 0x3f2);
602 #endif
603 #ifdef CONFIG_CDU31A
604 mem_start = cdu31a_init(mem_start,mem_end);
605 #endif CONFIG_CDU31A
606 #ifdef CONFIG_MCD
607 mem_start = mcd_init(mem_start,mem_end);
608 #endif CONFIG_MCD
609 #ifdef CONFIG_MCDX
610 mem_start = mcdx_init(mem_start,mem_end);
611 #endif CONFIG_MCDX
612 #ifdef CONFIG_SBPCD
613 mem_start = sbpcd_init(mem_start, mem_end);
614 #endif CONFIG_SBPCD
615 #ifdef CONFIG_AZTCD
616 mem_start = aztcd_init(mem_start,mem_end);
617 #endif CONFIG_AZTCD
618 #ifdef CONFIG_CDU535
619 mem_start = sony535_init(mem_start,mem_end);
620 #endif CONFIG_CDU535
621 #ifdef CONFIG_GSCD
622 mem_start = gscd_init(mem_start, mem_end);
623 #endif CONFIG_GSCD
624 #ifdef CONFIG_CM206
625 mem_start = cm206_init(mem_start, mem_end);
626 #endif
627 #ifdef CONFIG_OPTCD
628 mem_start = optcd_init(mem_start,mem_end);
629 #endif CONFIG_OPTCD
630 #ifdef CONFIG_SJCD
631 mem_start = sjcd_init(mem_start,mem_end);
632 #endif CONFIG_SJCD
633 if (ramdisk_size)
634 mem_start += rd_init(mem_start, ramdisk_size*1024);
635 return mem_start;
636 }