This source file includes following definitions.
- plug_device
- unplug_device
- get_request
- __get_request_wait
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include "blk.h"
23
24
25
26
27
/*
 * The static pool of request slots shared by all block devices.
 * A slot is free when its ->dev field is negative (see get_request()
 * and blk_dev_init()).
 */
static struct request all_requests[NR_REQUEST];

/*
 * Processes sleeping here are waiting for a free request slot
 * (see __get_request_wait()).
 */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead amount; all zero until drivers set it. */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * One entry per major number: the driver's request function and the
 * head of its pending request queue.  All slots start out empty;
 * drivers fill in request_fn at init time.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/*
 * blk_size[major][minor] = device size in 1024-byte blocks; a NULL
 * major pointer means the driver does its own size checking (see the
 * end-of-device test in make_request()).
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size[major][minor] = soft block size in bytes; NULL (or a
 * zero entry) means the default BLOCK_SIZE (see ll_rw_block()).
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size[major][minor] = hardware sector size for the device.
 * NOTE(review): not referenced in this file; presumably NULL means
 * 512-byte sectors — confirm against the drivers that read it.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
101
102
103
104
105
106
107 static void plug_device(struct blk_dev_struct * dev, struct request * plug)
108 {
109 unsigned long flags;
110
111 plug->dev = -1;
112 plug->cmd = -1;
113 plug->next = NULL;
114 save_flags(flags);
115 cli();
116 if (!dev->current_request)
117 dev->current_request = plug;
118 restore_flags(flags);
119 }
120
121
122
123
124 static void unplug_device(struct blk_dev_struct * dev)
125 {
126 struct request * req;
127 unsigned long flags;
128
129 save_flags(flags);
130 cli();
131 req = dev->current_request;
132 if (req && req->dev == -1 && req->cmd == -1) {
133 dev->current_request = req->next;
134 (dev->request_fn)();
135 }
136 restore_flags(flags);
137 }
138
139
140
141
142
143
/*
 * Look for a free request slot among the first n entries of
 * all_requests[] and claim it for device 'dev'.  Returns NULL if
 * none is free.  A slot is free when its ->dev field is negative.
 *
 * Must be called with interrupts disabled (all callers do cli()
 * first): nothing else protects the slot scan and claim.
 *
 * prev_found/prev_limit remember where the previous successful scan
 * ended so consecutive allocations don't repeatedly rescan the same
 * busy entries; the cache is reset whenever the caller's limit (n)
 * changes.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* Scan backwards, wrapping from the array start to 'limit'. */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)	/* came full circle: nothing free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;		/* claim the slot */
	return req;
}
169
170
171
172
/*
 * Slow path of request allocation: sleep until a slot among the first
 * n entries of all_requests[] frees up, then claim it for 'dev'.
 *
 * Classic sleep protocol: we are on the wait queue and marked
 * TASK_UNINTERRUPTIBLE *before* the availability check, so a wakeup
 * arriving between the failed get_request() and schedule() is not
 * lost — it just makes schedule() return immediately.
 */
static struct request * __get_request_wait(int n, int dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		/* Keep this device's queue moving so slots can free up. */
		unplug_device(MAJOR(dev)+blk_dev);
		current->state = TASK_UNINTERRUPTIBLE;
		cli();		/* get_request() requires interrupts off */
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
193
/*
 * Allocate a request slot, sleeping if necessary.  Tries the fast
 * non-blocking path first and only falls back to the sleeping path
 * when no slot is immediately available.
 */
static inline struct request * get_request_wait(int n, int dev)
{
	struct request * req;

	/* Fast path: one non-blocking attempt with interrupts off. */
	cli();
	req = get_request(n, dev);
	sti();
	if (!req)
		req = __get_request_wait(n, dev);	/* slow path: sleep */
	return req;
}
205
206
207
/* Read-only flags: one bit per minor, packed 32 per long, indexed by major. */
static long ro_bits[MAX_BLKDEV][8];
209
210 int is_read_only(int dev)
211 {
212 int minor,major;
213
214 major = MAJOR(dev);
215 minor = MINOR(dev);
216 if (major < 0 || major >= MAX_BLKDEV) return 0;
217 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
218 }
219
220 void set_device_ro(int dev,int flag)
221 {
222 int minor,major;
223
224 major = MAJOR(dev);
225 minor = MINOR(dev);
226 if (major < 0 || major >= MAX_BLKDEV) return;
227 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
228 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
229 }
230
231
232
233
234
235
/*
 * add_request() inserts a filled-in request into the device's queue.
 * If the queue is empty the driver is started at once; otherwise the
 * request is elevator-sorted into the existing queue.  Runs with
 * interrupts disabled while it mucks with the request list.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* Per-disk I/O statistics: map the request's device to a kstat slot. */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->dev) & 0x0040) >> 6;
			kstat.dk_drive[disk_index]++;
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
			kstat.dk_drive[disk_index]++;
			/* no break: falls into the empty default (harmless) */
		default: break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	/* Empty queue: install the request and start the driver right away. */
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion sort: find the spot that keeps IN_ORDER sequence. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI queues commands internally, so poke it for each new request. */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
280
281 static void make_request(int major,int rw, struct buffer_head * bh)
282 {
283 unsigned int sector, count;
284 struct request * req;
285 int rw_ahead, max_req;
286
287
288
289 rw_ahead = (rw == READA || rw == WRITEA);
290 if (rw_ahead) {
291 if (bh->b_lock)
292 return;
293 if (rw == READA)
294 rw = READ;
295 else
296 rw = WRITE;
297 }
298 if (rw!=READ && rw!=WRITE) {
299 printk("Bad block dev command, must be R/W/RA/WA\n");
300 return;
301 }
302 count = bh->b_size >> 9;
303 sector = bh->b_blocknr * count;
304 if (blk_size[major])
305 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
306 bh->b_dirt = bh->b_uptodate = 0;
307 bh->b_req = 0;
308 return;
309 }
310
311 if (bh->b_lock)
312 return;
313
314 lock_buffer(bh);
315 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
316 unlock_buffer(bh);
317 return;
318 }
319
320
321
322
323
324 max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
325
326
327 cli();
328
329
330
331
332
333 if (( major == IDE0_MAJOR
334 || major == IDE1_MAJOR
335 || major == FLOPPY_MAJOR
336 || major == SCSI_DISK_MAJOR
337 || major == SCSI_CDROM_MAJOR)
338 && (req = blk_dev[major].current_request))
339 {
340 #ifdef CONFIG_BLK_DEV_HD
341 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
342 #else
343 if (major == FLOPPY_MAJOR)
344 #endif CONFIG_BLK_DEV_HD
345 req = req->next;
346 while (req) {
347 if (req->dev == bh->b_dev &&
348 !req->sem &&
349 req->cmd == rw &&
350 req->sector + req->nr_sectors == sector &&
351 req->nr_sectors < 244)
352 {
353 req->bhtail->b_reqnext = bh;
354 req->bhtail = bh;
355 req->nr_sectors += count;
356 mark_buffer_clean(bh);
357 sti();
358 return;
359 }
360
361 if (req->dev == bh->b_dev &&
362 !req->sem &&
363 req->cmd == rw &&
364 req->sector - count == sector &&
365 req->nr_sectors < 244)
366 {
367 req->nr_sectors += count;
368 bh->b_reqnext = req->bh;
369 req->buffer = bh->b_data;
370 req->current_nr_sectors = count;
371 req->sector = sector;
372 mark_buffer_clean(bh);
373 req->bh = bh;
374 sti();
375 return;
376 }
377
378 req = req->next;
379 }
380 }
381
382
383 req = get_request(max_req, bh->b_dev);
384 sti();
385
386
387 if (!req) {
388 if (rw_ahead) {
389 unlock_buffer(bh);
390 return;
391 }
392 req = __get_request_wait(max_req, bh->b_dev);
393 }
394
395
396 req->cmd = rw;
397 req->errors = 0;
398 req->sector = sector;
399 req->nr_sectors = count;
400 req->current_nr_sectors = count;
401 req->buffer = bh->b_data;
402 req->sem = NULL;
403 req->bh = bh;
404 req->bhtail = bh;
405 req->next = NULL;
406 add_request(major+blk_dev,req);
407 }
408
409 void ll_rw_page(int rw, int dev, unsigned long page, char * buffer)
410 {
411 struct request * req;
412 unsigned int major = MAJOR(dev);
413 unsigned long sector = page * (PAGE_SIZE / 512);
414 struct semaphore sem = MUTEX_LOCKED;
415
416 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
417 printk("Trying to read nonexistent block-device %04x (%ld)\n",dev,sector);
418 return;
419 }
420 if (rw!=READ && rw!=WRITE)
421 panic("Bad block dev command, must be R/W");
422 if (rw == WRITE && is_read_only(dev)) {
423 printk("Can't page to read-only device 0x%X\n",dev);
424 return;
425 }
426 req = get_request_wait(NR_REQUEST, dev);
427
428 req->cmd = rw;
429 req->errors = 0;
430 req->sector = sector;
431 req->nr_sectors = PAGE_SIZE / 512;
432 req->current_nr_sectors = PAGE_SIZE / 512;
433 req->buffer = buffer;
434 req->sem = &sem;
435 req->bh = NULL;
436 req->next = NULL;
437 add_request(major+blk_dev,req);
438 down(&sem);
439 }
440
441
442
443
444
/*
 * ll_rw_block() submits an array of 'nr' buffer heads for reading or
 * writing.  All buffers must be on the same device and have the
 * device's block size.  NULL entries in bh[] are skipped.  On any
 * validation failure every buffer is marked clean and not uptodate.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* on-stack dummy used to plug the queue */
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL buffers; give up if there are none at all. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	/* The device must exist and have a registered request function. */
	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the device's block size (default BLOCK_SIZE). */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* For multi-buffer submissions, plug the device so the driver does
	 * not start until all buffers are queued and elevator-sorted. */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer is now part of a request */
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);	/* pull the plug and start the driver */
	return;

sorry:
	/* Submission failed: invalidate every buffer in the array. */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
520
/*
 * Read or write swap data: 'nb' pieces of PAGE_SIZE/nb bytes each,
 * piece i living at block b[i] on device 'dev', transferred to/from
 * the linear buffer 'buf'.  Requests are issued in batches of up to
 * 8; the function waits for each batch to complete (one down() per
 * issued request) before reusing the req[] slots.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i, j;
	int buffersize;
	struct request * req[8];
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;	/* up()ed once per completed request */

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;	/* bytes per piece */

	for (j=0, i=0; i<nb;)
	{
		for (; j < 8 && i < nb; j++, i++, buf += buffersize)
		{
			if (j == 0) {
				/* First request of a batch: sleep if necessary. */
				req[j] = get_request_wait(NR_REQUEST, dev);
			} else {
				/* Subsequent requests are opportunistic. */
				cli();
				req[j] = get_request(NR_REQUEST, dev);
				sti();
				if (req[j] == NULL)
					break;	/* pool exhausted: flush this batch */
			}
			req[j]->cmd = rw;
			req[j]->errors = 0;
			req[j]->sector = (b[i] * buffersize) >> 9;
			req[j]->nr_sectors = buffersize >> 9;
			req[j]->current_nr_sectors = buffersize >> 9;
			req[j]->buffer = buf;
			req[j]->sem = &sem;
			req[j]->bh = NULL;
			req[j]->next = NULL;
			add_request(major+blk_dev,req[j]);
		}
		/* Wait for every request issued in this batch. */
		while (j > 0) {
			j--;
			down(&sem);
		}
	}
}
575
576 long blk_dev_init(long mem_start, long mem_end)
577 {
578 struct request * req;
579
580 req = all_requests + NR_REQUEST;
581 while (--req >= all_requests) {
582 req->dev = -1;
583 req->next = NULL;
584 }
585 memset(ro_bits,0,sizeof(ro_bits));
586 #ifdef CONFIG_BLK_DEV_HD
587 mem_start = hd_init(mem_start,mem_end);
588 #endif
589 #ifdef CONFIG_BLK_DEV_IDE
590 mem_start = ide_init(mem_start,mem_end);
591 #endif
592 #ifdef CONFIG_BLK_DEV_XD
593 mem_start = xd_init(mem_start,mem_end);
594 #endif
595 #ifdef CONFIG_BLK_DEV_FD
596 floppy_init();
597 #else
598 outb_p(0xc, 0x3f2);
599 #endif
600 #ifdef CONFIG_CDU31A
601 mem_start = cdu31a_init(mem_start,mem_end);
602 #endif CONFIG_CDU31A
603 #ifdef CONFIG_MCD
604 mem_start = mcd_init(mem_start,mem_end);
605 #endif CONFIG_MCD
606 #ifdef CONFIG_MCDX
607 mem_start = mcdx_init(mem_start,mem_end);
608 #endif CONFIG_MCDX
609 #ifdef CONFIG_SBPCD
610 mem_start = sbpcd_init(mem_start, mem_end);
611 #endif CONFIG_SBPCD
612 #ifdef CONFIG_AZTCD
613 mem_start = aztcd_init(mem_start,mem_end);
614 #endif CONFIG_AZTCD
615 #ifdef CONFIG_CDU535
616 mem_start = sony535_init(mem_start,mem_end);
617 #endif CONFIG_CDU535
618 #ifdef CONFIG_GSCD
619 mem_start = gscd_init(mem_start, mem_end);
620 #endif CONFIG_GSCD
621 #ifdef CONFIG_CM206
622 mem_start = cm206_init(mem_start, mem_end);
623 #endif
624 #ifdef CONFIG_OPTCD
625 mem_start = optcd_init(mem_start,mem_end);
626 #endif CONFIG_OPTCD
627 #ifdef CONFIG_SJCD
628 mem_start = sjcd_init(mem_start,mem_end);
629 #endif CONFIG_SJCD
630 if (ramdisk_size)
631 mem_start += rd_init(mem_start, ramdisk_size*1024);
632 return mem_start;
633 }