This source file includes following definitions.
- plug_device
- unplug_device
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This handles all read/write requests to block devices.
 */
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include "blk.h"
23
24
25
26
27
/*
 * The request-struct contains all necessary data to load a number of
 * sectors into memory.  A slot is free when its dev field is < 0
 * (see get_request / blk_dev_init).
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk, per major. */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *	request_fn  - the driver's strategy routine
 *	current_request - head of this device's request queue
 * One entry per major number; NULL request_fn means "no driver".
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/*
 * blk_size contains the size of all block-devices in units of 1024 bytes
 * (see the end-of-device check in make_request):
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the soft block size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then BLOCK_SIZE is assumed (see ll_rw_block).
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device:
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * NOTE(review): declared but not referenced in this file — presumably
 * consumed by the individual drivers.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
101
102
103
104
105
106
/*
 * "plug" the device if there are no outstanding requests: insert a dummy
 * request (dev == cmd == -1) at the head of an empty queue.  This keeps
 * the driver from starting until all requests have been queued, so the
 * elevator in add_request can sort them first (see ll_rw_block).
 */
static void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
	unsigned long flags;

	plug->dev = -1;		/* marks the plug; unplug_device looks for this */
	plug->cmd = -1;
	plug->next = NULL;
	save_flags(flags);
	cli();			/* queue head is also touched from interrupts */
	if (!dev->current_request)
		dev->current_request = plug;
	restore_flags(flags);
}
120
121
122
123
/*
 * remove the plug and let it rip: if the head of the queue is the dummy
 * plug request inserted by plug_device, drop it and start the driver's
 * strategy routine.  Safe to call on an unplugged device (no-op then).
 */
static void unplug_device(struct blk_dev_struct * dev)
{
	struct request * req;
	unsigned long flags;

	save_flags(flags);
	cli();
	req = dev->current_request;
	if (req && req->dev == -1 && req->cmd == -1) {	/* is it the plug? */
		dev->current_request = req->next;
		(dev->request_fn)();
	}
	restore_flags(flags);
}
138
139
140
141
142
143
/*
 * look for a free request in the first n entries of all_requests and
 * claim it for device dev.  Returns NULL if none is free.
 *
 * A free slot is marked by req->dev < 0.  The scan starts just below the
 * previously found slot (cached in static state) and wraps around, so
 * successive callers rotate through the table instead of always fighting
 * over the first entries.
 *
 * NOTE: interrupts must be disabled on the way in, and will still
 * be disabled on the way out (the static cache and the dev fields are
 * not otherwise protected).
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {	/* caller changed n: restart the cache */
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step downwards, wrapping from the bottom back to limit-1 */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)	/* full circle: nothing free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;			/* claim the slot */
	return req;
}
169
170
171
172
173
174
175 static inline struct request * get_request_wait(int n, int dev)
176 {
177 register struct request *req;
178
179 while ((req = get_request(n, dev)) == NULL) {
180 unplug_device(MAJOR(dev)+blk_dev);
181 sleep_on(&wait_for_request);
182 }
183 return req;
184 }
185
186
187
/* One read-only bit per minor device: 8 longs x 32 bits = 256 minors per major. */
static long ro_bits[MAX_BLKDEV][8];
189
190 int is_read_only(int dev)
191 {
192 int minor,major;
193
194 major = MAJOR(dev);
195 minor = MINOR(dev);
196 if (major < 0 || major >= MAX_BLKDEV) return 0;
197 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
198 }
199
200 void set_device_ro(int dev,int flag)
201 {
202 int minor,major;
203
204 major = MAJOR(dev);
205 minor = MINOR(dev);
206 if (major < 0 || major >= MAX_BLKDEV) return;
207 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
208 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
209 }
210
211
212
213
214
215
/*
 * add_request adds a request to the linked list of dev, in elevator
 * (disk-sorted) order.  It disables interrupts so that it can muck with
 * the request list in peace; if the queue was empty it starts the
 * driver's strategy routine immediately.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* per-drive I/O accounting for kernel statistics */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0040) >> 6;
					kstat.dk_drive[disk_index]++;
					break;
		case IDE1_MAJOR:	disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
					kstat.dk_drive[disk_index]++;
					/* falls into default */
		default:		break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	/* empty queue: install the request and kick the driver at once */
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insert: find the spot that keeps the list in disk order
	   (IN_ORDER), never touching the in-flight head request */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
260
261 static void make_request(int major,int rw, struct buffer_head * bh)
262 {
263 unsigned int sector, count;
264 struct request * req;
265 int rw_ahead, max_req;
266
267
268
269 rw_ahead = (rw == READA || rw == WRITEA);
270 if (rw_ahead) {
271 if (bh->b_lock)
272 return;
273 if (rw == READA)
274 rw = READ;
275 else
276 rw = WRITE;
277 }
278 if (rw!=READ && rw!=WRITE) {
279 printk("Bad block dev command, must be R/W/RA/WA\n");
280 return;
281 }
282 count = bh->b_size >> 9;
283 sector = bh->b_blocknr * count;
284 if (blk_size[major])
285 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
286 bh->b_dirt = bh->b_uptodate = 0;
287 bh->b_req = 0;
288 return;
289 }
290
291 if (bh->b_lock)
292 return;
293
294 lock_buffer(bh);
295 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
296 unlock_buffer(bh);
297 return;
298 }
299
300
301
302
303
304 max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
305
306
307
308 repeat:
309 cli();
310
311
312
313
314
315 if (( major == IDE0_MAJOR
316 || major == IDE1_MAJOR
317 || major == FLOPPY_MAJOR
318 || major == SCSI_DISK_MAJOR
319 || major == SCSI_CDROM_MAJOR)
320 && (req = blk_dev[major].current_request))
321 {
322 #ifdef CONFIG_BLK_DEV_HD
323 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
324 #else
325 if (major == FLOPPY_MAJOR)
326 #endif CONFIG_BLK_DEV_HD
327 req = req->next;
328 while (req) {
329 if (req->dev == bh->b_dev &&
330 !req->sem &&
331 req->cmd == rw &&
332 req->sector + req->nr_sectors == sector &&
333 req->nr_sectors < 244)
334 {
335 req->bhtail->b_reqnext = bh;
336 req->bhtail = bh;
337 req->nr_sectors += count;
338 mark_buffer_clean(bh);
339 sti();
340 return;
341 }
342
343 if (req->dev == bh->b_dev &&
344 !req->sem &&
345 req->cmd == rw &&
346 req->sector - count == sector &&
347 req->nr_sectors < 244)
348 {
349 req->nr_sectors += count;
350 bh->b_reqnext = req->bh;
351 req->buffer = bh->b_data;
352 req->current_nr_sectors = count;
353 req->sector = sector;
354 mark_buffer_clean(bh);
355 req->bh = bh;
356 sti();
357 return;
358 }
359
360 req = req->next;
361 }
362 }
363
364
365 req = get_request(max_req, bh->b_dev);
366
367
368 if (! req) {
369 if (rw_ahead) {
370 sti();
371 unlock_buffer(bh);
372 return;
373 }
374 unplug_device(major+blk_dev);
375 sleep_on(&wait_for_request);
376 sti();
377 goto repeat;
378 }
379
380
381 sti();
382
383
384 req->cmd = rw;
385 req->errors = 0;
386 req->sector = sector;
387 req->nr_sectors = count;
388 req->current_nr_sectors = count;
389 req->buffer = bh->b_data;
390 req->sem = NULL;
391 req->bh = bh;
392 req->bhtail = bh;
393 req->next = NULL;
394 add_request(major+blk_dev,req);
395 }
396
/*
 * Read or write one page of swap I/O synchronously: queue a single
 * 8-sector request and sleep on a local semaphore until the driver
 * signals completion through req->sem.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();			/* get_request_wait expects interrupts off */
	req = get_request_wait(NR_REQUEST, dev);
	sti();
	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;	/* one page = 8 sectors of 512 bytes */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;	/* completion handler will up() this */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);		/* sleep until the transfer is done */
}
429
430
431
432
433
/*
 * Request I/O on a number of buffers.  All buffers must belong to the
 * same device and have the device's block size.  On any validation
 * failure every buffer is marked not dirty / not uptodate ("sorry").
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there is more than one buffer, plug the device so the driver
	   does not start until all requests are queued and elevator-sorted;
	   unplug_device below then lets it rip. */
	if (nr > 1)
		plug_device(dev, &plug);
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	unplug_device(dev);
	return;

sorry:
	/* invalidate the buffers so callers notice the failure */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
509
510 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
511 {
512 int i;
513 int buffersize;
514 struct request * req;
515 unsigned int major = MAJOR(dev);
516 struct semaphore sem = MUTEX_LOCKED;
517
518 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
519 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
520 return;
521 }
522
523 if (rw!=READ && rw!=WRITE) {
524 printk("ll_rw_swap: bad block dev command, must be R/W");
525 return;
526 }
527 if (rw == WRITE && is_read_only(dev)) {
528 printk("Can't swap to read-only device 0x%X\n",dev);
529 return;
530 }
531
532 buffersize = PAGE_SIZE / nb;
533
534 for (i=0; i<nb; i++, buf += buffersize)
535 {
536 cli();
537 req = get_request_wait(NR_REQUEST, dev);
538 sti();
539 req->cmd = rw;
540 req->errors = 0;
541 req->sector = (b[i] * buffersize) >> 9;
542 req->nr_sectors = buffersize >> 9;
543 req->current_nr_sectors = buffersize >> 9;
544 req->buffer = buf;
545 req->sem = &sem;
546 req->bh = NULL;
547 req->next = NULL;
548 add_request(major+blk_dev,req);
549 down(&sem);
550 }
551 }
552
553 long blk_dev_init(long mem_start, long mem_end)
554 {
555 struct request * req;
556
557 req = all_requests + NR_REQUEST;
558 while (--req >= all_requests) {
559 req->dev = -1;
560 req->next = NULL;
561 }
562 memset(ro_bits,0,sizeof(ro_bits));
563 #ifdef CONFIG_BLK_DEV_HD
564 mem_start = hd_init(mem_start,mem_end);
565 #endif
566 #ifdef CONFIG_BLK_DEV_IDE
567 mem_start = ide_init(mem_start,mem_end);
568 #endif
569 #ifdef CONFIG_BLK_DEV_XD
570 mem_start = xd_init(mem_start,mem_end);
571 #endif
572 #ifdef CONFIG_CDU31A
573 mem_start = cdu31a_init(mem_start,mem_end);
574 #endif
575 #ifdef CONFIG_CDU535
576 mem_start = sony535_init(mem_start,mem_end);
577 #endif
578 #ifdef CONFIG_MCD
579 mem_start = mcd_init(mem_start,mem_end);
580 #endif
581 #ifdef CONFIG_AZTCD
582 mem_start = aztcd_init(mem_start,mem_end);
583 #endif
584 #ifdef CONFIG_BLK_DEV_FD
585 floppy_init();
586 #else
587 outb_p(0xc, 0x3f2);
588 #endif
589 #ifdef CONFIG_SBPCD
590 mem_start = sbpcd_init(mem_start, mem_end);
591 #endif CONFIG_SBPCD
592 if (ramdisk_size)
593 mem_start += rd_init(mem_start, ramdisk_size*1024);
594 return mem_start;
595 }