This source file includes following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 * Low-level handling of block device requests: request-slot
 * allocation, elevator queueing, and the ll_rw_* entry points.
 */
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18 #include <linux/mm.h>
19
20 #include <asm/system.h>
21 #include <asm/io.h>
22 #include "blk.h"
23
24
25
26
27
/* The static pool of request slots shared by all block devices.
 * A slot is free when its ->dev field is negative (see blk_dev_init
 * and get_request). */
static struct request all_requests[NR_REQUEST];

/* Processes sleep here when no free request slot is available;
 * NOTE(review): presumably woken by the driver completion path —
 * the wakeup side is not visible in this file. */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead setting; 0 means use the driver default.
 * Units are assumed to be sectors — not used within this file. */
int read_ahead[MAX_BLKDEV] = {0, };

/* One entry per major number: the driver strategy routine
 * (request_fn) and the head of that driver's pending request queue
 * (current_request).  Both NULL until the driver registers. */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/* blk_size[major][minor] = device size, or NULL when unknown.
 * make_request() compares it against (sector+count)>>1, so the
 * units are 1k blocks. */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/* blksize_size[major][minor] = soft block size in bytes; NULL table
 * or 0 entry means the BLOCK_SIZE default (see ll_rw_block). */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/* hardsect_size[major][minor]: per-minor hardware sector size table.
 * Not referenced anywhere in this file; maintained for drivers. */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
101
102
103
104
105
106
/*
 * Find a free slot in all_requests[0..n-1] and claim it for device
 * 'dev'.  Returns NULL when all n slots are busy.  Callers run this
 * with interrupts disabled (cli) so the scan-and-claim is atomic.
 *
 * prev_found remembers where the last successful search ended so
 * repeated calls resume from there instead of rescanning from the
 * start; prev_limit detects a changed scan window (different n).
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	/* Scan window changed: restart the cache. */
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* Step backwards, wrapping from all_requests to limit-1. */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)	/* dev < 0 marks a free slot */
			break;
		if (req == prev_found)	/* came full circle: none free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;		/* claim the slot */
	return req;
}
132
133
134
135
136
137
138 static inline struct request * get_request_wait(int n, int dev)
139 {
140 register struct request *req;
141
142 while ((req = get_request(n, dev)) == NULL)
143 sleep_on(&wait_for_request);
144 return req;
145 }
146
147
148
/* One read-only flag bit per minor device:
 * ro_bits[major][minor >> 5] bit (minor & 31).  8 longs per major
 * covers 256 minors. */
static long ro_bits[MAX_BLKDEV][8];

151 int is_read_only(int dev)
152 {
153 int minor,major;
154
155 major = MAJOR(dev);
156 minor = MINOR(dev);
157 if (major < 0 || major >= MAX_BLKDEV) return 0;
158 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
159 }
160
161 void set_device_ro(int dev,int flag)
162 {
163 int minor,major;
164
165 major = MAJOR(dev);
166 minor = MINOR(dev);
167 if (major < 0 || major >= MAX_BLKDEV) return;
168 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
169 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
170 }
171
172
173
174
175
176
/*
 * Insert 'req' into the device's request queue in elevator order
 * (IN_ORDER) and start the driver if the queue was empty.  Also
 * accounts the request in the per-disk kernel statistics.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* Map the device to its kstat.dk_drive slot and count it. */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->dev) & 0x0040) >> 6;
			kstat.dk_drive[disk_index]++;
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
			kstat.dk_drive[disk_index]++;
			/* fallthrough into default's break */
		default: break;
	}

	req->next = NULL;
	cli();	/* the queue is also manipulated at interrupt time */
	if (req->bh)
		mark_buffer_clean(req->bh);
	/* Empty queue: install the request and kick the driver. */
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: stop at the first position where req
	 * fits without breaking the existing IN_ORDER sequence (or at
	 * an already-out-of-order boundary). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI drivers are poked even when the queue was non-empty;
	 * NOTE(review): presumably they can dispatch multiple commands
	 * concurrently — behavior defined by the driver, not here. */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
221
222 static void make_request(int major,int rw, struct buffer_head * bh)
223 {
224 unsigned int sector, count;
225 struct request * req;
226 int rw_ahead, max_req;
227
228
229
230 rw_ahead = (rw == READA || rw == WRITEA);
231 if (rw_ahead) {
232 if (bh->b_lock)
233 return;
234 if (rw == READA)
235 rw = READ;
236 else
237 rw = WRITE;
238 }
239 if (rw!=READ && rw!=WRITE) {
240 printk("Bad block dev command, must be R/W/RA/WA\n");
241 return;
242 }
243 count = bh->b_size >> 9;
244 sector = bh->b_blocknr * count;
245 if (blk_size[major])
246 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
247 bh->b_dirt = bh->b_uptodate = 0;
248 bh->b_req = 0;
249 return;
250 }
251
252 if (bh->b_lock)
253 return;
254
255 lock_buffer(bh);
256 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
257 unlock_buffer(bh);
258 return;
259 }
260
261
262
263
264
265 max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
266
267
268
269 repeat:
270 cli();
271
272
273
274
275
276 if (( major == IDE0_MAJOR
277 || major == IDE1_MAJOR
278 || major == FLOPPY_MAJOR
279 || major == SCSI_DISK_MAJOR
280 || major == SCSI_CDROM_MAJOR)
281 && (req = blk_dev[major].current_request))
282 {
283 #ifdef CONFIG_BLK_DEV_HD
284 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
285 #else
286 if (major == FLOPPY_MAJOR)
287 #endif CONFIG_BLK_DEV_HD
288 req = req->next;
289 while (req) {
290 if (req->dev == bh->b_dev &&
291 !req->sem &&
292 req->cmd == rw &&
293 req->sector + req->nr_sectors == sector &&
294 req->nr_sectors < 244)
295 {
296 req->bhtail->b_reqnext = bh;
297 req->bhtail = bh;
298 req->nr_sectors += count;
299 mark_buffer_clean(bh);
300 sti();
301 return;
302 }
303
304 if (req->dev == bh->b_dev &&
305 !req->sem &&
306 req->cmd == rw &&
307 req->sector - count == sector &&
308 req->nr_sectors < 244)
309 {
310 req->nr_sectors += count;
311 bh->b_reqnext = req->bh;
312 req->buffer = bh->b_data;
313 req->current_nr_sectors = count;
314 req->sector = sector;
315 mark_buffer_clean(bh);
316 req->bh = bh;
317 sti();
318 return;
319 }
320
321 req = req->next;
322 }
323 }
324
325
326 req = get_request(max_req, bh->b_dev);
327
328
329 if (! req) {
330 if (rw_ahead) {
331 sti();
332 unlock_buffer(bh);
333 return;
334 }
335 sleep_on(&wait_for_request);
336 sti();
337 goto repeat;
338 }
339
340
341 sti();
342
343
344 req->cmd = rw;
345 req->errors = 0;
346 req->sector = sector;
347 req->nr_sectors = count;
348 req->current_nr_sectors = count;
349 req->buffer = bh->b_data;
350 req->sem = NULL;
351 req->bh = bh;
352 req->bhtail = bh;
353 req->next = NULL;
354 add_request(major+blk_dev,req);
355 }
356
/*
 * Synchronously read or write one page (8 sectors) of a block device,
 * bypassing the buffer cache.  Builds a request carrying a private
 * semaphore and sleeps on it until the I/O completes.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* get_request() must run with interrupts disabled. */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
	/* One page = 8 sectors starting at page<<3. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;	/* completion path is expected to up() this */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);		/* block here until the I/O is done */
}
389
390
391
392
393
/*
 * Main entry point for buffer-cache block I/O: start operation 'rw'
 * (READ, WRITE, READA, WRITEA) on the 'nr' buffer heads in bh[].
 * NULL entries are skipped.  I/O is asynchronous; callers wait on
 * the buffers themselves.  On a detectable error all buffers are
 * invalidated instead.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL buffers; nothing to do if all are NULL. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Device block size: per-minor override, else BLOCK_SIZE. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* "Plug" an idle device with a dummy request so the driver does
	 * not start until all buffers are queued, giving the elevator a
	 * chance to sort them.  dev = -1 marks the plug as not a real
	 * request. */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer now has I/O pending */
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	/* Unplug: drop the dummy and start the driver on the real queue. */
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

	/* Error exit: mark every buffer invalid so callers see failure. */
sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
482
483 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
484 {
485 int i;
486 int buffersize;
487 struct request * req;
488 unsigned int major = MAJOR(dev);
489 struct semaphore sem = MUTEX_LOCKED;
490
491 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
492 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
493 return;
494 }
495
496 if (rw!=READ && rw!=WRITE) {
497 printk("ll_rw_swap: bad block dev command, must be R/W");
498 return;
499 }
500 if (rw == WRITE && is_read_only(dev)) {
501 printk("Can't swap to read-only device 0x%X\n",dev);
502 return;
503 }
504
505 buffersize = PAGE_SIZE / nb;
506
507 for (i=0; i<nb; i++, buf += buffersize)
508 {
509 cli();
510 req = get_request_wait(NR_REQUEST, dev);
511 sti();
512 req->cmd = rw;
513 req->errors = 0;
514 req->sector = (b[i] * buffersize) >> 9;
515 req->nr_sectors = buffersize >> 9;
516 req->current_nr_sectors = buffersize >> 9;
517 req->buffer = buf;
518 req->sem = &sem;
519 req->bh = NULL;
520 req->next = NULL;
521 add_request(major+blk_dev,req);
522 down(&sem);
523 }
524 }
525
526 long blk_dev_init(long mem_start, long mem_end)
527 {
528 struct request * req;
529
530 req = all_requests + NR_REQUEST;
531 while (--req >= all_requests) {
532 req->dev = -1;
533 req->next = NULL;
534 }
535 memset(ro_bits,0,sizeof(ro_bits));
536 #ifdef CONFIG_BLK_DEV_HD
537 mem_start = hd_init(mem_start,mem_end);
538 #endif
539 #ifdef CONFIG_BLK_DEV_IDE
540 mem_start = ide_init(mem_start,mem_end);
541 #endif
542 #ifdef CONFIG_BLK_DEV_XD
543 mem_start = xd_init(mem_start,mem_end);
544 #endif
545 #ifdef CONFIG_CDU31A
546 mem_start = cdu31a_init(mem_start,mem_end);
547 #endif
548 #ifdef CONFIG_MCD
549 mem_start = mcd_init(mem_start,mem_end);
550 #endif
551 #ifdef CONFIG_AZTCD
552 mem_start = aztcd_init(mem_start,mem_end);
553 #endif
554 #ifdef CONFIG_BLK_DEV_FD
555 floppy_init();
556 #else
557 outb_p(0xc, 0x3f2);
558 #endif
559 #ifdef CONFIG_SBPCD
560 mem_start = sbpcd_init(mem_start, mem_end);
561 #endif CONFIG_SBPCD
562 if (ramdisk_size)
563 mem_start += rd_init(mem_start, ramdisk_size*1024);
564 return mem_start;
565 }