This source file includes following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18
19 #include <asm/system.h>
20 #include <asm/io.h>
21 #include "blk.h"
22
23
24
25
26
/* The global pool of request slots shared by all block devices.  A slot
 * is free when its dev field is negative (see blk_dev_init/get_request). */
static struct request all_requests[NR_REQUEST];
28
29
30
31
/* Processes sleep here when all_requests is exhausted; woken when a
 * request slot is freed. */
struct wait_queue * wait_for_request = NULL;
33
34
35
/* Per-major read-ahead amount; zero means no read-ahead for that major. */
int read_ahead[MAX_BLKDEV] = {0, };
37
38
39
40
41
/* One entry per block-device major: the driver's strategy routine
 * (request_fn) and the head of its pending-request queue.  Drivers fill
 * in their own slot at init time; all start out empty. */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};
67
68
69
70
71
72
73
74
75
/* Per-major table of device sizes, indexed by minor.  Used by
 * make_request to reject I/O past the end of a device; the comparison
 * there is against (sector+count)>>1, i.e. units of 1K blocks. */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
77
78
79
80
81
82
83
84
/* Per-major table of block sizes in bytes, indexed by minor.  A NULL
 * table (or a zero entry) means BLOCK_SIZE — see ll_rw_block. */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
86
87
88
89
90
91
92
93
94
95
96
97
98
/* Per-major table of hardware sector sizes, indexed by minor.
 * NOTE(review): not referenced anywhere in this file — presumably
 * consumed by individual drivers; confirm against their sources. */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
100
101
102
103
104
105
106 static inline struct request * get_request(int n, int dev)
107 {
108 static struct request *prev_found = NULL, *prev_limit = NULL;
109 register struct request *req, *limit;
110
111 if (n <= 0)
112 panic("get_request(%d): impossible!\n", n);
113
114 limit = all_requests + n;
115 if (limit != prev_limit) {
116 prev_limit = limit;
117 prev_found = all_requests;
118 }
119 req = prev_found;
120 for (;;) {
121 req = ((req > all_requests) ? req : limit) - 1;
122 if (req->dev < 0)
123 break;
124 if (req == prev_found)
125 return NULL;
126 }
127 prev_found = req;
128 req->dev = dev;
129 return req;
130 }
131
132
133
134
135
136
137 static inline struct request * get_request_wait(int n, int dev)
138 {
139 register struct request *req;
140
141 while ((req = get_request(n, dev)) == NULL)
142 sleep_on(&wait_for_request);
143 return req;
144 }
145
146
147
/* Read-only flags: one bit per minor (8 longs x 32 bits = 256 minors
 * per major).  Maintained by set_device_ro, tested by is_read_only. */
static long ro_bits[MAX_BLKDEV][8];
149
150 int is_read_only(int dev)
151 {
152 int minor,major;
153
154 major = MAJOR(dev);
155 minor = MINOR(dev);
156 if (major < 0 || major >= MAX_BLKDEV) return 0;
157 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
158 }
159
160 void set_device_ro(int dev,int flag)
161 {
162 int minor,major;
163
164 major = MAJOR(dev);
165 minor = MINOR(dev);
166 if (major < 0 || major >= MAX_BLKDEV) return;
167 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
168 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
169 }
170
171
172
173
174
175
/*
 * Insert req into dev's pending queue in elevator order (IN_ORDER) and
 * start the driver if the queue was idle.  Runs the queue manipulation
 * with interrupts disabled (cli) and re-enables them (sti) on every
 * return path.  Also bumps the per-drive kstat I/O counters.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* Per-physical-drive accounting: map (major,minor) to one of the
	 * four kstat.dk_drive counters. */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0040) >> 6;
			kstat.dk_drive[disk_index]++;
			break;
		case IDE1_MAJOR:	disk_index = ((MINOR(req->dev) & 0x0040) >> 6) + 2;
			kstat.dk_drive[disk_index]++;
			/* no break: falls through to default, which only breaks */
		default:		break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* Queue was idle: make req current and kick the driver. */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: find the first position where req sorts
	 * between tmp and tmp->next per IN_ORDER (or at a queue "break"
	 * where the existing order already wraps). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* For SCSI majors the driver is kicked even with a non-empty
	 * queue — presumably because SCSI hosts accept several requests
	 * in flight; confirm against the SCSI driver. */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
220
221 static void make_request(int major,int rw, struct buffer_head * bh)
222 {
223 unsigned int sector, count;
224 struct request * req;
225 int rw_ahead, max_req;
226
227
228
229 rw_ahead = (rw == READA || rw == WRITEA);
230 if (rw_ahead) {
231 if (bh->b_lock)
232 return;
233 if (rw == READA)
234 rw = READ;
235 else
236 rw = WRITE;
237 }
238 if (rw!=READ && rw!=WRITE) {
239 printk("Bad block dev command, must be R/W/RA/WA\n");
240 return;
241 }
242 count = bh->b_size >> 9;
243 sector = bh->b_blocknr * count;
244 if (blk_size[major])
245 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
246 bh->b_dirt = bh->b_uptodate = 0;
247 bh->b_req = 0;
248 return;
249 }
250
251 if (bh->b_lock)
252 return;
253
254 lock_buffer(bh);
255 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
256 unlock_buffer(bh);
257 return;
258 }
259
260
261
262
263
264 max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
265
266
267
268 repeat:
269 cli();
270
271
272
273
274
275 if (( major == IDE0_MAJOR
276 || major == IDE1_MAJOR
277 || major == FLOPPY_MAJOR
278 || major == SCSI_DISK_MAJOR
279 || major == SCSI_CDROM_MAJOR)
280 && (req = blk_dev[major].current_request))
281 {
282 #ifdef CONFIG_BLK_DEV_HD
283 if (major == HD_MAJOR || major == FLOPPY_MAJOR)
284 #else
285 if (major == FLOPPY_MAJOR)
286 #endif CONFIG_BLK_DEV_HD
287 req = req->next;
288 while (req) {
289 if (req->dev == bh->b_dev &&
290 !req->sem &&
291 req->cmd == rw &&
292 req->sector + req->nr_sectors == sector &&
293 req->nr_sectors < 244)
294 {
295 req->bhtail->b_reqnext = bh;
296 req->bhtail = bh;
297 req->nr_sectors += count;
298 mark_buffer_clean(bh);
299 sti();
300 return;
301 }
302
303 if (req->dev == bh->b_dev &&
304 !req->sem &&
305 req->cmd == rw &&
306 req->sector - count == sector &&
307 req->nr_sectors < 244)
308 {
309 req->nr_sectors += count;
310 bh->b_reqnext = req->bh;
311 req->buffer = bh->b_data;
312 req->current_nr_sectors = count;
313 req->sector = sector;
314 mark_buffer_clean(bh);
315 req->bh = bh;
316 sti();
317 return;
318 }
319
320 req = req->next;
321 }
322 }
323
324
325 req = get_request(max_req, bh->b_dev);
326
327
328 if (! req) {
329 if (rw_ahead) {
330 sti();
331 unlock_buffer(bh);
332 return;
333 }
334 sleep_on(&wait_for_request);
335 sti();
336 goto repeat;
337 }
338
339
340 sti();
341
342
343 req->cmd = rw;
344 req->errors = 0;
345 req->sector = sector;
346 req->nr_sectors = count;
347 req->current_nr_sectors = count;
348 req->buffer = bh->b_data;
349 req->sem = NULL;
350 req->bh = bh;
351 req->bhtail = bh;
352 req->next = NULL;
353 add_request(major+blk_dev,req);
354 }
355
/*
 * Read or write one page (8 sectors, starting at sector page<<3) of the
 * given device synchronously: queues a single request with a private
 * semaphore as completion signal and sleeps on it until the driver is
 * done.  rw must be READ or WRITE; writing a read-only device is refused.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	/* On-stack completion semaphore, downed below until end_request. */
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* Slot allocation scans the shared pool: interrupts off. */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* one page = 8 sectors */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;		/* driver ups this on completion */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);			/* wait for the I/O to finish */
}
388
389
390
391
392
/*
 * Queue nr buffer heads for I/O (NULL entries are skipped).  All buffers
 * must share the device of bh[0] and have that device's block size.  On
 * any sanity failure the buffers are marked not dirty / not uptodate
 * ("sorry") so callers do not wait on them forever.
 *
 * When the target queue is idle and more than one buffer is submitted,
 * the queue is "plugged" with a dummy on-stack request so the driver
 * does not start until all buffers are queued (and merged).
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request used to plug the queue */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; nothing to do if all are NULL. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the device's block size (default BLOCK_SIZE). */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* Plug an idle queue: park &plug (dev = -1 so it can never be
	 * mistaken for a live request) at the head, so request_fn is
	 * not started until every buffer has been queued. */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		/* Unplug: drop the dummy and start the driver. */
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

sorry:
	/* Invalidate all buffers so nobody waits on them. */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
481
482 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
483 {
484 int i;
485 int buffersize;
486 struct request * req;
487 unsigned int major = MAJOR(dev);
488 struct semaphore sem = MUTEX_LOCKED;
489
490 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
491 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
492 return;
493 }
494
495 if (rw!=READ && rw!=WRITE) {
496 printk("ll_rw_swap: bad block dev command, must be R/W");
497 return;
498 }
499 if (rw == WRITE && is_read_only(dev)) {
500 printk("Can't swap to read-only device 0x%X\n",dev);
501 return;
502 }
503
504 buffersize = PAGE_SIZE / nb;
505
506 for (i=0; i<nb; i++, buf += buffersize)
507 {
508 cli();
509 req = get_request_wait(NR_REQUEST, dev);
510 sti();
511 req->cmd = rw;
512 req->errors = 0;
513 req->sector = (b[i] * buffersize) >> 9;
514 req->nr_sectors = buffersize >> 9;
515 req->current_nr_sectors = buffersize >> 9;
516 req->buffer = buf;
517 req->sem = &sem;
518 req->bh = NULL;
519 req->next = NULL;
520 add_request(major+blk_dev,req);
521 down(&sem);
522 }
523 }
524
525 long blk_dev_init(long mem_start, long mem_end)
526 {
527 struct request * req;
528
529 req = all_requests + NR_REQUEST;
530 while (--req >= all_requests) {
531 req->dev = -1;
532 req->next = NULL;
533 }
534 memset(ro_bits,0,sizeof(ro_bits));
535 #ifdef CONFIG_BLK_DEV_HD
536 mem_start = hd_init(mem_start,mem_end);
537 #endif
538 #ifdef CONFIG_BLK_DEV_IDE
539 mem_start = ide_init(mem_start,mem_end);
540 #endif
541 #ifdef CONFIG_BLK_DEV_XD
542 mem_start = xd_init(mem_start,mem_end);
543 #endif
544 #ifdef CONFIG_CDU31A
545 mem_start = cdu31a_init(mem_start,mem_end);
546 #endif
547 #ifdef CONFIG_MCD
548 mem_start = mcd_init(mem_start,mem_end);
549 #endif
550 #ifdef CONFIG_BLK_DEV_FD
551 floppy_init();
552 #else
553 outb_p(0xc, 0x3f2);
554 #endif
555 #ifdef CONFIG_SBPCD
556 mem_start = sbpcd_init(mem_start, mem_end);
557 #endif CONFIG_SBPCD
558 if (ramdisk_size)
559 mem_start += rd_init(mem_start, ramdisk_size*1024);
560 return mem_start;
561 }