This source file includes following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 * This handles all read/write requests to block devices
 */
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18
19 #include <asm/system.h>
20 #include <asm/io.h>
21 #include "blk.h"
22
23
24
25
26
/*
 * The static request table.  A slot is free iff its dev field is < 0
 * (set up by blk_dev_init(), claimed by get_request()).
 */
static struct request all_requests[NR_REQUEST];

/*
 * Processes waiting for a free request slot sleep here; they are woken
 * when a slot is released.  Used by get_request_wait() and make_request().
 */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead amount; drivers fill in their own entry. */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * One entry per major device: the driver's strategy routine (request_fn)
 * and the head of its pending-request queue.
 * NOTE(review): exactly 10 initializers here; presumably MAX_BLKDEV >= 10
 * and any further slots simply stay zero -- confirm against blk.h.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/*
 * Per-major array of device sizes, indexed by minor.  Units are 1k
 * blocks (make_request() compares against (sector+count)>>1, i.e. kB).
 * NULL means the driver imposes no size limit.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Per-major array of block sizes in bytes, indexed by minor.  NULL (or a
 * zero entry) means the default BLOCK_SIZE is used (see ll_rw_block()).
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
78
/*
 * Look for a free request slot among the first n entries of
 * all_requests and claim it for 'dev'.  Returns NULL if all n slots
 * are in use.  A slot is free iff its dev field is < 0.
 *
 * NOTE(review): the static search cache below makes this non-reentrant;
 * presumably every caller runs with interrupts disabled (all call sites
 * in this file are inside cli() regions) -- confirm.
 */
static inline struct request * get_request(int n, int dev)
{
	/* Cache of where the last successful search ended, so repeated
	 * allocations don't always rescan from the same end of the table. */
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	/* Caller asked for a different limit than last time: reset cache. */
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* Step backwards, wrapping from the table start to 'limit'. */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)		/* found a free slot */
			break;
		if (req == prev_found)		/* full circle: none free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;		/* claim the slot */
	return req;
}
104
105
106
107
108
109
110 static inline struct request * get_request_wait(int n, int dev)
111 {
112 register struct request *req;
113
114 while ((req = get_request(n, dev)) == NULL)
115 sleep_on(&wait_for_request);
116 return req;
117 }
118
119
120
121 static long ro_bits[MAX_BLKDEV][8];
122
123 int is_read_only(int dev)
124 {
125 int minor,major;
126
127 major = MAJOR(dev);
128 minor = MINOR(dev);
129 if (major < 0 || major >= MAX_BLKDEV) return 0;
130 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
131 }
132
133 void set_device_ro(int dev,int flag)
134 {
135 int minor,major;
136
137 major = MAJOR(dev);
138 minor = MINOR(dev);
139 if (major < 0 || major >= MAX_BLKDEV) return;
140 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
141 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
142 }
143
144
145
146
147
148
/*
 * Insert 'req' into the queue of 'dev', starting the device if the
 * queue was empty.  Interrupts are disabled while the list is touched.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* Per-disk I/O accounting: map the minor onto one of the four
	 * kstat.dk_drive counters (SCSI packs the drive in bits 4-6,
	 * HD/XT in bits 6-7 of the minor). */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR: disk_index = (MINOR(req->dev) & 0x0070) >> 4;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		case HD_MAJOR:
		case XT_DISK_MAJOR: disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
			if (disk_index < 4)
				kstat.dk_drive[disk_index]++;
			break;
		default: break;
	}

	req->next = NULL;
	cli();
	/* The buffer's data is now owned by the request queue. */
	if (req->bh)
		mark_buffer_clean(req->bh);
	/* Queue empty: install the request and kick the driver at once. */
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: walk the queue and slot 'req' in where
	 * IN_ORDER says it belongs (also handles the queue's wrap point,
	 * where tmp -> tmp->next is itself out of order). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI majors get the strategy routine called even with a
	 * non-empty queue (their request_fn can run multiple commands). */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
192
/*
 * Turn one buffer head into block I/O on major 'major'.  Tries to merge
 * the buffer onto an existing queued request (back- or front-merge)
 * before allocating a fresh request slot.  The buffer is locked for the
 * duration of the I/O; read-ahead/write-ahead requests are silently
 * dropped when no resources are free.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/* READA/WRITEA are best-effort: if the buffer is already locked,
	 * just forget about it; otherwise treat it as a normal READ/WRITE. */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;
	/* blk_size[] is in 1k units; (sector+count)>>1 is the request end
	 * in kB (the shift binds tighter than '<').  Reject requests past
	 * the end of the device and drop the buffer's data. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			return;
		}
	/* Buffer already in flight: nothing to do. */
	if (bh->b_lock)
		return;
	/* From here on the buffer stays locked until I/O completion. */
	lock_buffer(bh);
	/* Writing a clean buffer, or reading an up-to-date one: no I/O. */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/* Writes may only consume 2/3 of the request slots, so reads
	 * (which processes actually block on) always make progress. */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

	/* Big loop: look for a free request slot, sleeping if needed. */
repeat:
	cli();

	/* Try to merge onto an already-queued request for the majors that
	 * support it.  NOTE(review): for HD/FLOPPY the head of the queue is
	 * skipped -- presumably because those drivers are actively working
	 * on the first entry; confirm against the drivers. */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
			req = req->next;
		while (req) {
			/* Back-merge: bh starts exactly where req ends.
			 * Only async (req->sem == NULL) same-direction
			 * requests, capped at 244 sectors. */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* Front-merge: bh ends exactly where req starts;
			 * bh becomes the new head buffer of the request. */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* No merge possible: grab an unused request slot. */
	req = get_request(max_req, bh->b_dev);

	/* None free: drop read-aheads, otherwise sleep and retry. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	/* We have a request slot; safe to re-enable interrupts. */
	sti();

	/* Fill in the request and queue it. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;	/* async: completion unlocks bh instead */
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
322
/*
 * Synchronously read or write one page (8 sectors of 512 bytes) at page
 * index 'page' of device 'dev' into/from 'buffer'.  Blocks until the
 * I/O completes via an on-stack semaphore that the completion path ups.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;	/* down() below waits for I/O */

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* Interrupts off while allocating from the shared request table. */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
	/* Fill in the request and queue it. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* 8 sectors per page */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;	/* synchronous: completion does up(&sem) */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);		/* sleep until the I/O is done */
}
355
356
357
358
359
/*
 * Submit 'nr' buffer heads for reading or writing in one batch.  All
 * buffers must be on the same device and have the device's block size.
 * When the device queue is idle and more than one buffer is submitted,
 * a dummy on-stack "plug" request holds the driver off until all the
 * requests are queued, so they can be merged/sorted first.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request used to plug the queue */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; nothing to do if they all are. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the device's block size (default BLOCK_SIZE). */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
				correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* Plug the queue: install the dummy request so the driver won't
	 * start until all requests below have been queued and sorted. */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;		/* can never match a real request */
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer now has I/O pending */
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	/* Pull the plug and finally start the driver. */
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

	/* Error path: invalidate the buffers so nobody trusts their data. */
sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
448
449 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
450 {
451 int i;
452 int buffersize;
453 struct request * req;
454 unsigned int major = MAJOR(dev);
455 struct semaphore sem = MUTEX_LOCKED;
456
457 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
458 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
459 return;
460 }
461
462 if (rw!=READ && rw!=WRITE) {
463 printk("ll_rw_swap: bad block dev command, must be R/W");
464 return;
465 }
466 if (rw == WRITE && is_read_only(dev)) {
467 printk("Can't swap to read-only device 0x%X\n",dev);
468 return;
469 }
470
471 buffersize = PAGE_SIZE / nb;
472
473 for (i=0; i<nb; i++, buf += buffersize)
474 {
475 cli();
476 req = get_request_wait(NR_REQUEST, dev);
477 sti();
478 req->cmd = rw;
479 req->errors = 0;
480 req->sector = (b[i] * buffersize) >> 9;
481 req->nr_sectors = buffersize >> 9;
482 req->current_nr_sectors = buffersize >> 9;
483 req->buffer = buf;
484 req->sem = &sem;
485 req->bh = NULL;
486 req->next = NULL;
487 add_request(major+blk_dev,req);
488 down(&sem);
489 }
490 }
491
492 long blk_dev_init(long mem_start, long mem_end)
493 {
494 struct request * req;
495
496 req = all_requests + NR_REQUEST;
497 while (--req >= all_requests) {
498 req->dev = -1;
499 req->next = NULL;
500 }
501 memset(ro_bits,0,sizeof(ro_bits));
502 #ifdef CONFIG_BLK_DEV_HD
503 mem_start = hd_init(mem_start,mem_end);
504 #endif
505 #ifdef CONFIG_BLK_DEV_XD
506 mem_start = xd_init(mem_start,mem_end);
507 #endif
508 #ifdef CONFIG_CDU31A
509 mem_start = cdu31a_init(mem_start,mem_end);
510 #endif
511 #ifdef CONFIG_MCD
512 mem_start = mcd_init(mem_start,mem_end);
513 #endif
514 #ifdef CONFIG_BLK_DEV_FD
515 floppy_init();
516 #else
517 outb_p(0xc, 0x3f2);
518 #endif
519 #ifdef CONFIG_SBPCD
520 mem_start = sbpcd_init(mem_start, mem_end);
521 #endif CONFIG_SBPCD
522 if (ramdisk_size)
523 mem_start += rd_init(mem_start, ramdisk_size*1024);
524 return mem_start;
525 }