This source file includes the following definitions:
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init

/*
 * Low-level block I/O: this file queues read/write requests for block
 * devices and hands them on to the individual device drivers.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>
#include <asm/io.h>
#include "blk.h"

/* The static pool of request structures shared by all block devices. */
static struct request all_requests[NR_REQUEST];

/* Processes waiting for a free request structure sleep here. */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead size, in sectors. */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * blk_dev holds, per major number, the driver's strategy routine
 * (request_fn) and the head of that device's request queue.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL },
        { NULL, NULL }
};

/*
 * blk_size[major][minor] holds the device size in 1024-byte blocks.
 * A NULL entry for a major means no size checking is done for it.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size[major][minor] holds the block size, in bytes, used by
 * the device; a NULL or zero entry means the default BLOCK_SIZE.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * get_request() looks for a free request slot among the first n entries
 * of all_requests, marks it as owned by dev and returns it, or returns
 * NULL if none is free.  prev_found/prev_limit remember where the last
 * search stopped so repeated calls cycle through the table instead of
 * always starting from the same end.
 */
static inline struct request * get_request(int n, int dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->dev < 0)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->dev = dev;
        return req;
}

/*
 * get_request_wait() is like get_request(), but sleeps on
 * wait_for_request until a request structure becomes available.
 */
static inline struct request * get_request_wait(int n, int dev)
{
        register struct request *req;

        while ((req = get_request(n, dev)) == NULL)
                sleep_on(&wait_for_request);
        return req;
}

/* One read-only bit per minor device, indexed by major. */
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
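
/*
 * Worked example of the bitmap arithmetic above: minor 37 maps to
 * ro_bits[major][37 >> 5], i.e. word 1, bit (37 & 31) = 5, so each long
 * word tracks 32 minors of one major device.
 */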

/*
 * add_request() links a request into a device's queue with interrupts
 * disabled.  If the queue was empty the device's request_fn is called
 * right away; otherwise the request is inserted in elevator order
 * (see IN_ORDER in blk.h) to reduce seeking.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short disk_index;

        /* Account the transfer in the per-drive kernel statistics. */
        switch (MAJOR(req->dev)) {
        case SCSI_DISK_MAJOR:
                disk_index = (MINOR(req->dev) & 0x0070) >> 4;
                if (disk_index < 4)
                        kstat.dk_drive[disk_index]++;
                break;
        case HD_MAJOR:
        case XT_DISK_MAJOR:
                disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
                if (disk_index < 4)
                        kstat.dk_drive[disk_index]++;
                break;
        default:
                break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

        /* For SCSI devices the request function is kicked immediately. */
        if (scsi_major(MAJOR(req->dev)))
                (dev->request_fn)();

        sti();
}
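
/*
 * Elevator ordering in add_request() above, illustrated (assuming
 * blk.h's IN_ORDER compares requests by command, then device, then
 * start sector): with queued reads at sectors 8 and 64, a new read at
 * sector 32 is inserted between them, so the head keeps sweeping in one
 * direction instead of seeking back and forth.
 */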

/*
 * make_request() turns one buffer_head into block I/O.  It first tries
 * to merge the buffer into an existing request on the device's queue
 * (back or front merge); only if that fails does it allocate a fresh
 * request structure and hand it to add_request().
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        /*
         * READA/WRITEA are "best effort": if the buffer is already
         * locked we just skip it, otherwise treat it as READ/WRITE.
         */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        /* Reject requests beyond the end of the device, if its size is known. */
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        return;
                }
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

        /*
         * Writes may use at most two thirds of the request slots, so
         * reads can still get through when the queue is write-heavy.
         */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

        /* Look for a free request slot. */
repeat:
        cli();

        /*
         * Try to merge the buffer into a request already queued for
         * this device.  For HD and floppy the request at the head of
         * the queue is skipped, since the driver may be working on it.
         */
        if ((major == HD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
                        req = req->next;
                while (req) {
                        /* Back merge: the buffer continues this request. */
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* Front merge: the buffer ends where this request starts. */
                        if (req->dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

        /* No merge was possible: grab a free request structure. */
        req = get_request(max_req, bh->b_dev);

        /* If none is free, read-ahead simply gives up; otherwise wait and retry. */
        if (! req) {
                if (rw_ahead) {
                        sti();
                        unlock_buffer(bh);
                        return;
                }
                sleep_on(&wait_for_request);
                sti();
                goto repeat;
        }

        /* We got a request slot: fill it in and add it to the queue. */
        sti();

        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

/*
 * ll_rw_page() reads or writes one page (eight 512-byte sectors) of a
 * block device synchronously: the request carries a private semaphore
 * and the caller sleeps on it until the transfer completes.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device 0x%X\n",dev);
                return;
        }
        cli();
        req = get_request_wait(NR_REQUEST, dev);
        sti();

        /* Fill in the request and wait for it to complete. */
        req->cmd = rw;
        req->errors = 0;
        req->sector = page<<3;
        req->nr_sectors = 8;
        req->current_nr_sectors = 8;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}

/*
 * ll_rw_block() queues read/write requests for an array of buffers.
 * All buffers must belong to the same device and use the same block
 * size.  If the queue is empty and more than one buffer is passed, the
 * queue is "plugged" with a dummy request so the driver does not start
 * until everything has been queued and can be merged and sorted.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int plugged;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Skip leading NULL buffer heads. */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
                "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
                        (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine the block size this device uses. */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify that all buffers have that block size. */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                                correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
                goto sorry;
        }

        /*
         * Plug the queue if it is empty and several buffers are being
         * queued, so the requests can be merged before the driver runs.
         */
        plugged = 0;
        cli();
        if (!dev->current_request && nr > 1) {
                dev->current_request = &plug;
                plug.dev = -1;
                plug.next = NULL;
                plugged = 1;
        }
        sti();
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        if (plugged) {
                /* Unplug: hand the accumulated queue to the driver. */
                cli();
                dev->current_request = plug.next;
                (dev->request_fn)();
                sti();
        }
        return;

sorry:
        /* On error, mark the buffers neither dirty nor up to date. */
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}
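
/*
 * Typical caller, sketched (hypothetical outline of what the buffer
 * cache's bread() in fs/buffer.c roughly does): read a single block
 * and wait for it before touching the data.
 *
 *      struct buffer_head * bh = getblk(dev, block, size);
 *      if (!bh->b_uptodate) {
 *              ll_rw_block(READ, 1, &bh);
 *              wait_on_buffer(bh);
 *      }
 */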

/*
 * ll_rw_swap_file() transfers nb blocks of a swap file, one request at
 * a time and synchronously.  b[] holds the block numbers, buf the data
 * area, and each block is PAGE_SIZE/nb bytes long.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
        int i;
        int buffersize;
        struct request * req;
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw!=READ && rw!=WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device 0x%X\n",dev);
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (i=0; i<nb; i++, buf += buffersize)
        {
                cli();
                req = get_request_wait(NR_REQUEST, dev);
                sti();
                req->cmd = rw;
                req->errors = 0;
                req->sector = (b[i] * buffersize) >> 9;
                req->nr_sectors = buffersize >> 9;
                req->current_nr_sectors = buffersize >> 9;
                req->buffer = buf;
                req->sem = &sem;
                req->bh = NULL;
                req->next = NULL;
                add_request(major+blk_dev,req);
                down(&sem);
        }
}

/*
 * blk_dev_init() marks every request slot free, clears the read-only
 * bitmap and calls the init routines of the configured block drivers.
 * Drivers that need permanent memory take it from mem_start and return
 * the updated value, which is passed back to the caller.
 */
long blk_dev_init(long mem_start, long mem_end)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->dev = -1;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
        mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
        mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
        mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
        mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        /* No floppy driver configured: switch the floppy motors off. */
        outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_SBPCD
        mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
        if (ramdisk_size)
                mem_start += rd_init(mem_start, ramdisk_size*1024);
        return mem_start;
}