This source file includes following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/config.h>
15 #include <linux/locks.h>
16
17 #include <asm/system.h>
18
19 #include "blk.h"
20
21
22
23
24
25 static struct request all_requests[NR_REQUEST];
26
27
28
29
30 struct wait_queue * wait_for_request = NULL;
31
32
33
34 int read_ahead[MAX_BLKDEV] = {0, };
35
36
37
38
39
40 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
41 { NULL, NULL },
42 { NULL, NULL },
43 { NULL, NULL },
44 { NULL, NULL },
45 { NULL, NULL },
46 { NULL, NULL },
47 { NULL, NULL },
48 { NULL, NULL },
49 { NULL, NULL },
50 { NULL, NULL }
51 };
52
53
54
55
56
57
58
59
60
61 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
62
63
64
65
66
67
68
69
70 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
71
72
73
74
75
76
/*
 * Find a free request slot for 'dev' among the first n entries of
 * all_requests, and claim it by writing dev into req->dev.
 * A slot with req->dev < 0 is free.  Returns NULL if none of the n
 * slots is free.  Caller is expected to have interrupts disabled so
 * the claim is atomic.
 *
 * prev_found/prev_limit cache where the previous search stopped so
 * successive calls resume scanning round-robin instead of always
 * rescanning from the same end of the table.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	/* Pool size changed since last call: restart the cached scan. */
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* Step backwards, wrapping from the front to limit - 1. */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)	/* free slot found */
			break;
		if (req == prev_found)	/* came full circle: all busy */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;		/* claim the slot */
	return req;
}
102
103
104
105
106
107
108 static inline struct request * get_request_wait(int n, int dev)
109 {
110 register struct request *req;
111
112 while ((req = get_request(n, dev)) == NULL)
113 sleep_on(&wait_for_request);
114 return req;
115 }
116
117
118
119 static long ro_bits[MAX_BLKDEV][8];
120
121 int is_read_only(int dev)
122 {
123 int minor,major;
124
125 major = MAJOR(dev);
126 minor = MINOR(dev);
127 if (major < 0 || major >= MAX_BLKDEV) return 0;
128 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
129 }
130
131 void set_device_ro(int dev,int flag)
132 {
133 int minor,major;
134
135 major = MAJOR(dev);
136 minor = MINOR(dev);
137 if (major < 0 || major >= MAX_BLKDEV) return;
138 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
139 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
140 }
141
142
143
144
145
146
/*
 * Queue a filled-in request on a block device in elevator order.
 * Interrupts are disabled across the queue manipulation.  If the queue
 * was empty, the request becomes current and the driver's request_fn
 * is started immediately.  IN_ORDER() defines the seek-minimizing
 * ordering used to pick the insertion point.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	/* The buffer is now committed to the driver: clear the dirty bit
	   so the buffer cache won't schedule it for writing again. */
	if (req->bh)
		req->bh->b_dirt = 0;
	if (!(tmp = dev->current_request)) {
		/* Idle queue: install the request and kick the device. */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Walk the queue (past the active head) and insert at the first
	   point that keeps elevator order, or at a turning point where
	   the existing queue itself breaks order. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI drivers want a nudge on every queued request, not only
	   when the queue transitions from empty. */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
176
/*
 * Build a struct request for one buffer and queue it on 'major'.
 * The buffer's block number is converted to a 512-byte sector range;
 * the function first tries to merge it into an existing queued request
 * (back-merge or front-merge), and otherwise allocates a free request
 * slot and hands it to add_request().  READA/WRITEA ("ahead") requests
 * are best-effort: they are dropped rather than blocked on.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/* Ahead-requests must never block: skip locked buffers, then
	   treat them as ordinary READ/WRITE (rw_ahead remembers it). */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;
	/* Reject I/O past the end of the device; blk_size[] is in kB,
	   so >>1 converts the end sector to kB for the comparison. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	/* Writing a clean buffer or reading an uptodate one is a no-op. */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/* Writes may only use 2/3 of the request slots, so reads (which
	   processes wait on synchronously) can always get through. */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

repeat:
	cli();

	/* Try to merge the buffer into an already-queued request.  Only
	   attempted on disk-type majors where contiguity pays off. */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* The HD driver may be mid-transfer on the head request:
		   leave it alone and start scanning at the second one. */
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			/* Back-merge: buffer starts right after req ends. */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;	/* committed to driver */
				sti();
				return;
			}

			/* Front-merge: buffer ends right where req starts;
			   the request's start moves back to this buffer. */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 254)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				bh->b_dirt = 0;	/* committed to driver */
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* No merge possible: grab a free request slot. */
	req = get_request(max_req, bh->b_dev);

	/* None free: ahead-requests give up; others sleep and retry. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	/* Slot is claimed; safe to take interrupts while filling it in. */
	sti();

	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
300
/*
 * Synchronously read or write one page (8 sectors, starting at sector
 * page<<3) directly to/from 'buffer', bypassing the buffer cache.
 * The calling process sleeps in TASK_SWAPPING until the driver
 * completes the request and wakes it through req->waiting.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	/* Fill in the request: one page = 8 sectors at page<<3. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;	/* driver wakes this task on completion */
	req->bh = NULL;
	req->next = NULL;
	/* State must be set before add_request(): the completion interrupt
	   may fire (and wake us) before schedule() is reached. */
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();
}
333
334
335
336
337
338 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
339 {
340 unsigned int major;
341 struct request plug;
342 int plugged;
343 int correct_size;
344 struct blk_dev_struct * dev;
345 int i;
346
347
348 while (!*bh) {
349 bh++;
350 if (--nr <= 0)
351 return;
352 };
353
354 dev = NULL;
355 if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
356 dev = blk_dev + major;
357 if (!dev || !dev->request_fn) {
358 printk(
359 "ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
360 (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
361 goto sorry;
362 }
363
364
365 correct_size = BLOCK_SIZE;
366 if (blksize_size[major]) {
367 i = blksize_size[major][MINOR(bh[0]->b_dev)];
368 if (i)
369 correct_size = i;
370 }
371
372
373 for (i = 0; i < nr; i++) {
374 if (bh[i] && bh[i]->b_size != correct_size) {
375 printk(
376 "ll_rw_block: only %d-char blocks implemented (%lu)\n",
377 correct_size, bh[i]->b_size);
378 goto sorry;
379 }
380 }
381
382 if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
383 printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
384 goto sorry;
385 }
386
387
388
389
390
391
392 plugged = 0;
393 cli();
394 if (!dev->current_request && nr > 1) {
395 dev->current_request = &plug;
396 plug.dev = -1;
397 plug.next = NULL;
398 plugged = 1;
399 }
400 sti();
401 for (i = 0; i < nr; i++) {
402 if (bh[i]) {
403 bh[i]->b_req = 1;
404 make_request(major, rw, bh[i]);
405 }
406 }
407 if (plugged) {
408 cli();
409 dev->current_request = plug.next;
410 (dev->request_fn)();
411 sti();
412 }
413 return;
414
415 sorry:
416 for (i = 0; i < nr; i++) {
417 if (bh[i])
418 bh[i]->b_dirt = bh[i]->b_uptodate = 0;
419 }
420 return;
421 }
422
423 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
424 {
425 int i;
426 int buffersize;
427 struct request * req;
428 unsigned int major = MAJOR(dev);
429
430 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
431 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
432 return;
433 }
434
435 if (rw!=READ && rw!=WRITE) {
436 printk("ll_rw_swap: bad block dev command, must be R/W");
437 return;
438 }
439 if (rw == WRITE && is_read_only(dev)) {
440 printk("Can't swap to read-only device 0x%X\n",dev);
441 return;
442 }
443
444 buffersize = PAGE_SIZE / nb;
445
446 for (i=0; i<nb; i++, buf += buffersize)
447 {
448 cli();
449 req = get_request_wait(NR_REQUEST, dev);
450 sti();
451 req->cmd = rw;
452 req->errors = 0;
453 req->sector = (b[i] * buffersize) >> 9;
454 req->nr_sectors = buffersize >> 9;
455 req->current_nr_sectors = buffersize >> 9;
456 req->buffer = buf;
457 req->waiting = current;
458 req->bh = NULL;
459 req->next = NULL;
460 current->state = TASK_UNINTERRUPTIBLE;
461 add_request(major+blk_dev,req);
462 schedule();
463 }
464 }
465
466 long blk_dev_init(long mem_start, long mem_end)
467 {
468 struct request * req;
469
470 req = all_requests + NR_REQUEST;
471 while (--req >= all_requests) {
472 req->dev = -1;
473 req->next = NULL;
474 }
475 memset(ro_bits,0,sizeof(ro_bits));
476 #ifdef CONFIG_BLK_DEV_HD
477 mem_start = hd_init(mem_start,mem_end);
478 #endif
479 #ifdef CONFIG_BLK_DEV_XD
480 mem_start = xd_init(mem_start,mem_end);
481 #endif
482 #ifdef CONFIG_CDU31A
483 mem_start = cdu31a_init(mem_start,mem_end);
484 #endif
485 #ifdef CONFIG_MCD
486 mem_start = mcd_init(mem_start,mem_end);
487 #endif
488 if (ramdisk_size)
489 mem_start += rd_init(mem_start, ramdisk_size*1024);
490 return mem_start;
491 }