This source file includes the following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/config.h>
16 #include <linux/locks.h>
17
18 #include <asm/system.h>
19
20 #include "blk.h"
21
#ifdef CONFIG_SBPCD
extern u_long sbpcd_init(u_long, u_long);
#endif /* CONFIG_SBPCD */	/* bare token after #endif is not ANSI C */
25
26
27
28
29
/* The shared pool of request slots used by all block devices.
 * A slot with dev < 0 is free (see get_request() / blk_dev_init()). */
static struct request all_requests[NR_REQUEST];

/* Processes sleep here while waiting for a free request slot. */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead amount; presumably in 512-byte sectors, as used
 * by the individual drivers — TODO confirm against the drivers. */
int read_ahead[MAX_BLKDEV] = {0, };

/* One entry per major device number: the driver's strategy routine
 * (request_fn) and the head of that device's request queue. */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/* Per-major device-size tables, indexed by minor, in 1k blocks
 * (make_request() converts sectors with >>1 before comparing).
 * A NULL entry means the size is unknown and no bounds check is done. */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/* Per-major block-size tables, indexed by minor, in bytes.
 * A NULL entry (or zero value) means the default BLOCK_SIZE
 * (see ll_rw_block()). */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
77
78
79
80
81
82 static inline struct request * get_request(int n, int dev)
83 {
84 static struct request *prev_found = NULL, *prev_limit = NULL;
85 register struct request *req, *limit;
86
87 if (n <= 0)
88 panic("get_request(%d): impossible!\n", n);
89
90 limit = all_requests + n;
91 if (limit != prev_limit) {
92 prev_limit = limit;
93 prev_found = all_requests;
94 }
95 req = prev_found;
96 for (;;) {
97 req = ((req > all_requests) ? req : limit) - 1;
98 if (req->dev < 0)
99 break;
100 if (req == prev_found)
101 return NULL;
102 }
103 prev_found = req;
104 req->dev = dev;
105 return req;
106 }
107
108
109
110
111
112
113 static inline struct request * get_request_wait(int n, int dev)
114 {
115 register struct request *req;
116
117 while ((req = get_request(n, dev)) == NULL)
118 sleep_on(&wait_for_request);
119 return req;
120 }
121
122
123
124 static long ro_bits[MAX_BLKDEV][8];
125
126 int is_read_only(int dev)
127 {
128 int minor,major;
129
130 major = MAJOR(dev);
131 minor = MINOR(dev);
132 if (major < 0 || major >= MAX_BLKDEV) return 0;
133 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
134 }
135
136 void set_device_ro(int dev,int flag)
137 {
138 int minor,major;
139
140 major = MAJOR(dev);
141 minor = MINOR(dev);
142 if (major < 0 || major >= MAX_BLKDEV) return;
143 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
144 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
145 }
146
147
148
149
150
151
/*
 * Queue 'req' on the device's request list, keeping the list in
 * IN_ORDER (elevator) order.  Interrupts are disabled while the
 * list is manipulated.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	/* Empty queue: install the request and start the device. */
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: walk past in-order entries, inserting at
	 * the first point that keeps the sweep order, or at the list's
	 * existing "wrap" point (where tmp > tmp->next). */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* For SCSI majors the strategy routine is called even though the
	 * queue was non-empty — presumably because SCSI hosts can accept
	 * additional commands while busy; confirm against the SCSI core. */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
181
/*
 * Build a request for one buffer head and queue it on the device for
 * major number 'major', first trying to merge it into an already
 * queued request when the sectors are adjacent.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/* Read-ahead/write-ahead is best-effort: if the buffer is locked
	 * we simply drop the request instead of waiting, otherwise it is
	 * treated as a normal read or write. */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;
	/* Bounds check: blk_size[] is in 1k units, hence >>1 to convert
	 * sectors to kilobytes.  Past-the-end I/O invalidates the buffer. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	/* Nothing to do: writing a clean buffer, or reading one that is
	 * already up to date. */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/* Writes may only use two thirds of the request slots, so that
	 * reads (which block the requesting process) can always make
	 * progress. */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

repeat:
	cli();

	/* Try to merge the buffer into an already queued request, but
	 * only for majors whose queues are safe to edit here (hard disk
	 * and SCSI disk/CD-ROM). */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major == HD_MAJOR)
			/* skip the head entry — presumably the request the
			 * HD driver is currently executing; confirm. */
			req = req->next;
		while (req) {
			/* Back merge: the buffer starts exactly where this
			 * request ends.  244 caps the sectors per request
			 * (limit's origin not visible here — confirm). */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* Front merge: the buffer ends exactly where this
			 * request begins, so it becomes the new head. */
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* No merge possible: grab a free request slot. */
	req = get_request(max_req, bh->b_dev);

	/* None free: drop best-effort read/write-ahead, otherwise sleep
	 * until a slot is released and retry from the top. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	sti();

	/* Fill in the request and queue it. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;	/* completion is signalled via the bh */
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
305
/*
 * Read or write one page (8 sectors) of device 'dev' at page index
 * 'page' into/from 'buffer', sleeping in state TASK_SWAPPING until
 * the request completes (the driver wakes req->waiting).
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* get_request_wait() may sleep; interrupts are disabled only
	 * around the slot search itself. */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	/* One page = 8 sectors of 512 bytes, starting at page<<3. */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;		/* driver wakes us on completion */
	req->bh = NULL;
	req->next = NULL;
	/* Set the state before queueing so a completion that arrives
	 * before schedule() still wakes us correctly. */
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();
}
338
339
340
341
342
/*
 * Queue 'nr' buffer heads for reading or writing.  All buffers must
 * belong to the same major device and have the same block size.
 * NULL entries in bh[] are skipped.  On error the buffers are marked
 * clean and not-up-to-date ("sorry" path).
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Skip leading NULL entries; nothing to do if all are NULL. */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine the device's block size; defaults to BLOCK_SIZE. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Every buffer must match the device block size. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* "Plug" an idle queue with a dummy request (dev == -1) so the
	 * driver is not started until all buffers have been queued and
	 * merged — avoids one request_fn call per buffer. */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;		/* dummy: never a real device */
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	/* Unplug: drop the dummy request and start the driver on the
	 * real queue that accumulated behind it. */
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

	/* Error path: invalidate all the buffers. */
sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
431
432 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
433 {
434 int i;
435 int buffersize;
436 struct request * req;
437 unsigned int major = MAJOR(dev);
438
439 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
440 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
441 return;
442 }
443
444 if (rw!=READ && rw!=WRITE) {
445 printk("ll_rw_swap: bad block dev command, must be R/W");
446 return;
447 }
448 if (rw == WRITE && is_read_only(dev)) {
449 printk("Can't swap to read-only device 0x%X\n",dev);
450 return;
451 }
452
453 buffersize = PAGE_SIZE / nb;
454
455 for (i=0; i<nb; i++, buf += buffersize)
456 {
457 cli();
458 req = get_request_wait(NR_REQUEST, dev);
459 sti();
460 req->cmd = rw;
461 req->errors = 0;
462 req->sector = (b[i] * buffersize) >> 9;
463 req->nr_sectors = buffersize >> 9;
464 req->current_nr_sectors = buffersize >> 9;
465 req->buffer = buf;
466 req->waiting = current;
467 req->bh = NULL;
468 req->next = NULL;
469 current->state = TASK_UNINTERRUPTIBLE;
470 add_request(major+blk_dev,req);
471 schedule();
472 }
473 }
474
475 long blk_dev_init(long mem_start, long mem_end)
476 {
477 struct request * req;
478
479 req = all_requests + NR_REQUEST;
480 while (--req >= all_requests) {
481 req->dev = -1;
482 req->next = NULL;
483 }
484 memset(ro_bits,0,sizeof(ro_bits));
485 #ifdef CONFIG_BLK_DEV_HD
486 mem_start = hd_init(mem_start,mem_end);
487 #endif
488 #ifdef CONFIG_BLK_DEV_XD
489 mem_start = xd_init(mem_start,mem_end);
490 #endif
491 #ifdef CONFIG_CDU31A
492 mem_start = cdu31a_init(mem_start,mem_end);
493 #endif
494 #ifdef CONFIG_MCD
495 mem_start = mcd_init(mem_start,mem_end);
496 #endif
497 #ifdef CONFIG_SBPCD
498 mem_start = sbpcd_init(mem_start, mem_end);
499 #endif CONFIG_SBPCD
500 if (ramdisk_size)
501 mem_start += rd_init(mem_start, ramdisk_size*1024);
502 return mem_start;
503 }