This source file includes the following definitions:
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  ll_rw_blk.c
 *
 *  Low-level block device request handling: request-slot allocation,
 *  request merging and elevator ordering, and dispatch to the
 *  per-major request functions.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

/*
 * The pool of request slots shared by all block devices.
 */
static struct request all_requests[NR_REQUEST];

/*
 * Processes sleep here when no free request slot is available.
 */
struct wait_queue * wait_for_request = NULL;

/* read-ahead amount for each major device */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * blk_dev_struct is:
 *	request_fn
 *	current_request
 * One entry per major device number; a driver's init routine
 * installs its request function here.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};
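
/*
 * A sketch of how a driver hooks itself in (do_xx_request and
 * XX_MAJOR are hypothetical names; the real drivers, e.g. hd.c,
 * follow this pattern from their init routines):
 *
 *	static void do_xx_request(void)
 *	{
 *		struct request * req = blk_dev[XX_MAJOR].current_request;
 *		... process req, then move on to req->next ...
 *	}
 *
 *	blk_dev[XX_MAJOR].request_fn = do_xx_request;
 */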

/*
 * blk_size contains the size of all block devices, per major and
 * minor, in kilobytes (1024 bytes):
 *
 *	blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the block size, per major and minor, in
 * bytes:
 *
 *	blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then BLOCK_SIZE (1024 bytes) is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Look for a free request in the first n entries of all_requests.
 * All callers in this file hold cli() around the search, since
 * slots are freed from interrupt context.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* scan cyclically, starting just below the last hit */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)	/* dev == -1 marks a free slot */
			break;
		if (req == prev_found)	/* full circle: nothing free */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;
	return req;
}
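
/*
 * A request slot is free when req->dev < 0: blk_dev_init() below sets
 * dev = -1 in every slot, and the driver side (end_request() in blk.h,
 * an assumption about code outside this file) releases a slot roughly
 * like this, waking any sleeper on wait_for_request:
 *
 *	req->dev = -1;
 *	wake_up(&wait_for_request);
 */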

/*
 * Wait until a free request in the first n entries is available.
 */
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	while ((req = get_request(n, dev)) == NULL)
		sleep_on(&wait_for_request);
	return req;
}
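
/*
 * The caller pattern used later in this file: the cli() protects the
 * free-slot scan and the sleep_on() against a completion interrupt
 * freeing a slot in between:
 *
 *	cli();
 *	req = get_request_wait(NR_REQUEST, dev);
 *	sti();
 */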

/* read-only handling: one bit per minor device, packed into longs */
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
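
/*
 * Worked example of the bit arithmetic above: for minor 37,
 * 37 >> 5 = 1 and 37 & 31 = 5, so the flag lives in bit 5 of
 * ro_bits[major][1].  With 32-bit longs, the eight entries per
 * major cover 256 minors.
 */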

/*
 * add_request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* queue was empty: start the device at once */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insertion: keep the list sorted by IN_ORDER */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
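
/*
 * The IN_ORDER test (defined in blk.h) compares two requests by
 * command, device and sector.  The insertion condition above keeps
 * one ascending run per sweep: a new request goes in either where it
 * fits in the current ascending run (IN_ORDER(tmp,req)), or at the
 * wrap point where the list restarts from low sectors
 * (!IN_ORDER(tmp,tmp->next)), provided it still precedes the next
 * entry.  This is the classic one-way elevator: the head sweeps the
 * disk in one direction instead of seeking back and forth.
 */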

/*
 * make_request() queues one buffer for I/O, trying first to merge it
 * into a request that is already on the device's list.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/*
	 * WRITEA/READA are "best effort": if the buffer is locked we
	 * just skip it, otherwise it becomes a normal read/write.
	 */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* 512-byte sectors per buffer */
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		/* (sector + count)>>1 is the end position in kB */
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/*
	 * Don't let writes fill the request list completely: reads
	 * take precedence, so the last third of the slots is reserved
	 * for them.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

	/* big loop: look for a free request. */
repeat:
	cli();

	/*
	 * Try to coalesce the new request with old requests that are
	 * already queued for the same device.
	 */
	if ((major == HD_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/*
		 * The hd driver may be working on the head request,
		 * so skip it; the SCSI drivers dequeue a request
		 * before processing it, so their head entry is safe
		 * to merge into.
		 */
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			/* back merge: bh starts where req ends */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* front merge: bh ends where req starts */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

	/* if no request available: if rw_ahead, forget it; otherwise sleep */
	if (!req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	/* we found a request slot. */
	sti();

	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
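
/*
 * Worked example of the sector arithmetic in make_request(): with
 * 1024-byte buffers, count = 1024 >> 9 = 2 sectors, so block 100
 * starts at sector 200, and (sector + count) >> 1 = 101 kB is the
 * end position checked against blk_size[major][minor].
 */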

/*
 * ll_rw_page() reads or writes one page (8 sectors of 512 bytes)
 * synchronously: it queues a request carrying a semaphore and sleeps
 * on it until the driver signals completion.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* 8 sectors per page */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);			/* sleep until the request completes */
}
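
/*
 * A usage sketch (hypothetical values; the swapping code calls it in
 * this style): read page 5 of the swap device into a page-sized
 * buffer, returning only once the data is in memory:
 *
 *	ll_rw_page(READ, swap_dev, 5, buffer);
 */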

/*
 * This function can be used to request a number of buffers from a
 * block device.  All buffers must belong to the same device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* make sure the first buffer pointer is non-NULL */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* determine the correct block size for this device */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* verify the requested block sizes */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/*
	 * If there are no pending requests for this device, insert a
	 * dummy request to plug the queue: nothing starts until all
	 * the blocks have been shoved in, and then we let it rip.
	 */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;		/* dummy: can never match a real dev */
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		/* unplug: remove the dummy and start the real queue */
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
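
/*
 * A typical caller, sketched bread()-style (assuming getblk() and
 * wait_on_buffer() as provided by the buffer cache in this kernel):
 *
 *	struct buffer_head * bh = getblk(dev, block, size);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 */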

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;	/* split the page into nb chunks */

	for (i=0; i<nb; i++, buf += buffersize)
	{
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->sem = &sem;
		req->bh = NULL;
		req->next = NULL;
		add_request(major+blk_dev,req);
		down(&sem);	/* wait for this chunk before the next */
	}
}
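
/*
 * Example of the chunking arithmetic above: with a 4096-byte page and
 * nb = 4, buffersize = 1024 bytes, so each chunk is
 * buffersize >> 9 = 2 sectors long, and b[i] indexes 1024-byte blocks
 * on the swap device.
 */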

long blk_dev_init(long mem_start, long mem_end)
{
	struct request * req;

	/* mark all request slots free (dev == -1 means unused) */
	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->dev = -1;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
	mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
	mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
	mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
	mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
	mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
	if (ramdisk_size)
		mem_start += rd_init(mem_start, ramdisk_size*1024);
	return mem_start;
}
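
/*
 * Note on the mem_start/mem_end protocol: at this stage of boot each
 * driver init receives the current start of free memory and returns
 * it, advanced past anything it permanently claimed (a simple bump
 * allocator).  rd_init() above instead returns the size it consumed,
 * hence the += form.
 */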