This source file includes the following definitions:
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* per-major read-ahead, in sectors, filled in by the drivers at init time */
int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *	do_request-address
 *	next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* no_dev */
	{ NULL, NULL },		/* dev mem */
	{ NULL, NULL },		/* dev fd */
	{ NULL, NULL },		/* dev hd */
	{ NULL, NULL },		/* dev ttyx */
	{ NULL, NULL },		/* dev tty */
	{ NULL, NULL },		/* dev lp */
	{ NULL, NULL },		/* dev pipes */
	{ NULL, NULL },		/* dev sd */
	{ NULL, NULL }		/* dev st */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
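
/*
 * Illustrative sketch (not part of the original file; mydev_blocksizes and
 * MYDEV_MAJOR are hypothetical names): a driver whose minors use 2kB blocks
 * would publish that at init time roughly like this:
 *
 *	static int mydev_blocksizes[256];
 *	...
 *	for (i = 0; i < 256; i++)
 *		mydev_blocksizes[i] = 2048;
 *	blksize_size[MYDEV_MAJOR] = mydev_blocksizes;
 *
 * ll_rw_block() below then rejects buffers whose b_size differs from the
 * published block size, and make_request() derives the sector count from
 * bh->b_size.
 */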

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->dev = dev;
	return req;
}
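
/*
 * Usage note (sketch): get_request() assumes interrupts are already
 * disabled, so the callers in this file bracket it with cli()/sti();
 * ll_rw_page() below, for example, does:
 *
 *	cli();
 *	req = get_request_wait(NR_REQUEST, dev);
 *	sti();
 *
 * The prev_found/prev_limit pair merely caches where the previous scan
 * stopped, so repeated calls do not always rescan all_requests[] from
 * the same end.
 */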

/*
 * wait until a free request in the first N entries is available.
 */
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	while ((req = get_request(n, dev)) == NULL)
		sleep_on(&wait_for_request);
	return req;
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];
121
122 int is_read_only(int dev)
123 {
124 int minor,major;
125
126 major = MAJOR(dev);
127 minor = MINOR(dev);
128 if ( major == FLOPPY_MAJOR && floppy_is_wp( minor) ) return 1;
129 if (major < 0 || major >= MAX_BLKDEV) return 0;
130 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
131 }
132
133 void set_device_ro(int dev,int flag)
134 {
135 int minor,major;
136
137 major = MAJOR(dev);
138 minor = MINOR(dev);
139 if (major < 0 || major >= MAX_BLKDEV) return;
140 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
141 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
142 }
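
/*
 * Worked example: each row of ro_bits packs 8 longs * 32 bits = 256
 * per-minor read-only flags. For minor 37, "minor >> 5" selects long
 * number 1 and "1 << (minor & 31)" selects bit 5 within it, so
 * set_device_ro(dev, 1) sets bit 5 of ro_bits[major][1], and
 * is_read_only() tests exactly that bit.
 */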

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
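
/*
 * Ordering example: IN_ORDER() (defined in blk.h) compares two requests
 * by command, device and sector, which gives the classic one-way
 * elevator. If the queue beyond the head holds sectors 10 -> 50 -> 90
 * and a request for sector 70 arrives, the loop above breaks with tmp
 * at the sector-50 entry, producing 10 -> 50 -> 70 -> 90. A request for
 * sector 30 arriving too late for the current sweep falls through to
 * the end and is queued for the next sweep; the !IN_ORDER(tmp,tmp->next)
 * test is what recognizes that wrap-around point in the list.
 */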

static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/* WRITEA/READA is special case - it is not really needed, so if the
	   buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			return;
		}
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/* we don't allow the write-requests to fill up the queue completely:
	 * we want some room for reads: they take precedence. The last third
	 * of the requests are only for reads.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* big loop: look for a free request. */

repeat:
	cli();

	/* Try to merge this buffer into a pending request. The scsi disk
	 * drivers detach the request they are working on, so the head of
	 * their queue is safe to extend; the hd driver keeps working on
	 * the head of its queue, so skip that entry before merging.
	 */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again, blocking. */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

/* we found a request. */
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
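
/*
 * Merge arithmetic, worked through: a 1kB buffer has count = 2 sectors,
 * so block 100 starts at sector 200. If a queued request already covers
 * sectors 190..199 (sector = 190, nr_sectors = 10), the first merge test
 * matches (190 + 10 == 200) and the buffer is chained onto bhtail, growing
 * the request to 12 sectors. If instead a request starts at sector 202,
 * the second test matches (202 - 2 == 200), the buffer is prepended, and
 * req->sector moves back to 200. Either way the request stays below the
 * 244-sector merge limit checked above.
 */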

void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);
}
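
/*
 * Call sketch (swap_dev, page_nr and buffer stand for the caller's own
 * variables): the paging code transfers one page synchronously with
 *
 *	ll_rw_page(READ, swap_dev, page_nr, buffer);
 *
 * A page is 8 sectors, hence sector = page<<3 above. The on-stack
 * MUTEX_LOCKED semaphore is what makes the call synchronous: down()
 * blocks until the driver completes the request and end_request()
 * does an up() on req->sem.
 */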

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	 * a dummy request for that device. This insures that the device is
	 * kept active long enough to pass all of the buffers.
	 */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
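
/*
 * Typical use (sketch, modelled on bread() in fs/buffer.c): read one
 * block and wait for it:
 *
 *	struct buffer_head * bh = getblk(dev, block, size);
 *	if (!bh->b_uptodate) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 *
 * The "plug" above only matters for nr > 1: it parks a dummy request at
 * the head of an idle queue so that all nr buffers can be queued (and
 * sorted or merged) before the unplug kicks request_fn once at the end.
 */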

void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;

	for (i=0; i<nb; i++, buf += buffersize)
	{
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->sem = &sem;
		req->bh = NULL;
		req->next = NULL;
		add_request(major+blk_dev,req);
		down(&sem);
	}
}
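
/*
 * Size arithmetic, worked through: with PAGE_SIZE = 4096 and nb = 4,
 * buffersize is 1024 bytes, so each b[i] names one 1kB piece of the page
 * and maps to sector (b[i] * 1024) >> 9 = b[i] * 2, with nr_sectors =
 * 1024 >> 9 = 2. Each piece is submitted as its own request and
 * serialized by down(&sem), so the whole page is transferred
 * synchronously, piece by piece.
 */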

long blk_dev_init(long mem_start, long mem_end)
{
	struct request * req;

	/* mark all request slots free: dev < 0 means unused */
	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->dev = -1;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
	mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
	mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
	mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
	mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
	mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
	if (ramdisk_size)
		mem_start += rd_init(mem_start, ramdisk_size*1024);
	return mem_start;
}
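
/*
 * Driver side (sketch, paraphrasing hd.c of the same era): each *_init()
 * called above hooks its strategy routine into the tables this file owns,
 * roughly:
 *
 *	blk_dev[HD_MAJOR].request_fn = do_hd_request;
 *	read_ahead[HD_MAJOR] = 8;	(8 sectors = 4kB of read-ahead)
 *
 * and returns mem_start, possibly advanced past any memory it claimed.
 * add_request() and ll_rw_block() then start the device by calling the
 * registered request_fn.
 */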