This source file includes the following definitions:
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>

#include <asm/system.h>

#include "blk.h"
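
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */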
static struct request all_requests[NR_REQUEST];
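
/*
 * used to wait on when there are no free requests
 */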
struct wait_queue * wait_for_request = NULL;
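
/* This specifies how many sectors to read ahead on the disk.  */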
int read_ahead[MAX_BLKDEV] = {0, };
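
/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */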
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* no_dev */
	{ NULL, NULL },		/* dev mem */
	{ NULL, NULL },		/* dev fd */
	{ NULL, NULL },		/* dev hd */
	{ NULL, NULL },		/* dev ttyx */
	{ NULL, NULL },		/* dev tty */
	{ NULL, NULL },		/* dev lp */
	{ NULL, NULL },		/* dev pipes */
	{ NULL, NULL },		/* dev sd */
	{ NULL, NULL }		/* dev st */
};
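
/*
 * blk_size contains the size of all block-devices in units of
 * 1024 byte sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */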
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
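
/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */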
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
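
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */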
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->dev = dev;
	return req;
}
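
/*
 * wait until a free request in the first N entries is available.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */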
static inline struct request * get_request_wait(int n, int dev)
{
	register struct request *req;

	while ((req = get_request(n, dev)) == NULL)
		sleep_on(&wait_for_request);
	return req;
}
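
/* RO fail safe mechanism */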
static long ro_bits[MAX_BLKDEV][8];

int is_read_only(int dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major == FLOPPY_MAJOR && floppy_is_wp(minor))
		return 1;
	if (major < 0 || major >= MAX_BLKDEV)
		return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(int dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV)
		return;
	if (flag)
		ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else
		ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
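
/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */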
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
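
/*
 * make_request() queues a single buffer_head, first trying to merge
 * it into an existing request for the same device when the transfer
 * is contiguous.
 */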
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* look for a free request. */
repeat:
	cli();

/* The scsi disk drivers completely remove the request from the queue when
 * they start processing an entry.  For this reason it is safe to continue
 * to add links to the top entry for those devices.
 */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		if (major == HD_MAJOR)
			req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

/* find an unused request. */
	req = get_request(max_req, bh->b_dev);

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
	if (!req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

/* we found a request. */
	sti();

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
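
/*
 * ll_rw_page reads/writes one page (8 sectors) and sleeps on a
 * semaphore until the transfer has completed.
 */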
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);
}
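
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */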
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* Make sure that the first block contains something reasonable */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	}

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		       (unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			       correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/* If there are no pending requests for this device, then we insert
	   a dummy request for that device.  This will prevent the request
	   from starting until we have shoved all of the blocks into the
	   queue, and then we let it rip.  */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
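
/*
 * ll_rw_swap_file reads/writes one page of swap, split over nb blocks
 * (given in b[]) of PAGE_SIZE/nb bytes each, sleeping on a semaphore
 * after each request so the transfers complete in order.
 */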
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;

	for (i=0; i<nb; i++, buf += buffersize)
	{
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->sem = &sem;
		req->bh = NULL;
		req->next = NULL;
		add_request(major+blk_dev,req);
		down(&sem);
	}
}
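
/*
 * blk_dev_init marks all requests free and initialises the
 * compiled-in block device drivers.
 */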
long blk_dev_init(long mem_start, long mem_end)
{
	struct request * req;

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->dev = -1;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_HD
	mem_start = hd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_BLK_DEV_XD
	mem_start = xd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_CDU31A
	mem_start = cdu31a_init(mem_start,mem_end);
#endif
#ifdef CONFIG_MCD
	mem_start = mcd_init(mem_start,mem_end);
#endif
#ifdef CONFIG_SBPCD
	mem_start = sbpcd_init(mem_start, mem_end);
#endif /* CONFIG_SBPCD */
	if (ramdisk_size)
		mem_start += rd_init(mem_start, ramdisk_size*1024);
	return mem_start;
}