This source file includes the following definitions:
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18
19 #include <asm/system.h>
20
21 #include "blk.h"
22
23
24
25
26
27 static struct request all_requests[NR_REQUEST];
28
29
30
31
32 struct wait_queue * wait_for_request = NULL;
33
34
35
36 int read_ahead[MAX_BLKDEV] = {0, };
37
38
39
40
41
42 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
43 { NULL, NULL },
44 { NULL, NULL },
45 { NULL, NULL },
46 { NULL, NULL },
47 { NULL, NULL },
48 { NULL, NULL },
49 { NULL, NULL },
50 { NULL, NULL },
51 { NULL, NULL },
52 { NULL, NULL }
53 };
54
55
56
57
58
59
60
61
62
63 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
64
65
66
67
68
69
70
71
72 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
73
74
75
76
77
78
79 static inline struct request * get_request(int n, int dev)
80 {
81 static struct request *prev_found = NULL, *prev_limit = NULL;
82 register struct request *req, *limit;
83
84 if (n <= 0)
85 panic("get_request(%d): impossible!\n", n);
86
87 limit = all_requests + n;
88 if (limit != prev_limit) {
89 prev_limit = limit;
90 prev_found = all_requests;
91 }
92 req = prev_found;
93 for (;;) {
94 req = ((req > all_requests) ? req : limit) - 1;
95 if (req->dev < 0)
96 break;
97 if (req == prev_found)
98 return NULL;
99 }
100 prev_found = req;
101 req->dev = dev;
102 return req;
103 }
104
105
106
107
108
109
110 static inline struct request * get_request_wait(int n, int dev)
111 {
112 register struct request *req;
113
114 while ((req = get_request(n, dev)) == NULL)
115 sleep_on(&wait_for_request);
116 return req;
117 }
118
119
120
121 static long ro_bits[MAX_BLKDEV][8];
122
123 int is_read_only(int dev)
124 {
125 int minor,major;
126
127 major = MAJOR(dev);
128 minor = MINOR(dev);
129 if ( major == FLOPPY_MAJOR && floppy_is_wp( minor) ) return 1;
130 if (major < 0 || major >= MAX_BLKDEV) return 0;
131 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
132 }
133
134 void set_device_ro(int dev,int flag)
135 {
136 int minor,major;
137
138 major = MAJOR(dev);
139 minor = MINOR(dev);
140 if (major < 0 || major >= MAX_BLKDEV) return;
141 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
142 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
143 }
144
145
146
147
148
149
/*
 * add_request() — insert req into dev's pending queue in elevator
 * order (IN_ORDER, from blk.h) and kick the driver's strategy routine
 * if the queue was idle.  Interrupts are disabled while the queue is
 * manipulated.  Also bumps the per-drive I/O statistics for hd, xt
 * and SCSI disks.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* per-drive accounting: derive a 0-3 drive index from the minor */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		default:		break;
	}

	req->next = NULL;
	cli();
	/* buffer is now committed to the queue: clear its dirty bit */
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* queue was empty: install req and start the device */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insertion: find the first spot that keeps the queue sorted */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/*
	 * SCSI majors get the strategy routine called on every insertion,
	 * not just when the queue was empty.  NOTE(review): presumably
	 * because SCSI drivers can have several commands in flight —
	 * rationale not visible in this chunk.
	 */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
193
/*
 * make_request() — turn one buffer-head read/write into a queued
 * request on the device list for `major`.  First tries to coalesce
 * the buffer with an adjacent request already on the queue (back- or
 * front-merge); failing that, it claims a free request slot and
 * inserts it via add_request().  May sleep unless this is a
 * read-ahead/write-ahead request.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	/*
	 * READA/WRITEA are best-effort: give up at once if the buffer
	 * is locked, otherwise degrade to a plain READ/WRITE.
	 */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	/* sector/count are in 512-byte units; b_size is in bytes */
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* blk_size[] is in 1kB blocks, hence the >>1 on the sector count */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			/* request extends past end of device: fail the buffer */
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			return;
		}
	lock_buffer(bh);
	/* nothing to do: write of a clean buffer, or read of an up-to-date one */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/*
	 * Writes may only use two thirds of the request pool so that
	 * heavy write-out can never starve readers of request slots.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

repeat:
	cli();

	/*
	 * Try to coalesce with a request already queued on this device.
	 * Only attempted for the majors listed below.  Requests with a
	 * completion semaphore (req->sem) are synchronous and are never
	 * merged into.
	 */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/*
		 * For hd/floppy the head request is being serviced by the
		 * controller, so skip it and start at the second entry.
		 */
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
			req = req->next;
		while (req) {
			/*
			 * Back-merge: bh starts exactly where req ends.
			 * The 244-sector cap presumably keeps nr_sectors
			 * below a controller limit — TODO confirm.
			 */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* front-merge: bh ends exactly where req begins */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* no merge possible: grab a free request slot */
	req = get_request(max_req, bh->b_dev);

	/*
	 * Pool exhausted: drop read-ahead requests on the floor,
	 * otherwise sleep until a slot frees up and retry from scratch.
	 */
	if (! req) {
		if (rw_ahead) {
			sti();
			unlock_buffer(bh);
			return;
		}
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	/* slot is ours (dev already set by get_request) */
	sti();

	/* fill in the request and queue it */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;		/* asynchronous: no completion wait */
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
319
/*
 * ll_rw_page() — synchronously read or write one page (8 sectors) of
 * a block device into/from `buffer`.  Blocks until a request slot is
 * free and then until the transfer completes: the on-stack semaphore
 * is downed here and — NOTE(review): presumably — upped by the driver
 * when the request finishes (completion path not in this chunk).
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;	/* completion signal */

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* interrupts must be off while claiming a request slot */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();
	/* fill in the request and queue it */
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* 8 sectors per page */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;		/* marks the request synchronous */
	req->bh = NULL;
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);			/* wait for the transfer to complete */
}
352
353
354
355
356
/*
 * ll_rw_block() — the main block-I/O entry point: queue nr buffers
 * for reading or writing.  When the device queue is empty and more
 * than one buffer is passed, the queue is "plugged" with an on-stack
 * dummy request (dev == -1) so the driver is not started until all
 * buffers have been queued and could be sorted/merged as a batch.
 * On error, every buffer is marked neither dirty nor up-to-date.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request that plugs the queue */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* skip leading NULL entries; nothing to do if they are all NULL */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* per-minor block size, defaulting to BLOCK_SIZE */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* every buffer must match the device's block size */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/*
	 * Plug the queue: if the device is idle and we have several
	 * buffers, park a dummy request at the head so the strategy
	 * routine is deferred until everything below is queued.
	 */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;		/* dummy: never matches a real device */
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			/* paging statistics */
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	if (plugged) {
		/* unplug: pop the dummy and let the driver go */
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

	/* error path: invalidate every buffer we were handed */
sorry:
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
445
/*
 * ll_rw_swap_file() — synchronous swap I/O: transfer a page in nb
 * pieces of PAGE_SIZE/nb bytes each, piece i going to/from block b[i]
 * of the device.  Each piece is issued and waited for (via the
 * on-stack semaphore) before the next one starts.
 * NOTE(review): nb is used as a divisor with no guard — nb == 0 would
 * divide by zero, and nb must divide PAGE_SIZE into a multiple of 512
 * bytes for the >>9 sector math to be exact; confirm at the callers.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	int buffersize;
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;	/* completion signal */

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n",dev);
		return;
	}

	buffersize = PAGE_SIZE / nb;	/* bytes per piece */

	for (i=0; i<nb; i++, buf += buffersize)
	{
		/* one fully synchronous request per piece */
		cli();
		req = get_request_wait(NR_REQUEST, dev);
		sti();
		req->cmd = rw;
		req->errors = 0;
		req->sector = (b[i] * buffersize) >> 9;	/* block -> 512-byte sector */
		req->nr_sectors = buffersize >> 9;
		req->current_nr_sectors = buffersize >> 9;
		req->buffer = buf;
		req->sem = &sem;	/* marks the request synchronous */
		req->bh = NULL;
		req->next = NULL;
		add_request(major+blk_dev,req);
		down(&sem);		/* wait before issuing the next piece */
	}
}
488
489 long blk_dev_init(long mem_start, long mem_end)
490 {
491 struct request * req;
492
493 req = all_requests + NR_REQUEST;
494 while (--req >= all_requests) {
495 req->dev = -1;
496 req->next = NULL;
497 }
498 memset(ro_bits,0,sizeof(ro_bits));
499 #ifdef CONFIG_BLK_DEV_HD
500 mem_start = hd_init(mem_start,mem_end);
501 #endif
502 #ifdef CONFIG_BLK_DEV_XD
503 mem_start = xd_init(mem_start,mem_end);
504 #endif
505 #ifdef CONFIG_CDU31A
506 mem_start = cdu31a_init(mem_start,mem_end);
507 #endif
508 #ifdef CONFIG_MCD
509 mem_start = mcd_init(mem_start,mem_end);
510 #endif
511 #ifdef CONFIG_SBPCD
512 mem_start = sbpcd_init(mem_start, mem_end);
513 #endif CONFIG_SBPCD
514 if (ramdisk_size)
515 mem_start += rd_init(mem_start, ramdisk_size*1024);
516 return mem_start;
517 }