This source file includes following definitions.
- get_request
- get_request_wait
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/config.h>
17 #include <linux/locks.h>
18
19 #include <asm/system.h>
20 #include <asm/io.h>
21 #include "blk.h"
22
23
24
25
26
/*
 * The one shared pool of request slots; a slot is free when its
 * dev field is negative (see get_request / blk_dev_init).
 */
static struct request all_requests[NR_REQUEST];

/*
 * Processes sleep here (get_request_wait, make_request) until a
 * request slot is freed.
 */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead amount; 0 means no read-ahead. */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * One entry per major number: the driver's strategy routine
 * (request_fn) and the head of its pending request queue
 * (current_request).  An entry with a NULL request_fn means
 * "no driver registered for this major".
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },		/* major 0 */
	{ NULL, NULL },		/* major 1 */
	{ NULL, NULL },		/* major 2 */
	{ NULL, NULL },		/* major 3 */
	{ NULL, NULL },		/* major 4 */
	{ NULL, NULL },		/* major 5 */
	{ NULL, NULL },		/* major 6 */
	{ NULL, NULL },		/* major 7 */
	{ NULL, NULL },		/* major 8 */
	{ NULL, NULL }		/* major 9 */
};

/*
 * Per-major device size tables, indexed by minor.  Units are
 * 1024-byte blocks: make_request compares an entry against
 * (sector + count) >> 1, i.e. sectors halved.  NULL means no
 * size checking for that major.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Per-major software block size in bytes, indexed by minor;
 * ll_rw_block uses this as the required b_size of submitted
 * buffers.  NULL (or a zero entry) means BLOCK_SIZE.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Per-major hardware sector size, indexed by minor.
 * NOTE(review): not referenced anywhere in this file -- presumably
 * filled in and consumed by the individual drivers; confirm there.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
87
88
89
90
91
92
/*
 * Find a free slot in all_requests[0..n-1] and claim it for device
 * 'dev'.  Returns the slot, or NULL if all n slots are busy.
 *
 * A slot is free when its dev field is negative (initialised to -1
 * by blk_dev_init; the completion path frees slots the same way).
 * The static prev_found/prev_limit pair caches where the last search
 * stopped so successive calls don't rescan freshly handed-out slots.
 *
 * NOTE(review): the static cache and the unlocked test-then-claim of
 * req->dev make this non-re-entrant; callers in this file disable
 * interrupts (cli) around it -- confirm all call sites do.
 */
static inline struct request * get_request(int n, int dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	/* caller asked for a different pool size: restart the cached scan */
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		/* step backwards, wrapping from all_requests[0] to limit-1 */
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->dev < 0)
			break;
		if (req == prev_found)	/* full circle: every slot busy */
			return NULL;
	}
	prev_found = req;
	req->dev = dev;		/* claim the slot */
	return req;
}
118
119
120
121
122
123
124 static inline struct request * get_request_wait(int n, int dev)
125 {
126 register struct request *req;
127
128 while ((req = get_request(n, dev)) == NULL)
129 sleep_on(&wait_for_request);
130 return req;
131 }
132
133
134
135 static long ro_bits[MAX_BLKDEV][8];
136
137 int is_read_only(int dev)
138 {
139 int minor,major;
140
141 major = MAJOR(dev);
142 minor = MINOR(dev);
143 if (major < 0 || major >= MAX_BLKDEV) return 0;
144 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
145 }
146
147 void set_device_ro(int dev,int flag)
148 {
149 int minor,major;
150
151 major = MAJOR(dev);
152 minor = MINOR(dev);
153 if (major < 0 || major >= MAX_BLKDEV) return;
154 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
155 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
156 }
157
158
159
160
161
162
/*
 * Link a request into a device's queue in elevator (sector-sorted)
 * order and, when the queue was idle, kick the driver's strategy
 * routine.  Runs with interrupts disabled around all queue
 * manipulation (cli/sti done here).
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short disk_index;

	/* per-drive I/O accounting for the disk majors we know about */
	switch (MAJOR(req->dev)) {
		case SCSI_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x0070) >> 4;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		case HD_MAJOR:
		case XT_DISK_MAJOR:	disk_index = (MINOR(req->dev) & 0x00C0) >> 6;
					if (disk_index < 4)
						kstat.dk_drive[disk_index]++;
					break;
		default:		break;
	}

	req->next = NULL;
	cli();
	/* the buffer is committed to I/O now: drop its dirty bit */
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		/* idle queue: install the request and start the driver */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/*
	 * Elevator insert: walk past tmp while req would not fit between
	 * tmp and tmp->next (IN_ORDER defines the sweep direction; the
	 * !IN_ORDER(tmp, tmp->next) clause detects the wrap-around point).
	 */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* SCSI drivers want a nudge even when the queue was non-empty */
	if (scsi_major(MAJOR(req->dev)))
		(dev->request_fn)();

	sti();
}
206
/*
 * Turn a locked buffer head into a queued I/O request, merging it
 * into an existing request for the same device when the sectors
 * are contiguous.
 *
 * READA/WRITEA ("ahead") requests are best-effort: they are dropped
 * rather than blocking on a busy buffer or a full request table, and
 * otherwise handled as plain READ/WRITE.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;		/* ahead request on a busy buffer: drop it */
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	/* convert the buffer into 512-byte sector units */
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* reject I/O past the end of the device (blk_size is in 1K blocks) */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			bh->b_req = 0;
			return;
		}

	if (bh->b_lock)
		return;

	lock_buffer(bh);
	/* nothing to do: writing a clean buffer or reading a valid one */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

	/*
	 * Writes may only use 2/3 of the request table, so that reads
	 * can always find a free slot and are never starved by writes.
	 */
	max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

repeat:
	cli();

	/*
	 * Try to merge into an already-queued request.  Only attempted
	 * for majors whose drivers can handle clustered requests.
	 */
	if ((major == HD_MAJOR
	     || major == FLOPPY_MAJOR
	     || major == SCSI_DISK_MAJOR
	     || major == SCSI_CDROM_MAJOR)
	    && (req = blk_dev[major].current_request))
	{
		/* the head request may be active in the driver: skip it */
		if (major == HD_MAJOR || major == FLOPPY_MAJOR)
			req = req->next;
		while (req) {
			/* back merge: bh continues right after req */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 244)
			{
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				mark_buffer_clean(bh);
				sti();
				return;
			}

			/* front merge: bh ends right where req starts */
			if (req->dev == bh->b_dev &&
			    !req->sem &&
			    req->cmd == rw &&
			    req->sector - count == sector &&
			    req->nr_sectors < 244)
			{
				req->nr_sectors += count;
				bh->b_reqnext = req->bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				mark_buffer_clean(bh);
				req->bh = bh;
				sti();
				return;
			}

			req = req->next;
		}
	}

	/* no merge possible: grab a fresh request slot */
	req = get_request(max_req, bh->b_dev);

	if (! req) {
		if (rw_ahead) {
			/* best-effort request and table full: give up */
			sti();
			unlock_buffer(bh);
			return;
		}
		/* wait for a slot to free up and retry the merge scan too */
		sleep_on(&wait_for_request);
		sti();
		goto repeat;
	}

	/* the slot is ours (dev claimed by get_request): safe to fill with
	 * interrupts back on */
	sti();

	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;		/* asynchronous: nobody waits on it */
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
336
/*
 * Read or write one page (8 sectors) of a block device, synchronously:
 * queues a single request with a private on-stack semaphore and sleeps
 * in down() until the driver's completion path up()s it.
 *
 * rw     -- READ or WRITE (anything else panics)
 * dev    -- device number
 * page   -- page index on the device (sector = page * 8)
 * buffer -- page-sized buffer to transfer from/to
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);
	struct semaphore sem = MUTEX_LOCKED;

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* interrupts off while claiming a request slot */
	cli();
	req = get_request_wait(NR_REQUEST, dev);
	sti();

	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;		/* 8 sectors per page */
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->sem = &sem;		/* completion path will up() this */
	req->bh = NULL;			/* raw buffer, no buffer head */
	req->next = NULL;
	add_request(major+blk_dev,req);
	down(&sem);			/* wait for the I/O to complete */
}
369
370
371
372
373
/*
 * Queue I/O for an array of nr buffer heads (entries may be NULL).
 * All buffers must belong to the same device and have the device's
 * configured block size.  On a bad device/size/read-only error the
 * buffers are invalidated ("sorry") instead of queued.
 *
 * When the target queue is idle and more than one buffer is queued,
 * a dummy "plug" request is installed first so the driver does not
 * start until all buffers are queued -- this lets make_request merge
 * them into fewer, larger requests.
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	/* skip leading NULL entries; bail out if they were all NULL */
	while (!*bh) {
		bh++;
		if (--nr <= 0)
			return;
	};

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(
	"ll_rw_block: Trying to read nonexistent block-device %04lX (%ld)\n",
		(unsigned long) bh[0]->b_dev, bh[0]->b_blocknr);
		goto sorry;
	}

	/* the device's configured block size, defaulting to BLOCK_SIZE */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* every buffer must match that size exactly */
	for (i = 0; i < nr; i++) {
		if (bh[i] && bh[i]->b_size != correct_size) {
			printk(
			"ll_rw_block: only %d-char blocks implemented (%lu)\n",
			correct_size, bh[i]->b_size);
			goto sorry;
		}
	}

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		goto sorry;
	}

	/*
	 * Plug the queue: the dummy request (dev == -1) keeps the driver
	 * from starting until every buffer below has been queued.
	 */
	plugged = 0;
	cli();
	if (!dev->current_request && nr > 1) {
		dev->current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	}
	sti();
	for (i = 0; i < nr; i++) {
		if (bh[i]) {
			bh[i]->b_req = 1;
			make_request(major, rw, bh[i]);
			if (rw == READ || rw == READA)
				kstat.pgpgin++;
			else
				kstat.pgpgout++;
		}
	}
	/* unplug: remove the dummy head and start the driver */
	if (plugged) {
		cli();
		dev->current_request = plug.next;
		(dev->request_fn)();
		sti();
	}
	return;

sorry:
	/* invalidate the buffers so stale data is never trusted */
	for (i = 0; i < nr; i++) {
		if (bh[i])
			bh[i]->b_dirt = bh[i]->b_uptodate = 0;
	}
	return;
}
462
463 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
464 {
465 int i;
466 int buffersize;
467 struct request * req;
468 unsigned int major = MAJOR(dev);
469 struct semaphore sem = MUTEX_LOCKED;
470
471 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
472 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
473 return;
474 }
475
476 if (rw!=READ && rw!=WRITE) {
477 printk("ll_rw_swap: bad block dev command, must be R/W");
478 return;
479 }
480 if (rw == WRITE && is_read_only(dev)) {
481 printk("Can't swap to read-only device 0x%X\n",dev);
482 return;
483 }
484
485 buffersize = PAGE_SIZE / nb;
486
487 for (i=0; i<nb; i++, buf += buffersize)
488 {
489 cli();
490 req = get_request_wait(NR_REQUEST, dev);
491 sti();
492 req->cmd = rw;
493 req->errors = 0;
494 req->sector = (b[i] * buffersize) >> 9;
495 req->nr_sectors = buffersize >> 9;
496 req->current_nr_sectors = buffersize >> 9;
497 req->buffer = buf;
498 req->sem = &sem;
499 req->bh = NULL;
500 req->next = NULL;
501 add_request(major+blk_dev,req);
502 down(&sem);
503 }
504 }
505
506 long blk_dev_init(long mem_start, long mem_end)
507 {
508 struct request * req;
509
510 req = all_requests + NR_REQUEST;
511 while (--req >= all_requests) {
512 req->dev = -1;
513 req->next = NULL;
514 }
515 memset(ro_bits,0,sizeof(ro_bits));
516 #ifdef CONFIG_BLK_DEV_HD
517 mem_start = hd_init(mem_start,mem_end);
518 #endif
519 #ifdef CONFIG_BLK_DEV_XD
520 mem_start = xd_init(mem_start,mem_end);
521 #endif
522 #ifdef CONFIG_CDU31A
523 mem_start = cdu31a_init(mem_start,mem_end);
524 #endif
525 #ifdef CONFIG_MCD
526 mem_start = mcd_init(mem_start,mem_end);
527 #endif
528 #ifdef CONFIG_BLK_DEV_FD
529 floppy_init();
530 #else
531 outb_p(0xc, 0x3f2);
532 #endif
533 #ifdef CONFIG_SBPCD
534 mem_start = sbpcd_init(mem_start, mem_end);
535 #endif CONFIG_SBPCD
536 if (ramdisk_size)
537 mem_start += rd_init(mem_start, ramdisk_size*1024);
538 return mem_start;
539 }