This source file includes following definitions.
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
1
2
3
4
5
6
7
8
9
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/config.h>
15 #include <linux/locks.h>
16
17 #include <asm/system.h>
18
19 #include "blk.h"
20
21
22
23
24
/*
 * The pool of request structures; a slot is free when its dev field
 * is negative (see blk_dev_init() and the free-slot scans below).
 */
struct request request[NR_REQUEST];

/*
 * Processes sleep here when every request slot is in use; they are
 * woken when a slot is freed.
 */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead amount — presumably in sectors; TODO confirm against drivers. */
int read_ahead[MAX_BLKDEV] = {0, };

/*
 * One entry per major device: the driver's strategy routine
 * (request_fn) and the head of its pending-request list.
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL },
	{ NULL, NULL }
};

/*
 * Per-major array of per-minor device sizes.  make_request() compares
 * against (sector+count)>>1, so the units are 1024-byte blocks.
 * NULL means "no size checking for this major".
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Per-major array of per-minor block sizes in bytes; when NULL (or the
 * per-minor entry is 0) ll_rw_block() falls back to BLOCK_SIZE.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Read-only bitmap: one bit per minor (8 longs * 32 bits = 256 minors
 * per major).  Accessed only through is_read_only()/set_device_ro().
 */
static long ro_bits[MAX_BLKDEV][8];
75
76 int is_read_only(int dev)
77 {
78 int minor,major;
79
80 major = MAJOR(dev);
81 minor = MINOR(dev);
82 if (major < 0 || major >= MAX_BLKDEV) return 0;
83 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
84 }
85
86 void set_device_ro(int dev,int flag)
87 {
88 int minor,major;
89
90 major = MAJOR(dev);
91 minor = MINOR(dev);
92 if (major < 0 || major >= MAX_BLKDEV) return;
93 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
94 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
95 }
96
97
98
99
100
101
/*
 * Add a request to the device's pending list.  Runs with interrupts
 * disabled (cli) so it can modify the list without racing the
 * driver's interrupt-side completion code.
 *
 * If the list is empty the request is made current and the driver's
 * strategy routine is started immediately; otherwise the request is
 * inserted in elevator order (IN_ORDER) behind the current head.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	/* the write is now queued, so the buffer no longer counts as dirty */
	if (req->bh)
		req->bh->b_dirt = 0;
	if (!(tmp = dev->current_request)) {
		/* idle device: make this the current request and kick the driver */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* elevator insertion: stop where req sorts between tmp and tmp->next,
	 * or where the existing list itself wraps out of order */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for majors 8, 9 and 11 the strategy routine is called again even
	 * though the device is busy — presumably these (SCSI) drivers can
	 * dispatch multiple outstanding requests; TODO confirm */
	if(MAJOR(req->dev) == 8 ||
	   MAJOR(req->dev) == 9 ||
	   MAJOR(req->dev) == 11)
		(dev->request_fn)();

	sti();
}
133
/*
 * Queue a single buffer head for I/O on the given major device:
 * either merge it onto the tail of a compatible pending request
 * (majors 3, 8, 11) or claim a free request slot and hand it to
 * add_request().  Read-ahead (READA/WRITEA) is best-effort and is
 * dropped rather than blocked on.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead;

	/* READA/WRITEA: if the buffer is locked just forget about it,
	 * otherwise treat it as an ordinary read/write */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	count = bh->b_size >> 9;	/* buffer size in 512-byte sectors */
	sector = bh->b_blocknr * count;
	/* drop I/O past the end of the device (blk_size is in 1kB blocks,
	 * hence the >>1 on the sector count) */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	/* nothing to do: writing a clean buffer, or reading one that is
	 * already up to date */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

/* look for a free request slot; interrupts stay off from here until
 * the slot is claimed (or we give up / go to sleep) */
repeat:
	cli();
	/* for majors 3, 8 and 11, first try to append this buffer to the
	 * tail of an already-queued request on the same device going the
	 * same direction; for major 3 the head request is skipped —
	 * presumably because the drive is already working on it; TODO confirm */
	if ((major == 3 || major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
		if(major == 3) req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254) {
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;	/* the write is now queued */
				sti();
				return;
			}
			req = req->next;
		}
	}

	/* writes may only claim slots in the first half of the table;
	 * reads may use all of it, so reads always have headroom */
	if (rw == READ)
		req = request+NR_REQUEST;
	else
		req = request+(NR_REQUEST/2);

	/* scan downwards for a free slot (dev < 0 means unused) */
	while (--req >= request)
		if (req->dev < 0)
			goto found;
	/* none free: read-ahead simply gives up, normal I/O sleeps until a
	 * slot is released and retries the whole thing */
	if (rw_ahead) {
		sti();
		unlock_buffer(bh);
		return;
	}
	sleep_on(&wait_for_request);
	sti();
	goto repeat;

found:
	/* claim the slot (dev >= 0) before re-enabling interrupts, then
	 * fill in the rest of the request */
	req->dev = bh->b_dev;
	sti();
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
229
/*
 * Read or write one page (8 sectors of 512 bytes) for the swapper.
 * Synchronous: the caller is put in TASK_SWAPPING and schedule() is
 * called; the driver's completion code is expected to wake us via
 * req->waiting.
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	/* paging with a bad command is unrecoverable, hence panic not printk */
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n",dev);
		return;
	}
	/* scan for a free request slot with interrupts off; sleep and
	 * rescan if none is free */
	cli();
repeat:
	req = request+NR_REQUEST;
	while (--req >= request)
		if (req->dev<0)
			break;
	if (req < request) {
		sleep_on(&wait_for_request);
		goto repeat;
	}
	sti();
	/* fill in the request: one page = 8 sectors starting at page<<3 */
	req->dev = dev;
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;	/* wake us when the I/O completes */
	req->bh = NULL;
	req->next = NULL;
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	schedule();	/* wait for completion */
}
271
272
273
274
275
/*
 * Queue nr buffer heads for I/O.  All buffers are assumed to be on
 * the same major device (the major is taken from bh[0]); on any
 * validation failure every buffer is marked clean and not-uptodate
 * and the whole call is abandoned.
 *
 * When the device queue is empty and more than one buffer is queued,
 * the queue is "plugged" with a dummy on-stack request so the driver
 * does not start until all requests have been added (letting the
 * elevator sort them first).
 */
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	struct request plug;	/* dummy request used to plug the queue */
	int plugged;
	int correct_size;
	struct blk_dev_struct * dev;
	int i, j;

	/* skip leading NULL entries; nothing to do if they were all NULL */
	while(!bh[0]){
		bh++;
		nr--;
		if (nr <= 0) return;
	};

	if ((major=MAJOR(bh[0]->b_dev)) >= MAX_BLKDEV ||
	!(blk_dev[major].request_fn)) {
		printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh[0]->b_dev,bh[0]->b_blocknr);
		for (i=0;i<nr; i++)
			if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
		return;
	}

	/* every buffer must match the device's block size (default BLOCK_SIZE,
	 * overridden per-minor by blksize_size) */
	for(j=0;j<nr; j++){
		if(!bh[j]) continue;
		correct_size = BLOCK_SIZE;
		if(blksize_size[major] && blksize_size[major][MINOR(bh[j]->b_dev)])
			correct_size = blksize_size[major][MINOR(bh[j]->b_dev)];
		if(bh[j]->b_size != correct_size) {
			printk("ll_rw_block: only %d-char blocks implemented (%d)\n",
			       correct_size, bh[j]->b_size);
			for (i=0;i<nr; i++)
				if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
			return;
		}
	};

	if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
		printk("Can't write to read-only device 0x%X\n",bh[0]->b_dev);
		for (i=0;i<nr; i++)
			if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
		return;
	}

	/* plug the queue: install the dummy as current_request so
	 * add_request() queues behind it instead of starting the driver;
	 * dev = -1 marks it as not a real request */
	plugged = 0;
	cli();
	if (!blk_dev[major].current_request && nr > 1) {
		blk_dev[major].current_request = &plug;
		plug.dev = -1;
		plug.next = NULL;
		plugged = 1;
	};
	sti();
	for (i=0;i<nr; i++)
		if (bh[i]) {
			bh[i]->b_req = 1;	/* buffer is now part of a request */
			make_request(major, rw, bh[i]);
		}
	/* unplug: drop the dummy, promote its successor and start the driver */
	if(plugged){
		cli();
		blk_dev[major].current_request = plug.next;
		dev = major+blk_dev;
		(dev->request_fn)();
		sti();
	};
}
351
352 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
353 {
354 int i;
355 int buffersize;
356 struct request * req;
357 unsigned int major = MAJOR(dev);
358
359 if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
360 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
361 return;
362 }
363
364 if (rw!=READ && rw!=WRITE) {
365 printk("ll_rw_swap: bad block dev command, must be R/W");
366 return;
367 }
368 if (rw == WRITE && is_read_only(dev)) {
369 printk("Can't swap to read-only device 0x%X\n",dev);
370 return;
371 }
372
373 buffersize = PAGE_SIZE / nb;
374
375 for (i=0; i<nb; i++, buf += buffersize)
376 {
377 repeat:
378 req = request+NR_REQUEST;
379 while (--req >= request)
380 if (req->dev<0)
381 break;
382 if (req < request) {
383 sleep_on(&wait_for_request);
384 goto repeat;
385 }
386
387 req->dev = dev;
388 req->cmd = rw;
389 req->errors = 0;
390 req->sector = (b[i] * buffersize) >> 9;
391 req->nr_sectors = buffersize >> 9;
392 req->current_nr_sectors = buffersize >> 9;
393 req->buffer = buf;
394 req->waiting = current;
395 req->bh = NULL;
396 req->next = NULL;
397 current->state = TASK_UNINTERRUPTIBLE;
398 add_request(major+blk_dev,req);
399 schedule();
400 }
401 }
402
403 long blk_dev_init(long mem_start, long mem_end)
404 {
405 int i;
406
407 for (i=0 ; i<NR_REQUEST ; i++) {
408 request[i].dev = -1;
409 request[i].next = NULL;
410 }
411 memset(ro_bits,0,sizeof(ro_bits));
412 #ifdef CONFIG_BLK_DEV_HD
413 mem_start = hd_init(mem_start,mem_end);
414 #endif
415 #ifdef CONFIG_BLK_DEV_XD
416 mem_start = xd_init(mem_start,mem_end);
417 #endif
418 if (ramdisk_size)
419 mem_start += rd_init(mem_start, ramdisk_size*1024);
420 return mem_start;
421 }