This source file includes following definitions.
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- ll_rw_swap_file
- blk_dev_init
/*
 *  linux/kernel/blk_drv/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This handles all read/write requests to block devices
 */
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/config.h>
15 #include <linux/locks.h>
16
17 #include <asm/system.h>
18
19 #include "blk.h"
20
21
22
23
24
/* The global pool of request slots: one entry per queued block I/O
   request.  A slot with dev < 0 is free (see make_request()). */
struct request request[NR_REQUEST];

/* Processes that found no free slot in request[] sleep here and are
   woken when a request completes and its slot is released. */
struct wait_queue * wait_for_request = NULL;

/* Per-major read-ahead setting.  NOTE(review): units are not visible
   in this file — presumably sectors; confirm against the drivers. */
int read_ahead[NR_BLK_DEV] = {0, };

/* One entry per major device number: the driver's request_fn and the
   head of its current request queue.  Drivers fill in their own slot
   at init time; any entries beyond the initializers stay NULL. */
struct blk_dev_struct blk_dev[NR_BLK_DEV] = {
	{ NULL, NULL },		/* major 0 */
	{ NULL, NULL },		/* major 1 */
	{ NULL, NULL },		/* major 2 */
	{ NULL, NULL },		/* major 3 */
	{ NULL, NULL },		/* major 4 */
	{ NULL, NULL },		/* major 5 */
	{ NULL, NULL },		/* major 6 */
	{ NULL, NULL },		/* major 7 */
	{ NULL, NULL },		/* major 8 */
	{ NULL, NULL }		/* major 9 */
};

/* Device sizes indexed [major][minor], in 1024-byte blocks — see the
   "(sector + count)>>1" bounds check in make_request().  A NULL row
   means the size is unknown and no bounds check is done. */
int * blk_size[NR_BLK_DEV] = { NULL, NULL, };

/* Read-only bitmaps, one per major: minor's flag is bit (minor & 31)
   of word (minor >> 5), giving 8*32 = 256 minors per major. */
static long ro_bits[NR_BLK_DEV][8];
65
66 int is_read_only(int dev)
67 {
68 int minor,major;
69
70 major = MAJOR(dev);
71 minor = MINOR(dev);
72 if (major < 0 || major >= NR_BLK_DEV) return 0;
73 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
74 }
75
76 void set_device_ro(int dev,int flag)
77 {
78 int minor,major;
79
80 major = MAJOR(dev);
81 minor = MINOR(dev);
82 if (major < 0 || major >= NR_BLK_DEV) return;
83 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
84 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
85 }
86
87
88
89
90
91
/*
 * add_request() inserts a request into a device's request list, using
 * the elevator (IN_ORDER) comparison to keep the list sorted for short
 * seeks.  Interrupts stay disabled for the whole list manipulation,
 * since the completion interrupt walks and pops the same list.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;

	req->next = NULL;
	cli();
	/* The buffer is committed to I/O from here on: clear its dirty
	   bit so it is not scheduled for writing a second time. */
	if (req->bh)
		req->bh->b_dirt = 0;
	if (!(tmp = dev->current_request)) {
		/* Queue was idle: install the request and start the driver. */
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	/* Elevator insertion: walk until req sorts between tmp and
	   tmp->next, or until the list itself wraps out of order. */
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

	/* For majors 8, 9 and 11 (presumably the SCSI drivers — confirm
	   against the major-number list) the request_fn is kicked even
	   when the queue was busy, as those drivers can accept more work
	   while a command is in flight. */
	if(MAJOR(req->dev) == 8 ||
	   MAJOR(req->dev) == 9 ||
	   MAJOR(req->dev) == 11)
		(dev->request_fn)();

	sti();
}
123
/*
 * make_request() queues a single buffer for I/O on the given major
 * device: it either merges the buffer onto the tail of an existing
 * contiguous request, or claims a free slot in request[] and hands it
 * to add_request().  "rw" is READ/WRITE, or the advisory READA/WRITEA
 * variants which never block and may be dropped.
 */
static void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead;

	/* Read-ahead/write-ahead is best-effort: if the buffer is
	   already locked, skip it instead of waiting, and otherwise
	   treat it as a normal read or write. */
	rw_ahead = (rw == READA || rw == WRITEA);
	if (rw_ahead) {
		if (bh->b_lock)
			return;
		if (rw == READA)
			rw = READ;
		else
			rw = WRITE;
	}
	if (rw!=READ && rw!=WRITE) {
		printk("Bad block dev command, must be R/W/RA/WA\n");
		return;
	}
	/* count and sector are in 512-byte sectors. */
	count = bh->b_size >> 9;
	sector = bh->b_blocknr * count;
	/* blk_size[] is in 1024-byte blocks, hence the >>1: drop I/O
	   that would run past the end of the device. */
	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
			bh->b_dirt = bh->b_uptodate = 0;
			return;
		}
	lock_buffer(bh);
	/* Nothing to do: writing a clean buffer, or reading one that is
	   already up to date. */
	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
		unlock_buffer(bh);
		return;
	}

repeat:
	cli();
	/* Try to merge this buffer onto an already-queued request for
	   majors 3, 8 and 11 (hd and, presumably, SCSI disk/CD-ROM —
	   confirm against the major-number list).  For major 3 the head
	   request is skipped: the driver is actively processing it. */
	if ((major == 3 || major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
		if(major == 3) req = req->next;
		while (req) {
			if (req->dev == bh->b_dev &&
			    !req->waiting &&
			    req->cmd == rw &&
			    req->sector + req->nr_sectors == sector &&
			    req->nr_sectors < 254) {
				/* Buffer is contiguous with the tail of
				   this request: chain it on. */
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				bh->b_dirt = 0;
				sti();
				return;
			}
			req = req->next;
		}
	}

	/* Reads may use the whole request table; writes only the lower
	   half, so reads always have slots available and are never
	   starved by heavy write traffic. */
	if (rw == READ)
		req = request+NR_REQUEST;
	else
		req = request+(NR_REQUEST/2);

	/* Find a free slot (dev < 0 marks a slot as free). */
	while (--req >= request)
		if (req->dev < 0)
			goto found;

	/* No free slot: read-ahead simply gives up; normal I/O sleeps
	   until a request completes, then retries from the top. */
	if (rw_ahead) {
		sti();
		unlock_buffer(bh);
		return;
	}
	sleep_on(&wait_for_request);
	sti();
	goto repeat;

found:
	/* Claim the slot (dev >= 0) before re-enabling interrupts so no
	   one else can grab it; the remaining fields are filled with
	   interrupts on. */
	req->dev = bh->b_dev;
	sti();
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
}
219
/*
 * ll_rw_page() reads or writes one page (8 sectors, starting at sector
 * page<<3) directly into/out of "buffer", bypassing the buffer cache.
 * Unlike ll_rw_block() it sleeps until the I/O has completed (used by
 * the swapper, hence TASK_SWAPPING).
 */
void ll_rw_page(int rw, int dev, int page, char * buffer)
{
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
		printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
		return;
	}
	if (rw!=READ && rw!=WRITE)
		panic("Bad block dev command, must be R/W");
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't page to read-only device 0x%X\n\r",dev);
		return;
	}
	/* Scan for a free slot (dev < 0) with interrupts off.
	   NOTE(review): sleep_on() is entered with interrupts disabled
	   and the loop repeats without a fresh cli() — presumably
	   sleep_on()/schedule() handle the interrupt flag; confirm
	   against sched.c. */
	cli();
repeat:
	req = request+NR_REQUEST;
	while (--req >= request)
		if (req->dev<0)
			break;
	if (req < request) {
		sleep_on(&wait_for_request);
		goto repeat;
	}
	sti();
	/* NOTE(review): the slot is claimed (dev set) after sti(), the
	   reverse of make_request()'s ordering — apparently safe because
	   interrupt handlers only free slots, never allocate them. */
	req->dev = dev;
	req->cmd = rw;
	req->errors = 0;
	req->sector = page<<3;
	req->nr_sectors = 8;
	req->current_nr_sectors = 8;
	req->buffer = buffer;
	req->waiting = current;	/* completion handler wakes this task */
	req->bh = NULL;
	req->next = NULL;
	current->state = TASK_SWAPPING;
	add_request(major+blk_dev,req);
	/* Sleep until the request has completed. */
	schedule();
}
261
262
263
264
265
266 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
267 {
268 unsigned int major;
269
270 struct request plug;
271 int plugged;
272 struct blk_dev_struct * dev;
273 int i, j;
274
275
276 while(!bh[0]){
277 bh++;
278 nr--;
279 if (nr <= 0) return;
280 };
281
282 for(j=0;j<nr; j++){
283 if(!bh[j]) continue;
284 if (bh[j]->b_size != 1024) {
285 printk("ll_rw_block: only 1024-char blocks implemented (%d)\n",bh[0]->b_size);
286 for (i=0;i<nr; i++)
287 if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
288 return;
289 }
290 };
291
292 if ((major=MAJOR(bh[0]->b_dev)) >= NR_BLK_DEV ||
293 !(blk_dev[major].request_fn)) {
294 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh[0]->b_dev,bh[0]->b_blocknr);
295 for (i=0;i<nr; i++)
296 if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
297 return;
298 }
299 if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
300 printk("Can't write to read-only device 0x%X\n\r",bh[0]->b_dev);
301 for (i=0;i<nr; i++)
302 if (bh[i]) bh[i]->b_dirt = bh[i]->b_uptodate = 0;
303 return;
304 }
305
306
307
308
309 plugged = 0;
310 cli();
311 if (!blk_dev[major].current_request && nr > 1) {
312 blk_dev[major].current_request = &plug;
313 plug.dev = -1;
314 plug.next = NULL;
315 plugged = 1;
316 };
317 sti();
318 for (i=0;i<nr; i++)
319 if (bh[i]) make_request(major, rw, bh[i]);
320 if(plugged){
321 cli();
322 blk_dev[major].current_request = plug.next;
323 dev = major+blk_dev;
324 (dev->request_fn)();
325 sti();
326 };
327 }
328
/*
 * ll_rw_swap_file() reads or writes "nb" swap blocks listed in b[]
 * (block numbers in 1kB units, hence the <<1 to sectors) into the
 * contiguous buffer "buf", one request at a time, sleeping until each
 * request completes.
 */
void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
{
	int i;
	struct request * req;
	unsigned int major = MAJOR(dev);

	if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
		printk("ll_rw_swap_file: trying to swap nonexistent block-device\n\r");
		return;
	}

	if (rw!=READ && rw!=WRITE) {
		printk("ll_rw_swap: bad block dev command, must be R/W");
		return;
	}
	if (rw == WRITE && is_read_only(dev)) {
		printk("Can't swap to read-only device 0x%X\n\r",dev);
		return;
	}

	for (i=0; i<nb; i++, buf += BLOCK_SIZE)
	{
	/* Find a free request slot (dev < 0).
	   NOTE(review): unlike ll_rw_page(), this scan runs with
	   interrupts enabled — apparently safe because interrupt
	   handlers only free slots; confirm this is intentional. */
repeat:
		req = request+NR_REQUEST;
		while (--req >= request)
			if (req->dev<0)
				break;
		if (req < request) {
			sleep_on(&wait_for_request);
			goto repeat;
		}

		/* One request per 1kB block: 2 sectors at b[i]<<1. */
		req->dev = dev;
		req->cmd = rw;
		req->errors = 0;
		req->sector = b[i] << 1;
		req->nr_sectors = 2;
		req->current_nr_sectors = 2;
		req->buffer = buf;
		req->waiting = current;	/* completion wakes this task */
		req->bh = NULL;
		req->next = NULL;
		current->state = TASK_UNINTERRUPTIBLE;
		add_request(major+blk_dev,req);
		/* Sleep until this block's I/O has completed. */
		schedule();
	}
}
376
377 long blk_dev_init(long mem_start, long mem_end)
378 {
379 int i;
380
381 for (i=0 ; i<NR_REQUEST ; i++) {
382 request[i].dev = -1;
383 request[i].next = NULL;
384 }
385 memset(ro_bits,0,sizeof(ro_bits));
386 #ifdef CONFIG_BLK_DEV_HD
387 mem_start = hd_init(mem_start,mem_end);
388 #endif
389 if (ramdisk_size)
390 mem_start += rd_init(mem_start, ramdisk_size*1024);
391 return mem_start;
392 }