This source file includes the following definitions.
- lock_buffer
- unlock_buffer
- is_read_only
- set_device_ro
- add_request
- make_request
- ll_rw_page
- ll_rw_block
- blk_dev_init
- ll_rw_swap_file
1
2
3
4
5
6
7
8
9
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/config.h>
15
16 #include <asm/system.h>
17
18 #include "blk.h"
19
20 extern long rd_init(long mem_start, int length);
21
22
23
24
25
26 struct request request[NR_REQUEST];
27
28
29
30
31 struct wait_queue * wait_for_request = NULL;
32
33
34
35
36
37 struct blk_dev_struct blk_dev[NR_BLK_DEV] = {
38 { NULL, NULL },
39 { NULL, NULL },
40 { NULL, NULL },
41 { NULL, NULL },
42 { NULL, NULL },
43 { NULL, NULL },
44 { NULL, NULL },
45 { NULL, NULL },
46 { NULL, NULL },
47 { NULL, NULL }
48 };
49
50
51
52
53
54
55
56
57 int * blk_size[NR_BLK_DEV] = { NULL, NULL, };
58
59 static inline void lock_buffer(struct buffer_head * bh)
60 {
61 cli();
62 while (bh->b_lock)
63 sleep_on(&bh->b_wait);
64 bh->b_lock=1;
65 sti();
66 }
67
68 static inline void unlock_buffer(struct buffer_head * bh)
69 {
70 if (!bh->b_lock)
71 printk("ll_rw_block.c: buffer not locked\n\r");
72 bh->b_lock = 0;
73 wake_up(&bh->b_wait);
74 }
75
76
77
78 static long ro_bits[NR_BLK_DEV][8];
79
80 int is_read_only(int dev)
81 {
82 int minor,major;
83
84 major = MAJOR(dev);
85 minor = MINOR(dev);
86 if (major < 0 || major >= NR_BLK_DEV) return 0;
87 return ro_bits[major][minor >> 5] & (1 << (minor & 31));
88 }
89
90 void set_device_ro(int dev,int flag)
91 {
92 int minor,major;
93
94 major = MAJOR(dev);
95 minor = MINOR(dev);
96 if (major < 0 || major >= NR_BLK_DEV) return;
97 if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
98 else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
99 }
100
101
102
103
104
105
106 static void add_request(struct blk_dev_struct * dev, struct request * req)
107 {
108 struct request * tmp;
109
110 req->next = NULL;
111 cli();
112 if (req->bh)
113 req->bh->b_dirt = 0;
114 if (!(tmp = dev->current_request)) {
115 dev->current_request = req;
116 (dev->request_fn)();
117 sti();
118 return;
119 }
120 for ( ; tmp->next ; tmp = tmp->next) {
121 if ((IN_ORDER(tmp,req) ||
122 !IN_ORDER(tmp,tmp->next)) &&
123 IN_ORDER(req,tmp->next))
124 break;
125 }
126 req->next = tmp->next;
127 tmp->next = req;
128 sti();
129 }
130
131 static void make_request(int major,int rw, struct buffer_head * bh)
132 {
133 unsigned int sector, count;
134 struct request * req;
135 int rw_ahead;
136
137
138
139 if (rw_ahead = (rw == READA || rw == WRITEA)) {
140 if (bh->b_lock)
141 return;
142 if (rw == READA)
143 rw = READ;
144 else
145 rw = WRITE;
146 }
147 if (rw!=READ && rw!=WRITE) {
148 printk("Bad block dev command, must be R/W/RA/WA\n");
149 return;
150 }
151 count = bh->b_size >> 9;
152 sector = bh->b_blocknr * count;
153 if (blk_size[major])
154 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
155 bh->b_dirt = bh->b_uptodate = 0;
156 return;
157 }
158 lock_buffer(bh);
159 if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
160 unlock_buffer(bh);
161 return;
162 }
163 repeat:
164 cli();
165 if ((major == 3 || major == 8 || major == 11)&& (req = blk_dev[major].current_request)) {
166 while (req = req->next) {
167 if (req->dev == bh->b_dev &&
168 !req->waiting &&
169 req->cmd == rw &&
170 req->sector + req->nr_sectors == sector &&
171 req->nr_sectors < 254) {
172 req->bhtail->b_reqnext = bh;
173 req->bhtail = bh;
174 req->nr_sectors += count;
175 bh->b_dirt = 0;
176 sti();
177 return;
178 }
179 }
180 }
181
182
183
184
185 if (rw == READ)
186 req = request+NR_REQUEST;
187 else
188 req = request+(NR_REQUEST/2);
189
190 while (--req >= request)
191 if (req->dev < 0)
192 goto found;
193
194 if (rw_ahead) {
195 sti();
196 unlock_buffer(bh);
197 return;
198 }
199 sleep_on(&wait_for_request);
200 sti();
201 goto repeat;
202
203 found:
204
205 req->dev = bh->b_dev;
206 sti();
207 req->cmd = rw;
208 req->errors = 0;
209 req->sector = sector;
210 req->nr_sectors = count;
211 req->current_nr_sectors = count;
212 req->buffer = bh->b_data;
213 req->waiting = NULL;
214 req->bh = bh;
215 req->bhtail = bh;
216 req->next = NULL;
217 add_request(major+blk_dev,req);
218 }
219
220 void ll_rw_page(int rw, int dev, int page, char * buffer)
221 {
222 struct request * req;
223 unsigned int major = MAJOR(dev);
224
225 if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
226 printk("Trying to read nonexistent block-device %04x (%d)\n",dev,page*8);
227 return;
228 }
229 if (rw!=READ && rw!=WRITE)
230 panic("Bad block dev command, must be R/W");
231 if (rw == WRITE && is_read_only(dev)) {
232 printk("Can't page to read-only device 0x%X\n\r",dev);
233 return;
234 }
235 cli();
236 repeat:
237 req = request+NR_REQUEST;
238 while (--req >= request)
239 if (req->dev<0)
240 break;
241 if (req < request) {
242 sleep_on(&wait_for_request);
243 goto repeat;
244 }
245 sti();
246
247 req->dev = dev;
248 req->cmd = rw;
249 req->errors = 0;
250 req->sector = page<<3;
251 req->nr_sectors = 8;
252 req->current_nr_sectors = 8;
253 req->buffer = buffer;
254 req->waiting = ¤t->wait;
255 req->bh = NULL;
256 req->next = NULL;
257 current->state = TASK_UNINTERRUPTIBLE;
258 add_request(major+blk_dev,req);
259 schedule();
260 }
261
262 void ll_rw_block(int rw, struct buffer_head * bh)
263 {
264 unsigned int major;
265
266 if (!bh)
267 return;
268 if (bh->b_size != 1024) {
269 printk("ll_rw_block: only 1024-char blocks implemented (%d)\n",bh->b_size);
270 bh->b_dirt = bh->b_uptodate = 0;
271 return;
272 }
273 if ((major=MAJOR(bh->b_dev)) >= NR_BLK_DEV ||
274 !(blk_dev[major].request_fn)) {
275 printk("ll_rw_block: Trying to read nonexistent block-device %04x (%d)\n",bh->b_dev,bh->b_blocknr);
276 bh->b_dirt = bh->b_uptodate = 0;
277 return;
278 }
279 if ((rw == WRITE || rw == WRITEA) && is_read_only(bh->b_dev)) {
280 printk("Can't write to read-only device 0x%X\n\r",bh->b_dev);
281 bh->b_dirt = bh->b_uptodate = 0;
282 return;
283 }
284 make_request(major,rw,bh);
285 }
286
287 long blk_dev_init(long mem_start, long mem_end)
288 {
289 int i;
290
291 for (i=0 ; i<NR_REQUEST ; i++) {
292 request[i].dev = -1;
293 request[i].next = NULL;
294 }
295 memset(ro_bits,0,sizeof(ro_bits));
296 #ifdef CONFIG_BLK_DEV_HD
297 mem_start = hd_init(mem_start,mem_end);
298 #endif
299 #ifdef RAMDISK
300 mem_start += rd_init(mem_start, RAMDISK*1024);
301 #endif
302 return mem_start;
303 }
304
305 void ll_rw_swap_file(int rw, int dev, unsigned int *b, int nb, char *buf)
306 {
307 int i;
308 struct request * req;
309 unsigned int major = MAJOR(dev);
310
311 if (major >= NR_BLK_DEV || !(blk_dev[major].request_fn)) {
312 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n\r");
313 return;
314 }
315
316 if (rw!=READ && rw!=WRITE) {
317 printk("ll_rw_swap: bad block dev command, must be R/W");
318 return;
319 }
320 if (rw == WRITE && is_read_only(dev)) {
321 printk("Can't swap to read-only device 0x%X\n\r",dev);
322 return;
323 }
324
325 for (i=0; i<nb; i++, buf += BLOCK_SIZE)
326 {
327 repeat:
328 req = request+NR_REQUEST;
329 while (--req >= request)
330 if (req->dev<0)
331 break;
332 if (req < request) {
333 sleep_on(&wait_for_request);
334 goto repeat;
335 }
336
337 req->dev = dev;
338 req->cmd = rw;
339 req->errors = 0;
340 req->sector = b[i] << 1;
341 req->nr_sectors = 2;
342 req->current_nr_sectors = 2;
343 req->buffer = buf;
344 req->waiting = ¤t->wait;
345 req->bh = NULL;
346 req->next = NULL;
347 current->state = TASK_UNINTERRUPTIBLE;
348 add_request(major+blk_dev,req);
349 schedule();
350 }
351 }