This source file includes the following definitions:
- create_strip_zones
- raid0_run
- raid0_stop
- raid0_map
- raid0_status
- raid0_init
- init_module
- cleanup_module

#include <linux/config.h>
#include <linux/module.h>
#include <linux/md.h>
#include <linux/raid0.h>
#include <linux/malloc.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY

#include <linux/blk.h>

/*
 * Split the member devices into "strip zones".  A zone is a region in
 * which a fixed set of devices participates: zone 0 stripes over all
 * members up to the size of the smallest one, the next zone stripes
 * over the survivors, and so on.  One zone exists per distinct device
 * size.
 */
static void create_strip_zones (int minor, struct md_dev *mddev)
{
	int i, j, c=0;
	int current_offset=0;
	struct real_dev *smallest_by_zone;
	struct raid0_data *data=(struct raid0_data *) mddev->private;

	/* Count the distinct device sizes: each one starts a new zone. */
	data->nr_strip_zones=1;

	for (i=1; i<mddev->nb_dev; i++)
	{
		for (j=0; j<i; j++)
			if (devices[minor][i].size==devices[minor][j].size)
			{
				c=1;
				break;
			}

		if (!c)
			data->nr_strip_zones++;

		c=0;
	}

	data->strip_zone=kmalloc (sizeof(struct strip_zone)*data->nr_strip_zones,
				  GFP_KERNEL);

	data->smallest=NULL;

	for (i=0; i<data->nr_strip_zones; i++)
	{
		data->strip_zone[i].dev_offset=current_offset;
		smallest_by_zone=NULL;
		c=0;

		/*
		 * A device takes part in this zone if it extends past the
		 * point where the previous zone ended (current_offset).
		 */
		for (j=0; j<mddev->nb_dev; j++)
			if (devices[minor][j].size>current_offset)
			{
				data->strip_zone[i].dev[c++]=devices[minor]+j;
				if (!smallest_by_zone ||
				    smallest_by_zone->size > devices[minor][j].size)
					smallest_by_zone=devices[minor]+j;
			}

		data->strip_zone[i].nb_dev=c;
		data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;

		/* Remember the smallest zone: it sets the hash-table grain. */
		if (!data->smallest ||
		    data->smallest->size > data->strip_zone[i].size)
			data->smallest=data->strip_zone+i;

		data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
						     data->strip_zone[i-1].size) : 0;
		current_offset=smallest_by_zone->size;
	}
}

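/*
 * Worked example of zone construction (sizes are illustrative
 * assumptions, not taken from this file).  Three members of 100, 200
 * and 200 blocks give two distinct sizes, so nr_strip_zones == 2:
 *
 *   zone 0: dev_offset=0,   nb_dev=3, size=(100-0)*3  =300, zone_offset=0
 *   zone 1: dev_offset=100, nb_dev=2, size=(200-100)*2=200, zone_offset=300
 *
 * data->smallest ends up pointing at zone 1 (size 200), which becomes
 * the bucket size of the hash table built in raid0_run() below.
 */
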
/*
 * Start the array: check the chunk size, round every member down to a
 * whole number of chunks, build the strip zones and the hash table
 * that raid0_map() uses to locate the zone(s) a block may fall in.
 */
static int raid0_run (int minor, struct md_dev *mddev)
{
	int cur=0, i=0, size, zone0_size, nb_zone, min;
	struct raid0_data *data;

	/* Every member must hold at least one chunk. */
	min=1 << FACTOR_SHIFT(FACTOR(mddev));

	for (i=0; i<mddev->nb_dev; i++)
		if (devices[minor][i].size<min)
		{
			printk ("Cannot use %dk chunks on dev %s\n", min,
				partition_name (devices[minor][i].dev));
			return -EINVAL;
		}

	MOD_INC_USE_COUNT;

	/* Round each member down to a multiple of the chunk size. */
	md_size[minor]=0;

	for (i=0; i<mddev->nb_dev; i++)
	{
		devices[minor][i].size &= ~((1 << FACTOR_SHIFT(FACTOR(mddev))) - 1);
		md_size[minor] += devices[minor][i].size;
	}

	mddev->private=kmalloc (sizeof (struct raid0_data), GFP_KERNEL);
	data=(struct raid0_data *) mddev->private;

	create_strip_zones (minor, mddev);

	/* One hash bucket per smallest-zone-sized slice of the array. */
	nb_zone=data->nr_zones=
		md_size[minor]/data->smallest->size +
		(md_size[minor]%data->smallest->size ? 1 : 0);

	data->hash_table=kmalloc (sizeof (struct raid0_hash)*nb_zone, GFP_KERNEL);

	/*
	 * Walk the zones and fill the buckets.  A bucket covers exactly
	 * data->smallest->size blocks, so it intersects either one zone
	 * (zone1==NULL) or two adjacent ones (zone0 and zone1).
	 */
	size=data->strip_zone[cur].size;

	i=0;
	while (cur<data->nr_strip_zones)
	{
		data->hash_table[i].zone0=data->strip_zone+cur;

		/* The rest of the current zone fills this bucket entirely. */
		if (size>=data->smallest->size)
		{
			data->hash_table[i++].zone1=NULL;
			size-=data->smallest->size;

			if (!size)
			{
				if (++cur==data->nr_strip_zones) continue;
				size=data->strip_zone[cur].size;
			}

			continue;
		}

		/* The current zone ends inside the bucket. */
		if (++cur==data->nr_strip_zones)
		{
			data->hash_table[i].zone1=NULL;
			continue;
		}

		zone0_size=size;
		size=data->strip_zone[cur].size;
		data->hash_table[i++].zone1=data->strip_zone+cur;
		size-=(data->smallest->size - zone0_size);
	}

	return (0);
}

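/*
 * Continuing the illustrative example above, with an assumed chunk
 * size of 4 blocks (so the 100/200/200 member sizes survive the
 * rounding): md_size = 500, smallest zone = 200, hence
 * nr_zones = 500/200 + 1 = 3 hash buckets covering blocks 0-199,
 * 200-399 and 400-499:
 *
 *   hash[0] = { zone0, NULL  }   bucket lies entirely in zone 0
 *   hash[1] = { zone0, zone1 }   zone 0 ends at block 300, mid-bucket
 *   hash[2] = { zone1, NULL  }   the tail of zone 1
 */
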
/*
 * Stop the array and free everything raid0_run() allocated.
 */
static int raid0_stop (int minor, struct md_dev *mddev)
{
	struct raid0_data *data=(struct raid0_data *) mddev->private;

	kfree (data->hash_table);
	kfree (data->strip_zone);
	kfree (data);

	MOD_DEC_USE_COUNT;
	return 0;
}

/*
 * Map a request on /dev/md? to the underlying devices.  For each block
 * the hash table gives the candidate zone(s); the chunk arithmetic
 * below then yields the member device and the block number on it.
 * Buffer heads are redirected and queued per device in "pending",
 * then submitted in one go by make_md_request().
 */
static int raid0_map (int minor, struct md_dev *mddev, struct request *req)
{
	struct raid0_data *data=(struct raid0_data *) mddev->private;
	static struct raid0_hash *hash;
	struct strip_zone *zone;
	struct real_dev *tmp_dev;
	int i, queue, blk_in_chunk, factor, chunk;
	long block, rblock;
	struct buffer_head *bh;
	static struct request pending[MAX_REAL]={{0, }, };

	factor=FACTOR(mddev);

	while (req->bh || req->sem)
	{
		/* Sectors are 512 bytes, blocks 1K: convert, then hash. */
		block=req->sector >> 1;
		hash=data->hash_table+(block/data->smallest->size);

		/* Pick the zone the block really falls in. */
		if (block >= (hash->zone0->size +
			      hash->zone0->zone_offset))
		{
			if (!hash->zone1)
				printk ("raid0_map : hash->zone1==NULL for block %ld\n", block);
			zone=hash->zone1;
		}
		else
			zone=hash->zone0;

		/* Locate the member device and the block number on it. */
		blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
		chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
		tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
		rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;

		/* Synchronous request: redirect it whole and be done. */
		if (req->sem)
		{
			req->rq_dev=tmp_dev->dev;
			req->sector=rblock << 1;
			add_request (blk_dev+MAJOR (tmp_dev->dev), req);

			return REDIRECTED_REQ;
		}

		queue=tmp_dev - devices[minor];

		/* Move this chunk's buffer heads onto the per-device queue. */
		for (i=blk_in_chunk;
		     i<(1UL << FACTOR_SHIFT(factor)) && req->bh;
		     i+=bh->b_size >> 10)
		{
			bh=req->bh;
			if (!buffer_locked(bh))
				printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);

			bh->b_rdev=tmp_dev->dev;
#if defined (CONFIG_MD_SUPPORT_RAID1)
			bh->b_reqshared=NULL;
			bh->b_sister_req=NULL;
#endif

			if (!pending[queue].bh)
			{
				pending[queue].rq_dev=tmp_dev->dev;
				pending[queue].bhtail=pending[queue].bh=bh;
				pending[queue].sector=rblock << 1;
				pending[queue].cmd=req->cmd;
				pending[queue].current_nr_sectors=
					pending[queue].nr_sectors=bh->b_size >> 9;
			}
			else
			{
				pending[queue].bhtail->b_reqnext=bh;
				pending[queue].bhtail=bh;
				pending[queue].nr_sectors+=bh->b_size >> 9;
			}

			end_redirect (req);
		}
	}

	/* Free the md request slot, then fire the per-device requests. */
	req->rq_status=RQ_INACTIVE;
	wake_up (&wait_for_request);
	make_md_request (pending, mddev->nb_dev);
	return REDIRECTED_REQ;
}

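/*
 * Worked mapping example (same assumed layout as above: 4-block chunks,
 * zone 1 with nb_dev=2, zone_offset=300, dev_offset=100).  For block 400:
 *
 *   blk_in_chunk = 400 & 3                 = 0
 *   chunk        = (400-300) / (2<<2)      = 12
 *   tmp_dev      = zone->dev[(400>>2) % 2] = zone->dev[0]
 *   rblock       = (12<<2) + 0 + 100       = 148
 *
 * i.e. array block 400 becomes block 148 on the zone's first member.
 */
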
/*
 * /proc/mdstat output.  The verbose zone/hash dump below is compiled
 * out: MD_DEBUG is undefined right before the test.
 */
static int raid0_status (char *page, int minor, struct md_dev *mddev)
{
	int sz=0;
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k;
	struct raid0_data *data=(struct raid0_data *) mddev->private;

	/* Dump the hash table: one [z<zone0>/z<zone1>] entry per bucket. */
	sz+=sprintf (page+sz, " ");
	for (j=0; j<data->nr_zones; j++)
	{
		sz+=sprintf (page+sz, "[z%d",
			     data->hash_table[j].zone0-data->strip_zone);
		if (data->hash_table[j].zone1)
			sz+=sprintf (page+sz, "/z%d] ",
				     data->hash_table[j].zone1-data->strip_zone);
		else
			sz+=sprintf (page+sz, "] ");
	}

	sz+=sprintf (page+sz, "\n");

	/* Dump each zone: members, zone offset, device offset, size. */
	for (j=0; j<data->nr_strip_zones; j++)
	{
		sz+=sprintf (page+sz, " z%d=[", j);
		for (k=0; k<data->strip_zone[j].nb_dev; k++)
			sz+=sprintf (page+sz, "%s/",
				     partition_name(data->strip_zone[j].dev[k]->dev));
		sz--;
		sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
			     data->strip_zone[j].zone_offset,
			     data->strip_zone[j].dev_offset,
			     data->strip_zone[j].size);
	}
#endif
	return sz;
}

static struct md_personality raid0_personality=
{
	"raid0",
	raid0_map,
	raid0_run,
	raid0_stop,
	raid0_status,
	NULL,
	0
};

#ifndef MODULE

/* Built in: register the raid0 personality at boot. */
void raid0_init (void)
{
	register_md_personality (RAID0, &raid0_personality);
}

#else

int init_module (void)
{
	return (register_md_personality (RAID0, &raid0_personality));
}

void cleanup_module (void)
{
	if (MOD_IN_USE)
		printk ("md raid0 : module still busy...\n");
	else
		unregister_md_personality (RAID0);
}

#endif
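
/*
 * Usage sketch (an assumption, not part of this file): built as a
 * module, the driver would be loaded with something like
 *
 *   insmod raid0.o
 *
 * after which the user-space md tools of this kernel generation
 * (e.g. mdadd/mdrun) can assemble and start a RAID-0 /dev/md* device.
 */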