This source file includes following definitions.
- find_first_zero
- clear_buf
- que
- get__map_zone
- get_free__bit
- xiafs_free_zone
- xiafs_new_zone
- xiafs_free_inode
- xiafs_new_inode
- count_zone
- xiafs_count_free_inodes
- xiafs_count_free_zones
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/sched.h>
15 #include <linux/locks.h>
16 #include <linux/xia_fs.h>
17 #include <linux/stat.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20
21 #include "xiafs_mac.h"
22
23
/* Atomically clear bit 'nr' in the bitmap at 'addr' (x86 "btrl").
 * Returns non-zero if the bit was ALREADY clear ("setnb": carry flag
 * clear, i.e. the old bit value was 0), zero if we actually cleared a
 * set bit.  Callers use a non-zero result to report double-frees. */
#define clear_bit(nr,addr) ({\
char res; \
__asm__ __volatile__("btrl %1,%2\n\tsetnb %0": \
"=q" (res):"r" (nr),"m" (*(addr))); \
res;})
29
30 char internal_error_message[]="XIA-FS: internal error %s %d\n";
31
/*
 * Scan the bitmap in 'bh' for the first zero bit in [start_bit, end_bit)
 * and atomically claim it (x86 "btsl").  Returns the bit number on
 * success or -1 if every bit in the range is set.  If the test-and-set
 * loses a race (the bit got set between the scan and the btsl), the
 * search restarts just past that bit.  Marks the buffer dirty on success.
 *
 * NOTE(review): when start_bit falls in the final partial word, the first
 * probe masks only the low side — bits at/above end_bit in that word are
 * assumed pre-set by mkfs; verify against the bitmap initializer.
 */
static int find_first_zero(struct buffer_head *bh, int start_bit, int end_bit)
{
    int end, i, j, tmp;
    u_long *bmap;
    char res;

    bmap=(u_long *)bh->b_data;
    end = end_bit >> 5;		/* index of the last, possibly partial, 32-bit word */

repeat:
    i=start_bit >> 5;
    /* first word: ignore bits below start_bit */
    if ( (tmp=(~bmap[i]) & (0xffffffff << (start_bit & 31))) )
        goto zone_found;
    /* whole middle words */
    while (++i < end)
        if (~bmap[i]) {
            tmp=~bmap[i];
            goto zone_found;
        }
    /* last word: ignore bits at/above end_bit (mask is 0 when end_bit
     * is a multiple of 32, i.e. there is no partial word) */
    if ( !(tmp=~bmap[i] & ((1 << (end_bit & 31)) -1)) )
        return -1;
zone_found:
    /* locate the lowest zero bit (set bit of the inverted word) */
    for (j=0; j < 32; j++)
        if (tmp & (1 << j))
            break;
    /* atomically set it; res != 0 means it was already set (we raced) */
    __asm__ ("btsl %1,%2\n\tsetb %0": \
             "=q" (res):"r" (j),"m" (bmap[i]));
    if (res) {
        start_bit=j + (i << 5) + 1;
        goto repeat;
    }
    bh->b_dirt=1;
    return j + (i << 5);
}
71
72 static void clear_buf(struct buffer_head * bh)
73 {
74 register int i;
75 register long * lp;
76
77 lp=(long *)bh->b_data;
78 for (i= bh->b_size >> 2; i-- > 0; )
79 *lp++=0;
80 }
81
/* Move the cache slot at index 'pos' to the front (most-recently-used)
 * of both parallel arrays, sliding slots 0..pos-1 down by one. */
static void que(struct buffer_head * bmap[], int bznr[], int pos)
{
    struct buffer_head * saved_bh;
    int saved_nr;

    saved_bh = bmap[pos];
    saved_nr = bznr[pos];
    while (pos > 0) {
        bmap[pos] = bmap[pos-1];
        bznr[pos] = bznr[pos-1];
        pos--;
    }
    bmap[0] = saved_bh;
    bznr[0] = saved_nr;
}
97
/* Fetch the inode-map buffer holding inode bit 'bit_nr'.  The inode map
 * starts at zone 1, right after the superblock.  See get__map_zone()
 * for the caching/locking contract. */
#define get_imap_zone(sb, bit_nr, not_que) \
    get__map_zone((sb), (sb)->u.xiafs_sb.s_imap_buf, \
                  (sb)->u.xiafs_sb.s_imap_iznr, \
                  (sb)->u.xiafs_sb.s_imap_cached, 1, \
                  (sb)->u.xiafs_sb.s_imap_zones, _XIAFS_IMAP_SLOTS, \
                  bit_nr, not_que)

/* Fetch the zone-map buffer holding zone bit 'bit_nr'.  The zone map
 * follows the inode map on disk (first zone 1 + s_imap_zones). */
#define get_zmap_zone(sb, bit_nr, not_que) \
    get__map_zone((sb), (sb)->u.xiafs_sb.s_zmap_buf, \
                  (sb)->u.xiafs_sb.s_zmap_zznr, \
                  (sb)->u.xiafs_sb.s_zmap_cached, \
                  1+(sb)->u.xiafs_sb.s_imap_zones, \
                  (sb)->u.xiafs_sb.s_zmap_zones, _XIAFS_ZMAP_SLOTS, \
                  bit_nr, not_que)
112
/*
 * Return the bitmap buffer that holds bit 'bit_nr' of an inode or zone
 * map (callers use the get_imap_zone()/get_zmap_zone() wrappers).
 *
 * Uncached maps (cache == 0): every map zone is resident in bmap_buf[],
 * indexed directly; no locking is done.
 *
 * Cached maps: only 'slots' zones are resident.  The superblock is
 * locked here and, on success, REMAINS locked — the caller must release
 * it with xiafs_unlock_super().  On a cache hit the slot index is
 * either reported via *not_que (when not_que != NULL) or the slot is
 * moved to the MRU front by que().  On a miss the LRU slot (slots-1)
 * is evicted and the zone read with bread().
 *
 * Returns NULL on a bad bit number (super never locked) or on a failed
 * read (super unlocked before returning).
 */
static struct buffer_head *
get__map_zone(struct super_block *sb, struct buffer_head * bmap_buf[],
              int bznr[], u_char cache, int first_zone,
              int bmap_zones, int slots, u_long bit_nr, int * not_que)
{
    struct buffer_head * tmp_bh;
    int z_nr, i;

    z_nr = bit_nr >> XIAFS_BITS_PER_Z_BITS(sb);	/* map zone holding the bit */
    if (z_nr >= bmap_zones) {
        printk("XIA-FS: bad inode/zone number (%s %d)\n", WHERE_ERR);
        return NULL;
    }
    if (!cache)
        return bmap_buf[z_nr];
    lock_super(sb);
    for (i=0; i < slots; i++)
        if (bznr[i]==z_nr)
            break;
    if (i < slots) {		/* cache hit */
        if (not_que) {
            *not_que=i;
            return bmap_buf[i];
        } else {
            que(bmap_buf, bznr, i);
            return bmap_buf[0];
        }
    }
    /* cache miss: read the map zone and evict the LRU slot */
    tmp_bh=bread(sb->s_dev, z_nr+first_zone, XIAFS_ZSIZE(sb));
    if (!tmp_bh) {
        printk("XIA-FS: read bitmap failed (%s %d)\n", WHERE_ERR);
        unlock_super(sb);
        return NULL;
    }
    brelse(bmap_buf[slots-1]);
    bmap_buf[slots-1]=tmp_bh;
    bznr[slots-1]=z_nr;
    if (not_que)
        *not_que=slots-1;
    else
        que(bmap_buf, bznr, slots-1);
    return tmp_bh;
}
156
157 #define xiafs_unlock_super(sb, cache) if (cache) unlock_super(sb);
158
/* Find and claim a free inode bit, searching circularly from just after
 * prev_bit.  Returns the bit number, or 0 on failure (bit 0 is reserved
 * and never handed out).  Trailing semicolons removed from the macro
 * bodies so these expand to plain expressions. */
#define get_free_ibit(sb, prev_bit) \
    get_free__bit(sb, sb->u.xiafs_sb.s_imap_buf, \
                  sb->u.xiafs_sb.s_imap_iznr, \
                  sb->u.xiafs_sb.s_imap_cached, \
                  1, sb->u.xiafs_sb.s_imap_zones, \
                  _XIAFS_IMAP_SLOTS, prev_bit)

/* Same, for the zone bitmap (which follows the inode map on disk). */
#define get_free_zbit(sb, prev_bit) \
    get_free__bit(sb, sb->u.xiafs_sb.s_zmap_buf, \
                  sb->u.xiafs_sb.s_zmap_zznr, \
                  sb->u.xiafs_sb.s_zmap_cached, \
                  1 + sb->u.xiafs_sb.s_imap_zones, \
                  sb->u.xiafs_sb.s_zmap_zones, \
                  _XIAFS_ZMAP_SLOTS, prev_bit)
173
174 static u_long
175 get_free__bit(struct super_block *sb, struct buffer_head * bmap_buf[],
176 int bznr[], u_char cache, int first_zone, int bmap_zones,
177 int slots, u_long prev_bit)
178 {
179 struct buffer_head * bh;
180 int not_done=0;
181 u_long pos, start_bit, end_bit, total_bits;
182 int z_nr, tmp;
183
184 total_bits=bmap_zones << XIAFS_BITS_PER_Z_BITS(sb);
185 if (prev_bit >= total_bits)
186 prev_bit=0;
187 pos=prev_bit+1;
188 end_bit=XIAFS_BITS_PER_Z(sb);
189
190 do {
191 if (pos >= total_bits)
192 pos=0;
193 if (!not_done) {
194 not_done=1;
195 start_bit= pos & (end_bit-1);
196 } else
197 start_bit=0;
198 if ( pos < prev_bit && pos+end_bit >= prev_bit) {
199 not_done=0;
200 end_bit=prev_bit & (end_bit-1);
201 }
202 bh = get__map_zone(sb, bmap_buf, bznr, cache, first_zone,
203 bmap_zones, slots, pos, &z_nr);
204 if (!bh)
205 return 0;
206 tmp=find_first_zero(bh, start_bit, end_bit);
207 if (tmp >= 0)
208 break;
209 xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
210 pos=(pos & ~(end_bit-1))+end_bit;
211 } while (not_done);
212
213 if (tmp < 0)
214 return 0;
215 if (cache)
216 que(bmap_buf, bznr, z_nr);
217 xiafs_unlock_super(sb, cache);
218 return (pos & ~(XIAFS_BITS_PER_Z(sb)-1))+tmp;
219 }
220
221 void xiafs_free_zone(struct super_block * sb, int d_addr)
222 {
223 struct buffer_head * bh;
224 unsigned int bit, offset;
225
226 if (!sb) {
227 printk(INTERN_ERR);
228 return;
229 }
230 if (d_addr < sb->u.xiafs_sb.s_firstdatazone ||
231 d_addr >= sb->u.xiafs_sb.s_nzones) {
232 printk("XIA-FS: bad zone number (%s %d)\n", WHERE_ERR);
233 return;
234 }
235 bh = get_hash_table(sb->s_dev, d_addr, XIAFS_ZSIZE(sb));
236 if (bh)
237 bh->b_dirt=0;
238 brelse(bh);
239 bit=d_addr - sb->u.xiafs_sb.s_firstdatazone + 1;
240 bh = get_zmap_zone(sb, bit, NULL);
241 if (!bh)
242 return;
243 offset = bit & (XIAFS_BITS_PER_Z(sb) -1);
244 if (clear_bit(offset, bh->b_data))
245 printk("XIA-FS: bit %d (0x%x) already cleared (%s %d)\n", bit, bit, WHERE_ERR);
246 bh->b_dirt = 1;
247 xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
248 }
249
250 int xiafs_new_zone(struct super_block * sb, u_long prev_addr)
251 {
252 struct buffer_head * bh;
253 int prev_znr, tmp;
254
255 if (!sb) {
256 printk(INTERN_ERR);
257 return 0;
258 }
259 if (prev_addr < sb->u.xiafs_sb.s_firstdatazone ||
260 prev_addr >= sb->u.xiafs_sb.s_nzones) {
261 prev_addr=sb->u.xiafs_sb.s_firstdatazone;
262 }
263 prev_znr=prev_addr-sb->u.xiafs_sb.s_firstdatazone+1;
264 tmp=get_free_zbit(sb, prev_znr);
265 if (!tmp)
266 return 0;
267 tmp += sb->u.xiafs_sb.s_firstdatazone -1;
268 if (!(bh = getblk(sb->s_dev, tmp, XIAFS_ZSIZE(sb)))) {
269 printk("XIA-FS: I/O error (%s %d)\n", WHERE_ERR);
270 return 0;
271 }
272 if (bh->b_count != 1) {
273 printk(INTERN_ERR);
274 return 0;
275 }
276 clear_buf(bh);
277 bh->b_uptodate = 1;
278 bh->b_dirt = 1;
279 brelse(bh);
280 return tmp;
281 }
282
283 void xiafs_free_inode(struct inode * inode)
284 {
285 struct buffer_head * bh;
286 struct super_block * sb;
287 unsigned long ino;
288
289 if (!inode)
290 return;
291 if (!inode->i_dev || inode->i_count!=1 || inode->i_nlink || !inode->i_sb ||
292 inode->i_ino < 3 || inode->i_ino > inode->i_sb->u.xiafs_sb.s_ninodes) {
293 printk("XIA-FS: bad inode (%s %d)\n", WHERE_ERR);
294 return;
295 }
296 sb = inode->i_sb;
297 ino = inode->i_ino;
298 bh = get_imap_zone(sb, ino, NULL);
299 if (!bh)
300 return;
301 clear_inode(inode);
302 if (clear_bit(ino & (XIAFS_BITS_PER_Z(sb)-1), bh->b_data))
303 printk("XIA-FS: bit %d (0x%x) already cleared (%s %d)\n",
304 ino, ino, WHERE_ERR);
305 bh->b_dirt = 1;
306 xiafs_unlock_super(sb, sb->u.xiafs_sb.s_imap_cached);
307 }
308
309 struct inode * xiafs_new_inode(struct inode * dir)
310 {
311 struct super_block * sb;
312 struct inode * inode;
313 ino_t tmp;
314
315 sb = dir->i_sb;
316 if (!dir || !(inode = get_empty_inode()))
317 return NULL;
318 inode->i_sb = sb;
319 inode->i_flags = inode->i_sb->s_flags;
320
321 tmp=get_free_ibit(sb, dir->i_ino);
322 if (!tmp) {
323 iput(inode);
324 return NULL;
325 }
326 inode->i_count = 1;
327 inode->i_nlink = 1;
328 inode->i_dev = sb->s_dev;
329 inode->i_uid = current->euid;
330 inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->egid;
331 inode->i_dirt = 1;
332 inode->i_ino = tmp;
333 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
334 inode->i_op = NULL;
335 inode->i_blocks = 0;
336 inode->i_blksize = XIAFS_ZSIZE(inode->i_sb);
337 insert_inode_hash(inode);
338 return inode;
339 }
340
341 static int nibblemap[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4 };
342
343 static u_long count_zone(struct buffer_head * bh)
344 {
345 int i, tmp;
346 u_long sum;
347
348 sum=0;
349 for (i=bh->b_size; i-- > 0; ) {
350 tmp=bh->b_data[i];
351 sum += nibblemap[tmp & 0xf] + nibblemap[(tmp & 0xff) >> 4];
352 }
353 return sum;
354 }
355
356 unsigned long xiafs_count_free_inodes(struct super_block *sb)
357 {
358 struct buffer_head * bh;
359 int izones, i, not_que;
360 u_long sum;
361
362 sum=0;
363 izones=sb->u.xiafs_sb.s_imap_zones;
364 for (i=0; i < izones; i++) {
365 bh=get_imap_zone(sb, i << XIAFS_BITS_PER_Z_BITS(sb), ¬_que);
366 if (bh) {
367 sum += count_zone(bh);
368 xiafs_unlock_super(sb, sb->u.xiafs_sb.s_imap_cached);
369 }
370 }
371 i=izones << XIAFS_BITS_PER_Z_BITS(sb);
372 return i - sum;
373 }
374
375 unsigned long xiafs_count_free_zones(struct super_block *sb)
376 {
377 struct buffer_head * bh;
378 int zzones, i, not_que;
379 u_long sum;
380
381 sum=0;
382 zzones=sb->u.xiafs_sb.s_zmap_zones;
383 for (i=0; i < zzones; i++) {
384 bh=get_zmap_zone(sb, i << XIAFS_BITS_PER_Z_BITS(sb), ¬_que);
385 if (bh) {
386 sum += count_zone(bh);
387 xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
388 }
389 }
390 i=zzones << XIAFS_BITS_PER_Z_BITS(sb);
391 return i - sum;
392 }