This source file includes the following definitions:
- find_first_zero
- clear_buf
- que
- get__map_zone
- get_free__bit
- xiafs_free_zone
- xiafs_new_zone
- xiafs_free_inode
- xiafs_new_inode
- count_zone
- xiafs_count_free_inodes
- xiafs_count_free_zones
#include <linux/sched.h>
#include <linux/locks.h>
#include <linux/xia_fs.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/string.h>

#include "xiafs_mac.h"

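/*
 * clear_bit(nr, addr): atomically clear bit "nr" at "addr" with "btrl".
 * "setnb" latches the inverted carry flag, so the expression is
 * non-zero when the bit was already clear; callers use this to report
 * double frees.
 */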
#define clear_bit(nr,addr) ({\
char res; \
__asm__ __volatile__("btrl %1,%2\n\tsetnb %0": \
"=q" (res):"r" (nr),"m" (*(addr))); \
res;})

char internal_error_message[]="XIA-FS: internal error %s %d\n";

static int find_first_zero(struct buffer_head *bh, int start_bit, int end_bit)
{
        /*
         * Scan bh->b_data for the first zero bit in [start_bit, end_bit),
         * 32 bits at a time.  The chosen bit is set with "btsl"; if it
         * turns out to have been taken already, the search restarts just
         * after it.  Returns the bit number, or -1 if no zero bit is
         * found.  The buffer is marked dirty when a bit is claimed.
         */
        int end, i, j, tmp;
        u_long *bmap;
        char res;

        bmap=(u_long *)bh->b_data;
        end = end_bit >> 5;

repeat:
        i=start_bit >> 5;
        if ( (tmp=(~bmap[i]) & (0xffffffff << (start_bit & 31))) )
                goto zone_found;
        while (++i < end)
                if (~bmap[i]) {
                        tmp=~bmap[i];
                        goto zone_found;
                }
        if ( !(tmp=~bmap[i] & ((1 << (end_bit & 31)) -1)) )
                return -1;
zone_found:
        for (j=0; j < 32; j++)
                if (tmp & (1 << j))
                        break;
        __asm__ ("btsl %1,%2\n\tsetb %0": \
                 "=q" (res):"r" (j),"m" (bmap[i]));
        if (res) {
                start_bit=j + (i << 5) + 1;
                goto repeat;
        }
        bh->b_dirt=1;
        return j + (i << 5);
}

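/* Zero-fill the data area of a buffer, one long at a time. */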
static void clear_buf(struct buffer_head * bh)
{
        register int i;
        register long * lp;

        lp=(long *)bh->b_data;
        for (i= bh->b_size >> 2; i-- > 0; )
                *lp++=0;
}

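/*
 * que(): move the cache slot at "pos" to the front of the bitmap
 * buffer queue, shifting the earlier entries down one place, so the
 * most recently used bitmap zone is always found first.
 */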
static void que(struct buffer_head * bmap[], int bznr[], int pos)
{
        struct buffer_head * tbh;
        int tmp;
        int i;

        tbh=bmap[pos];
        tmp=bznr[pos];
        for (i=pos; i > 0; i--) {
                bmap[i]=bmap[i-1];
                bznr[i]=bznr[i-1];
        }
        bmap[0]=tbh;
        bznr[0]=tmp;
}

#define get_imap_zone(sb, bit_nr, not_que) \
        get__map_zone((sb), (sb)->u.xiafs_sb.s_imap_buf, \
                      (sb)->u.xiafs_sb.s_imap_iznr, \
                      (sb)->u.xiafs_sb.s_imap_cached, 1, \
                      (sb)->u.xiafs_sb.s_imap_zones, _XIAFS_IMAP_SLOTS, \
                      bit_nr, not_que)

#define get_zmap_zone(sb, bit_nr, not_que) \
        get__map_zone((sb), (sb)->u.xiafs_sb.s_zmap_buf, \
                      (sb)->u.xiafs_sb.s_zmap_zznr, \
                      (sb)->u.xiafs_sb.s_zmap_cached, \
                      1+(sb)->u.xiafs_sb.s_imap_zones, \
                      (sb)->u.xiafs_sb.s_zmap_zones, _XIAFS_ZMAP_SLOTS, \
                      bit_nr, not_que)

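/*
 * get__map_zone(): return the bitmap zone that holds "bit_nr".
 * get_imap_zone()/get_zmap_zone() above fill in the inode- and
 * zone-bitmap arguments.  When the bitmap is held entirely in core
 * (cache == 0) the buffer is returned directly; otherwise the super
 * block is locked, the slot cache is searched, and on a miss the
 * least recently used slot is replaced by a freshly read zone.  If
 * "not_que" is non-NULL the slot index is returned through it and the
 * queue order is left alone, otherwise the slot is moved to the
 * front.  On success with caching, the super block stays locked and
 * the caller releases it with xiafs_unlock_super().
 */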
static struct buffer_head *
get__map_zone(struct super_block *sb, struct buffer_head * bmap_buf[],
              int bznr[], u_char cache, int first_zone,
              int bmap_zones, int slots, u_long bit_nr, int * not_que)
{
        struct buffer_head * tmp_bh;
        int z_nr, i;

        z_nr = bit_nr >> XIAFS_BITS_PER_Z_BITS(sb);
        if (z_nr >= bmap_zones) {
                printk("XIA-FS: bad inode/zone number (%s %d)\n", WHERE_ERR);
                return NULL;
        }
        if (!cache)
                return bmap_buf[z_nr];
        lock_super(sb);
        for (i=0; i < slots; i++)
                if (bznr[i]==z_nr)
                        break;
        if (i < slots) {
                if (not_que) {
                        *not_que=i;
                        return bmap_buf[i];
                } else {
                        que(bmap_buf, bznr, i);
                        return bmap_buf[0];
                }
        }
        tmp_bh=bread(sb->s_dev, z_nr+first_zone, XIAFS_ZSIZE(sb));
        if (!tmp_bh) {
                printk("XIA-FS: read bitmap failed (%s %d)\n", WHERE_ERR);
                unlock_super(sb);
                return NULL;
        }
        brelse(bmap_buf[slots-1]);
        bmap_buf[slots-1]=tmp_bh;
        bznr[slots-1]=z_nr;
        if (not_que)
                *not_que=slots-1;
        else
                que(bmap_buf, bznr, slots-1);
        return tmp_bh;
}

#define xiafs_unlock_super(sb, cache) if (cache) unlock_super(sb);

#define get_free_ibit(sb, prev_bit) \
        get_free__bit(sb, sb->u.xiafs_sb.s_imap_buf, \
                      sb->u.xiafs_sb.s_imap_iznr, \
                      sb->u.xiafs_sb.s_imap_cached, \
                      1, sb->u.xiafs_sb.s_imap_zones, \
                      _XIAFS_IMAP_SLOTS, prev_bit);

#define get_free_zbit(sb, prev_bit) \
        get_free__bit(sb, sb->u.xiafs_sb.s_zmap_buf, \
                      sb->u.xiafs_sb.s_zmap_zznr, \
                      sb->u.xiafs_sb.s_zmap_cached, \
                      1 + sb->u.xiafs_sb.s_imap_zones, \
                      sb->u.xiafs_sb.s_zmap_zones, \
                      _XIAFS_ZMAP_SLOTS, prev_bit);

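/*
 * get_free__bit(): find and claim a free bit in the inode or zone
 * bitmap, starting the search just after "prev_bit" and wrapping
 * around once.  get_free_ibit()/get_free_zbit() above supply the
 * per-bitmap arguments.  Returns the absolute bit number, or 0 when
 * nothing is free.
 */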
static u_long
get_free__bit(struct super_block *sb, struct buffer_head * bmap_buf[],
              int bznr[], u_char cache, int first_zone, int bmap_zones,
              int slots, u_long prev_bit)
{
        struct buffer_head * bh;
        int not_done=0;
        u_long pos, start_bit, end_bit, total_bits;
        int z_nr, tmp;

        total_bits=bmap_zones << XIAFS_BITS_PER_Z_BITS(sb);
        if (prev_bit >= total_bits)
                prev_bit=0;
        pos=prev_bit+1;
        end_bit=XIAFS_BITS_PER_Z(sb);

        do {
                if (pos >= total_bits)
                        pos=0;
                if (!not_done) {
                        not_done=1;
                        start_bit= pos & (end_bit-1);
                } else
                        start_bit=0;
                if ( pos < prev_bit && pos+end_bit >= prev_bit) {
                        not_done=0;
                        end_bit=prev_bit & (end_bit-1);
                }
                bh = get__map_zone(sb, bmap_buf, bznr, cache, first_zone,
                                   bmap_zones, slots, pos, &z_nr);
                if (!bh)
                        return 0;
                tmp=find_first_zero(bh, start_bit, end_bit);
                if (tmp >= 0)
                        break;
                xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
                pos=(pos & ~(end_bit-1))+end_bit;
        } while (not_done);

        if (tmp < 0)
                return 0;
        if (cache)
                que(bmap_buf, bznr, z_nr);
        xiafs_unlock_super(sb, cache);
        return (pos & ~(XIAFS_BITS_PER_Z(sb)-1))+tmp;
}

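/*
 * xiafs_free_zone(): release data zone "d_addr".  Any buffer still
 * hashed for that block is marked clean so it will not be written
 * back, then the matching bit in the zone bitmap is cleared.
 */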
void xiafs_free_zone(struct super_block * sb, int d_addr)
{
        struct buffer_head * bh;
        unsigned int bit, offset;

        if (!sb) {
                printk(INTERN_ERR);
                return;
        }
        if (d_addr < sb->u.xiafs_sb.s_firstdatazone ||
            d_addr >= sb->u.xiafs_sb.s_nzones) {
                printk("XIA-FS: bad zone number (%s %d)\n", WHERE_ERR);
                return;
        }
        bh = get_hash_table(sb->s_dev, d_addr, XIAFS_ZSIZE(sb));
        if (bh)
                bh->b_dirt=0;
        brelse(bh);
        bit=d_addr - sb->u.xiafs_sb.s_firstdatazone + 1;
        bh = get_zmap_zone(sb, bit, NULL);
        if (!bh)
                return;
        offset = bit & (XIAFS_BITS_PER_Z(sb) -1);
        if (clear_bit(offset, bh->b_data))
                printk("XIA-FS: dev %04x"
                       " block bit %u (0x%x) already cleared (%s %d)\n",
                       sb->s_dev, bit, bit, WHERE_ERR);
        bh->b_dirt = 1;
        xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
}

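/*
 * xiafs_new_zone(): allocate a data zone, searching the zone bitmap
 * from just after "prev_addr" to keep a file's zones close together.
 * The new zone is zero-filled and marked dirty; returns its address,
 * or 0 on failure.
 */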
int xiafs_new_zone(struct super_block * sb, u_long prev_addr)
{
        struct buffer_head * bh;
        int prev_znr, tmp;

        if (!sb) {
                printk(INTERN_ERR);
                return 0;
        }
        if (prev_addr < sb->u.xiafs_sb.s_firstdatazone ||
            prev_addr >= sb->u.xiafs_sb.s_nzones) {
                prev_addr=sb->u.xiafs_sb.s_firstdatazone;
        }
        prev_znr=prev_addr-sb->u.xiafs_sb.s_firstdatazone+1;
        tmp=get_free_zbit(sb, prev_znr);
        if (!tmp)
                return 0;
        tmp += sb->u.xiafs_sb.s_firstdatazone -1;
        if (!(bh = getblk(sb->s_dev, tmp, XIAFS_ZSIZE(sb)))) {
                printk("XIA-FS: I/O error (%s %d)\n", WHERE_ERR);
                return 0;
        }
        if (bh->b_count != 1) {
                printk(INTERN_ERR);
                return 0;
        }
        clear_buf(bh);
        bh->b_uptodate = 1;
        bh->b_dirt = 1;
        brelse(bh);
        return tmp;
}

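/*
 * xiafs_free_inode(): sanity-check the inode, drop it from the inode
 * cache and clear its bit in the inode bitmap.
 */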
void xiafs_free_inode(struct inode * inode)
{
        struct buffer_head * bh;
        struct super_block * sb;
        unsigned long ino;

        if (!inode)
                return;
        if (!inode->i_dev || inode->i_count!=1 || inode->i_nlink || !inode->i_sb ||
            inode->i_ino < 3 || inode->i_ino > inode->i_sb->u.xiafs_sb.s_ninodes) {
                printk("XIA-FS: bad inode (%s %d)\n", WHERE_ERR);
                return;
        }
        sb = inode->i_sb;
        ino = inode->i_ino;
        bh = get_imap_zone(sb, ino, NULL);
        if (!bh)
                return;
        clear_inode(inode);
        if (clear_bit(ino & (XIAFS_BITS_PER_Z(sb)-1), bh->b_data))
                printk("XIA-FS: dev %04x"
                       " inode bit %ld (0x%lx) already cleared (%s %d)\n",
                       inode->i_dev, ino, ino, WHERE_ERR);
        bh->b_dirt = 1;
        xiafs_unlock_super(sb, sb->u.xiafs_sb.s_imap_cached);
}

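/*
 * xiafs_new_inode(): allocate a fresh inode for a file created in
 * "dir", claiming a free bit in the inode bitmap near the directory's
 * own inode number.  Returns the initialized in-core inode, or NULL.
 */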
struct inode * xiafs_new_inode(struct inode * dir)
{
        struct super_block * sb;
        struct inode * inode;
        ino_t tmp;

        /* check "dir" before touching it, then grab an empty in-core inode */
        if (!dir || !(inode = get_empty_inode()))
                return NULL;
        sb = dir->i_sb;
        inode->i_sb = sb;
        inode->i_flags = inode->i_sb->s_flags;

        tmp=get_free_ibit(sb, dir->i_ino);
        if (!tmp) {
                iput(inode);
                return NULL;
        }
        inode->i_count = 1;
        inode->i_nlink = 1;
        inode->i_dev = sb->s_dev;
        inode->i_uid = current->euid;
        inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->egid;
        inode->i_dirt = 1;
        inode->i_ino = tmp;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        inode->i_op = NULL;
        inode->i_blocks = 0;
        inode->i_blksize = XIAFS_ZSIZE(inode->i_sb);
        insert_inode_hash(inode);
        return inode;
}

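/*
 * count_zone(): count the set bits in one bitmap zone by looking up
 * each half-byte in nibblemap[], which holds the population count of
 * every 4-bit value.
 */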
static int nibblemap[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4 };

static u_long count_zone(struct buffer_head * bh)
{
        int i, tmp;
        u_long sum;

        sum=0;
        for (i=bh->b_size; i-- > 0; ) {
                tmp=bh->b_data[i];
                sum += nibblemap[tmp & 0xf] + nibblemap[(tmp & 0xff) >> 4];
        }
        return sum;
}

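/*
 * xiafs_count_free_inodes()/xiafs_count_free_zones(): walk every
 * bitmap zone, total the bits that are in use, and subtract from the
 * bitmap capacity to report the number of free inodes or zones.
 */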
unsigned long xiafs_count_free_inodes(struct super_block *sb)
{
        struct buffer_head * bh;
        int izones, i, not_que;
        u_long sum;

        sum=0;
        izones=sb->u.xiafs_sb.s_imap_zones;
        for (i=0; i < izones; i++) {
                bh=get_imap_zone(sb, i << XIAFS_BITS_PER_Z_BITS(sb), &not_que);
                if (bh) {
                        sum += count_zone(bh);
                        xiafs_unlock_super(sb, sb->u.xiafs_sb.s_imap_cached);
                }
        }
        i=izones << XIAFS_BITS_PER_Z_BITS(sb);
        return i - sum;
}

unsigned long xiafs_count_free_zones(struct super_block *sb)
{
        struct buffer_head * bh;
        int zzones, i, not_que;
        u_long sum;

        sum=0;
        zzones=sb->u.xiafs_sb.s_zmap_zones;
        for (i=0; i < zzones; i++) {
                bh=get_zmap_zone(sb, i << XIAFS_BITS_PER_Z_BITS(sb), &not_que);
                if (bh) {
                        sum += count_zone(bh);
                        xiafs_unlock_super(sb, sb->u.xiafs_sb.s_zmap_cached);
                }
        }
        i=zzones << XIAFS_BITS_PER_Z_BITS(sb);
        return i - sum;
}