This source file includes the following definitions.
- fcntl_init_locks
- fcntl_getlk
- fcntl_setlk
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- unlock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13 #include <asm/segment.h>
14
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/errno.h>
18 #include <linux/stat.h>
19 #include <linux/fcntl.h>
20
21 #define OFFSET_MAX 0x7fffffff
22
23 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
24 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
25 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
26 static int lock_it(struct file *filp, struct file_lock *caller);
27 static int unlock_it(struct file *filp, struct file_lock *caller);
28 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template);
29 static void free_lock(struct file *filp, struct file_lock *fl);
30
31 static struct file_lock file_lock_table[NR_FILE_LOCKS];
32 static struct file_lock *file_lock_free_list;
33
34
35
36
37
38 void fcntl_init_locks(void)
39 {
40 struct file_lock *fl;
41
42 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
43 fl->fl_next = fl + 1;
44 fl->fl_owner = NULL;
45 }
46 file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
47 file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
48 file_lock_free_list = &file_lock_table[0];
49 }
50
51 int fcntl_getlk(unsigned int fd, struct flock *l)
52 {
53 int error;
54 struct flock flock;
55 struct file *filp;
56 struct file_lock *fl,file_lock;
57
58 if (fd >= NR_OPEN || !(filp = current->filp[fd]))
59 return -EBADF;
60 error = verify_area(VERIFY_WRITE,l, sizeof(*l));
61 if (error)
62 return error;
63 memcpy_fromfs(&flock, l, sizeof(flock));
64 if (flock.l_type == F_UNLCK)
65 return -EINVAL;
66 if (!copy_flock(filp, &file_lock, &flock))
67 return -EINVAL;
68
69 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
70 if (conflict(&file_lock, fl)) {
71 flock.l_pid = fl->fl_owner->pid;
72 flock.l_start = fl->fl_start;
73 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
74 fl->fl_end - fl->fl_start + 1;
75 flock.l_whence = fl->fl_whence;
76 flock.l_type = fl->fl_type;
77 memcpy_tofs(l, &flock, sizeof(flock));
78 return 0;
79 }
80 }
81
82 flock.l_type = F_UNLCK;
83 memcpy_tofs(l, &flock, sizeof(flock));
84 return 0;
85 }
86
87
88
89
90
/*
 * F_SETLK / F_SETLKW: apply or remove a record lock on the file
 * behind fd.  F_SETLK fails with -EAGAIN on conflict; F_SETLKW
 * sleeps on the conflicting lock's wait queue and rescans until
 * the conflict goes away or a signal arrives (-ERESTARTSYS).
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl,file_lock;
	struct flock flock;

	/* Fetch and validate the caller's struct flock. */
	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
		return -EBADF;
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock))
		return -EINVAL;
	switch (file_lock.fl_type) {
	case F_RDLCK :
		/* Read lock requires the file open for reading (f_mode bit 0). */
		if (!(filp->f_mode & 1))
			return -EBADF;
		break;
	case F_WRLCK :
		/* Write lock requires the file open for writing (f_mode bit 1). */
		if (!(filp->f_mode & 2))
			return -EBADF;
		break;
	case F_SHLCK :
		/* Old-style shared lock: any access mode; mapped to a read lock. */
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		/* Old-style exclusive lock: any access mode; mapped to a write lock. */
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;
		break;
	case F_UNLCK :
		break;
	}

	/* Unlocking never blocks and is handled separately. */
	if (file_lock.fl_type == F_UNLCK)
		return unlock_it(filp, &file_lock);

	/*
	 * Scan for a conflicting lock.  After sleeping we must rescan
	 * from the head of the list, because the lock list may have
	 * changed while we slept.
	 */
repeat:
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!conflict(&file_lock, fl))
			continue;

		/*
		 * F_SETLKW: wait for the conflicting lock.  Check for a
		 * pending signal both before and after sleeping so the
		 * syscall can be restarted promptly.
		 */
		if (cmd == F_SETLKW) {
			if (current->signal & ~current->blocked)
				return -ERESTARTSYS;
			interruptible_sleep_on(&fl->fl_wait);
			if (current->signal & ~current->blocked)
				return -ERESTARTSYS;
			goto repeat;
		}
		return -EAGAIN;
	}

	/* No conflict: install the lock, merging/splitting as needed. */
	return lock_it(filp, &file_lock);
}
170
171
172
173
174
175 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
176 {
177 struct file_lock *fl,*next;
178
179 for (fl = filp->f_inode->i_flock; fl != NULL; ) {
180
181
182
183
184 next = fl->fl_next;
185 if (fl->fl_owner == task)
186 free_lock(filp, fl);
187 fl = next;
188 }
189 }
190
191
192
193
194
195
196 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
197 {
198 off_t start;
199
200 if (!filp->f_inode)
201 return 0;
202 if (!S_ISREG(filp->f_inode->i_mode))
203 return 0;
204 if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
205 && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
206 return 0;
207 switch (l->l_whence) {
208 case 0 : start = 0; break;
209 case 1 : start = filp->f_pos; break;
210 case 2 : start = filp->f_inode->i_size; break;
211 default : return 0;
212 }
213 if ((start += l->l_start) < 0 || l->l_len < 0)
214 return 0;
215 fl->fl_type = l->l_type;
216 fl->fl_start = start;
217 fl->fl_whence = 0;
218 if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
219 fl->fl_end = OFFSET_MAX;
220 fl->fl_owner = current;
221 fl->fl_wait = NULL;
222 return 1;
223 }
224
225
226
227
228
229 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
230 {
231 if (caller_fl->fl_owner == sys_fl->fl_owner)
232 return 0;
233 if (!overlap(caller_fl, sys_fl))
234 return 0;
235 switch (caller_fl->fl_type) {
236 case F_RDLCK :
237 return sys_fl->fl_type != F_RDLCK;
238 case F_WRLCK :
239 return 1;
240 }
241 return 0;
242 }
243
244 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
245 {
246 if (fl1->fl_start <= fl2->fl_start) {
247 return fl1->fl_end >= fl2->fl_start;
248 } else {
249 return fl2->fl_end >= fl1->fl_start;
250 }
251 }
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
/*
 * Install 'caller' into the inode's lock list, reconciling it with
 * every other lock held by the same owner: adjacent or overlapping
 * locks of the same type are coalesced; overlapping locks of a
 * different type are shrunk, split, or removed so the new lock wins.
 * The caller must already have verified there is no conflict with
 * locks held by other owners.
 * Returns 0 on success, -ENOLCK if the lock table is exhausted.
 */
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl,*new;

	/* Grab a table slot up front; from here on 'caller' points at
	 * the list-resident copy, not the stack template. */
	if ((caller = alloc_lock(filp, caller)) == NULL)
		return -ENOLCK;

	/* Walk the whole list and reconcile each same-owner lock with
	 * the new one. */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (caller->fl_owner != fl->fl_owner)
			continue;
		if (caller == fl)
			continue;
		if (!overlap(caller, fl)) {
			/* Disjoint ranges: the only work left is merging
			 * equal-typed locks that touch end-to-end. */
			if (caller->fl_type != fl->fl_type)
				continue;
			if (caller->fl_end + 1 == fl->fl_start) {
				/* New lock ends right where 'fl' begins. */
				fl->fl_start = caller->fl_start;
				free_lock(filp, caller);
				caller = fl;

			} else if (caller->fl_start - 1 == fl->fl_end) {
				/* New lock begins right where 'fl' ends. */
				fl->fl_end = caller->fl_end;
				free_lock(filp, caller);
				caller = fl;

			}
			continue;
		}

		/* From here on the two ranges overlap. */
		if (caller->fl_type != fl->fl_type) {
			if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
				/*
				 * New lock sits strictly inside the old one:
				 * split the old lock into two pieces around it.
				 * No further locks can be affected, so return.
				 */
				if ((new = alloc_lock(filp, fl)) == NULL) {
					free_lock(filp, caller);
					return -ENOLCK;
				}
				fl->fl_end = caller->fl_start - 1;
				new->fl_start = caller->fl_end + 1;
				return 0;
			}
			if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
				/* New lock completely covers the old one:
				 * the old lock simply goes away. */
				free_lock(filp, fl);
				return 0;
			}
			if (caller->fl_end < fl->fl_end) {
				/* Overlap at the front of 'fl': trim its start. */
				fl->fl_start = caller->fl_end + 1;

			} else if (caller->fl_start > fl->fl_start) {
				/* Overlap at the back of 'fl': trim its end. */
				fl->fl_end = caller->fl_start - 1;

			} else {
				/* overlap() said yes but no case matched:
				 * internal inconsistency. */
				printk("VFS: lock_it: program bug: unanticipated overlap\n");
				free_lock(filp, caller);
				return -ENOLCK;
			}
		} else {
			/* Same type: absorb the new range into the existing
			 * lock and release the freshly allocated entry. */
			int grew = 0;

			if (caller->fl_start < fl->fl_start) {
				fl->fl_start = caller->fl_start;
				grew = 1;
			}
			if (caller->fl_end > fl->fl_end) {
				fl->fl_end = caller->fl_end;
				grew = 1;
			}
			free_lock(filp, caller);
			caller = fl;
			if (!grew)
				return 0;
			/* The merged lock grew, so keep scanning: it may now
			 * touch or overlap locks further down the list. */
		}
	}

	return 0;
}
373
374
375
376
377
378
379
/*
 * Remove the byte range described by 'caller' from this owner's
 * locks, deleting, shrinking or splitting existing locks as needed.
 * Returns 0 if at least one lock was affected, -EINVAL if none was,
 * -ENOLCK if a split required a table entry and none was free.
 */
static int unlock_it(struct file *filp, struct file_lock *caller)
{
	int one_unlocked = 0;
	struct file_lock *fl,*next;

	for (fl = filp->f_inode->i_flock; fl != NULL; ) {
		if (caller->fl_owner != fl->fl_owner || !overlap(caller, fl)) {
			fl = fl->fl_next;
			continue;
		}
		one_unlocked = 1;
		if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
			/*
			 * Unlock range strictly inside 'fl': split it into
			 * two pieces on either side of the hole.  Nothing
			 * else can overlap, so we are done.
			 */
			if ((next = alloc_lock(filp, fl)) == NULL)
				return -ENOLCK;
			fl->fl_end = caller->fl_start - 1;
			next->fl_start = caller->fl_end + 1;
			return 0;
		}

		/*
		 * Capture the successor before possibly freeing 'fl':
		 * free_lock() relinks fl->fl_next onto the free list.
		 */
		next = fl->fl_next;
		if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
			/* Whole lock is covered: remove it entirely. */
			free_lock(filp, fl);
		} else if (caller->fl_start > fl->fl_start) {
			/* Unlock range covers the tail of 'fl'. */
			fl->fl_end = caller->fl_start - 1;
		} else {
			/* Unlock range covers the head of 'fl'. */
			fl->fl_start = caller->fl_end + 1;
		}
		/*
		 * If 'fl' fully contained the unlock range we can stop early.
		 * NOTE(review): when 'fl' was freed just above, this still
		 * reads its fields; the entry sits on the free list with its
		 * range intact, but this relies on that implementation
		 * detail — confirm before restructuring free_lock().
		 */
		if (caller->fl_start >= fl->fl_start && caller->fl_end <= fl->fl_end)
			return 0;
		fl = next;

	}

	return one_unlocked ? 0 : -EINVAL;
}
436
437 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template)
438 {
439 struct file_lock *new;
440
441 if (file_lock_free_list == NULL)
442 return NULL;
443 if (file_lock_free_list->fl_owner != NULL)
444 panic("VFS: alloc_lock: broken free list\n");
445
446 new = file_lock_free_list;
447 file_lock_free_list = file_lock_free_list->fl_next;
448
449 *new = *template;
450
451 new->fl_next = filp->f_inode->i_flock;
452 filp->f_inode->i_flock = new;
453
454 new->fl_owner = current;
455 new->fl_wait = NULL;
456 return new;
457 }
458
459
460
461
462
463
464
465 static void free_lock(struct file *filp, struct file_lock *fl)
466 {
467 struct file_lock **fl_p;
468
469 if (fl->fl_owner == NULL)
470 panic("VFS: free_lock: broken lock list\n");
471
472
473
474
475
476
477 for (fl_p = &filp->f_inode->i_flock; *fl_p != NULL; fl_p = &(*fl_p)->fl_next) {
478 if (*fl_p == fl)
479 break;
480 }
481 if (*fl_p == NULL) {
482 printk("VFS: free_lock: lock is not in file's lock list\n");
483 } else {
484 *fl_p = (*fl_p)->fl_next;
485 }
486
487 fl->fl_next = file_lock_free_list;
488 file_lock_free_list = fl;
489 fl->fl_owner = NULL;
490
491 wake_up(&fl->fl_wait);
492 }