This source file includes following definitions.
- fcntl_init_locks
- fcntl_getlk
- fcntl_setlk
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- unlock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13 #include <asm/segment.h>
14
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/errno.h>
18 #include <linux/stat.h>
19 #include <linux/fcntl.h>
20
21 #define OFFSET_MAX 0x7fffffff
22
23 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
24 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
25 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
26 static int lock_it(struct file *filp, struct file_lock *caller);
27 static int unlock_it(struct file *filp, struct file_lock *caller);
28 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template);
29 static void free_lock(struct file *filp, struct file_lock *fl);
30
31 static struct file_lock file_lock_table[NR_FILE_LOCKS];
32 static struct file_lock *file_lock_free_list;
33
34
35
36
37
38 void fcntl_init_locks(void)
39 {
40 struct file_lock *fl;
41
42 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
43 fl->fl_next = fl + 1;
44 fl->fl_owner = NULL;
45 }
46 file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
47 file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
48 file_lock_free_list = &file_lock_table[0];
49 }
50
51 int fcntl_getlk(unsigned int fd, struct flock *l)
52 {
53 struct flock flock;
54 struct file *filp;
55 struct file_lock *fl,file_lock;
56
57 if (fd >= NR_OPEN || !(filp = current->filp[fd]))
58 return -EBADF;
59 verify_area(l, sizeof(*l));
60 memcpy_fromfs(&flock, l, sizeof(flock));
61 if (flock.l_type == F_UNLCK)
62 return -EINVAL;
63 if (!copy_flock(filp, &file_lock, &flock))
64 return -EINVAL;
65
66 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
67 if (conflict(&file_lock, fl)) {
68 flock.l_pid = fl->fl_owner->pid;
69 flock.l_start = fl->fl_start;
70 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
71 fl->fl_end - fl->fl_start + 1;
72 flock.l_whence = fl->fl_whence;
73 flock.l_type = fl->fl_type;
74 memcpy_tofs(l, &flock, sizeof(flock));
75 return 0;
76 }
77 }
78
79 flock.l_type = F_UNLCK;
80 memcpy_tofs(l, &flock, sizeof(flock));
81 return 0;
82 }
83
84
85
86
87
88 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
89 {
90 struct file *filp;
91 struct file_lock *fl,file_lock;
92 struct flock flock;
93
94
95
96
97
98 if (fd >= NR_OPEN || !(filp = current->filp[fd]))
99 return -EBADF;
100 verify_area(l, sizeof(*l));
101 memcpy_fromfs(&flock, l, sizeof(flock));
102 if (!copy_flock(filp, &file_lock, &flock))
103 return -EINVAL;
104 switch (file_lock.fl_type) {
105 case F_RDLCK :
106 if (!(filp->f_mode & 1))
107 return -EBADF;
108 break;
109 case F_WRLCK :
110 if (!(filp->f_mode & 2))
111 return -EBADF;
112 break;
113 case F_SHLCK :
114 if (!(filp->f_mode & 3))
115 return -EBADF;
116 file_lock.fl_type = F_RDLCK;
117 break;
118 case F_EXLCK :
119 if (!(filp->f_mode & 3))
120 return -EBADF;
121 file_lock.fl_type = F_WRLCK;
122 break;
123 case F_UNLCK :
124 break;
125 }
126
127
128
129
130
131 if (file_lock.fl_type == F_UNLCK)
132 return unlock_it(filp, &file_lock);
133
134
135
136
137
138 repeat:
139 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
140 if (!conflict(&file_lock, fl))
141 continue;
142
143
144
145
146
147 if (cmd == F_SETLKW) {
148 interruptible_sleep_on(&fl->fl_wait);
149 goto repeat;
150 }
151 return -EAGAIN;
152 }
153
154
155
156
157
158 return lock_it(filp, &file_lock);
159 }
160
161
162
163
164
165 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
166 {
167 struct file_lock *fl,*next;
168
169 for (fl = filp->f_inode->i_flock; fl != NULL; ) {
170
171
172
173
174 next = fl->fl_next;
175 if (fl->fl_owner == task)
176 free_lock(filp, fl);
177 fl = next;
178 }
179 }
180
181
182
183
184
185
186 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
187 {
188 off_t start;
189
190 if (!filp->f_inode)
191 return 0;
192 if (!S_ISREG(filp->f_inode->i_mode))
193 return 0;
194 if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
195 && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
196 return 0;
197 switch (l->l_whence) {
198 case 0 : start = 0; break;
199 case 1 : start = filp->f_pos; break;
200 case 2 : start = filp->f_inode->i_size; break;
201 default : return 0;
202 }
203 if ((start += l->l_start) < 0 || l->l_len < 0)
204 return 0;
205 fl->fl_type = l->l_type;
206 fl->fl_start = start;
207 fl->fl_whence = 0;
208 if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
209 fl->fl_end = OFFSET_MAX;
210 fl->fl_owner = current;
211 fl->fl_wait = NULL;
212 return 1;
213 }
214
215
216
217
218
219 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
220 {
221 if (caller_fl->fl_owner == sys_fl->fl_owner)
222 return 0;
223 if (!overlap(caller_fl, sys_fl))
224 return 0;
225 switch (caller_fl->fl_type) {
226 case F_RDLCK :
227 return sys_fl->fl_type != F_RDLCK;
228 case F_WRLCK :
229 return 1;
230 }
231 return 0;
232 }
233
234 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
235 {
236 if (fl1->fl_start <= fl2->fl_start) {
237 return fl1->fl_end >= fl2->fl_start;
238 } else {
239 return fl2->fl_end >= fl1->fl_start;
240 }
241 }
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
/*
 * Install the caller's lock on the inode, then walk the owner's
 * other locks and coalesce: adjacent or overlapping same-type
 * regions are merged into one table entry, and regions of a
 * different type are trimmed, split, or removed so the owner's
 * locks stay non-overlapping.
 * Returns 0 on success, -ENOLCK when the lock table is full.
 */
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl,*new;

	/*
	 * Grab a table entry first; from here on "caller" is a real
	 * lock already linked into the inode's list, so the loop must
	 * skip over it explicitly.
	 */
	if ((caller = alloc_lock(filp, caller)) == NULL)
		return -ENOLCK;

	/*
	 * Only the caller's own locks are touched; other owners'
	 * locks were already checked for conflicts by the caller.
	 */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (caller->fl_owner != fl->fl_owner)
			continue;
		if (caller == fl)
			continue;
		if (!overlap(caller, fl)) {
			/*
			 * Disjoint, but two same-type locks that touch
			 * end-to-end can still be fused into one entry.
			 */
			if (caller->fl_type != fl->fl_type)
				continue;
			if (caller->fl_end + 1 == fl->fl_start) {
				/* caller sits immediately before fl */
				fl->fl_start = caller->fl_start;
				free_lock(filp, caller);
				caller = fl;	/* keep merging from the survivor */

			} else if (caller->fl_start - 1 == fl->fl_end) {
				/* caller sits immediately after fl */
				fl->fl_end = caller->fl_end;
				free_lock(filp, caller);
				caller = fl;

			}
			continue;
		}

		/*
		 * Overlapping region.  Different types: the new lock
		 * wins over the overlapped part of the old one.
		 */
		if (caller->fl_type != fl->fl_type) {
			if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
				/*
				 * caller is strictly inside fl: split fl into
				 * two pieces around it.  On allocation failure
				 * back out the caller's lock entirely.
				 */
				if ((new = alloc_lock(filp, fl)) == NULL) {
					free_lock(filp, caller);
					return -ENOLCK;
				}
				fl->fl_end = caller->fl_start - 1;
				new->fl_start = caller->fl_end + 1;
				return 0;
			}
			if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
				/* caller swallows fl completely: drop fl.
				   NOTE(review): this returns without scanning
				   further locks the caller might also cover —
				   presumably relies on the owner's locks being
				   non-overlapping; verify for multi-lock spans. */
				free_lock(filp, fl);
				return 0;
			}
			if (caller->fl_end < fl->fl_end) {
				/* caller overlaps fl's head: trim it */
				fl->fl_start = caller->fl_end + 1;

			} else if (caller->fl_start > fl->fl_start) {
				/* caller overlaps fl's tail: trim it */
				fl->fl_end = caller->fl_start - 1;

			} else {
				printk("lock_it: program bug: unanticipated overlap\n");
				free_lock(filp, caller);
				return -ENOLCK;
			}
		} else {
			/* Same type: grow fl to cover the union and drop
			   the caller's separate entry. */
			int grew = 0;

			if (caller->fl_start < fl->fl_start) {
				fl->fl_start = caller->fl_start;
				grew = 1;
			}
			if (caller->fl_end > fl->fl_end) {
				fl->fl_end = caller->fl_end;
				grew = 1;
			}
			free_lock(filp, caller);
			caller = fl;
			if (!grew)
				return 0;	/* fully contained: nothing more to merge */

		}
	}

	/*
	 * NOTE(review): the scan only moves forward from each merge
	 * point; a merge that enlarges "caller" does not revisit
	 * earlier list entries — confirm the list ordering makes
	 * that safe.
	 */
	return 0;
}
363
364
365
366
367
368
369
370 static int unlock_it(struct file *filp, struct file_lock *caller)
371 {
372 int one_unlocked = 0;
373 struct file_lock *fl,*next;
374
375 for (fl = filp->f_inode->i_flock; fl != NULL; ) {
376 if (caller->fl_owner != fl->fl_owner || !overlap(caller, fl)) {
377 fl = fl->fl_next;
378 continue;
379 }
380 one_unlocked = 1;
381 if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
382
383
384
385
386 if ((next = alloc_lock(filp, fl)) == NULL)
387 return -ENOLCK;
388 fl->fl_end = caller->fl_start - 1;
389 next->fl_start = caller->fl_end + 1;
390 return 0;
391 }
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409 next = fl->fl_next;
410 if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
411 free_lock(filp, fl);
412 } else if (caller->fl_start > fl->fl_start) {
413 fl->fl_end = caller->fl_start - 1;
414 } else {
415
416 fl->fl_start = caller->fl_end + 1;
417 }
418 if (caller->fl_start >= fl->fl_start && caller->fl_end <= fl->fl_end)
419 return 0;
420 fl = next;
421
422 }
423
424 return one_unlocked ? 0 : -EINVAL;
425 }
426
427 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template)
428 {
429 struct file_lock *new;
430
431 if (file_lock_free_list == NULL)
432 return NULL;
433 if (file_lock_free_list->fl_owner != NULL)
434 panic("alloc_lock: broken free list\n");
435
436 new = file_lock_free_list;
437 file_lock_free_list = file_lock_free_list->fl_next;
438
439 *new = *template;
440
441 new->fl_next = filp->f_inode->i_flock;
442 filp->f_inode->i_flock = new;
443
444 new->fl_owner = current;
445 new->fl_wait = NULL;
446 return new;
447 }
448
449
450
451
452
453
454
455 static void free_lock(struct file *filp, struct file_lock *fl)
456 {
457 struct file_lock **fl_p;
458
459 if (fl->fl_owner == NULL)
460 panic("free_lock: broken lock list\n");
461
462
463
464
465
466
467 for (fl_p = &filp->f_inode->i_flock; *fl_p != NULL; fl_p = &(*fl_p)->fl_next) {
468 if (*fl_p == fl)
469 break;
470 }
471 if (*fl_p == NULL) {
472 printk("free_lock: lock is not in file's lock list\n");
473 } else {
474 *fl_p = (*fl_p)->fl_next;
475 }
476
477 fl->fl_next = file_lock_free_list;
478 file_lock_free_list = fl;
479 fl->fl_owner = NULL;
480
481 wake_up(&fl->fl_wait);
482 }