This source file includes following definitions.
- fcntl_getlk
- fcntl_setlk
- locks_deadlocked
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33 #define DEADLOCK_DETECTION
34
35 #include <asm/segment.h>
36
37 #include <linux/malloc.h>
38 #include <linux/sched.h>
39 #include <linux/kernel.h>
40 #include <linux/errno.h>
41 #include <linux/stat.h>
42 #include <linux/fcntl.h>
43
44 #define OFFSET_MAX ((off_t)0x7fffffff)
45
46 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
47 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
48 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
49 static int lock_it(struct file *filp, struct file_lock *caller);
50 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl);
51 static void free_lock(struct file_lock **fl);
52 #ifdef DEADLOCK_DETECTION
53 int locks_deadlocked(int my_pid,int blocked_pid);
54 #endif
55
56 static struct file_lock *file_lock_table = NULL;
57
58 int fcntl_getlk(unsigned int fd, struct flock *l)
59 {
60 int error;
61 struct flock flock;
62 struct file *filp;
63 struct file_lock *fl,file_lock;
64
65 if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
66 return -EBADF;
67 error = verify_area(VERIFY_WRITE,l, sizeof(*l));
68 if (error)
69 return error;
70 memcpy_fromfs(&flock, l, sizeof(flock));
71 if (flock.l_type == F_UNLCK)
72 return -EINVAL;
73 if (!copy_flock(filp, &file_lock, &flock))
74 return -EINVAL;
75
76 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
77 if (conflict(&file_lock, fl)) {
78 flock.l_pid = fl->fl_owner->pid;
79 flock.l_start = fl->fl_start;
80 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
81 fl->fl_end - fl->fl_start + 1;
82 flock.l_whence = fl->fl_whence;
83 flock.l_type = fl->fl_type;
84 memcpy_tofs(l, &flock, sizeof(flock));
85 return 0;
86 }
87 }
88
89 flock.l_type = F_UNLCK;
90 memcpy_tofs(l, &flock, sizeof(flock));
91 return 0;
92 }
93
94
95
96
97
/*
 * fcntl() F_SETLK/F_SETLKW handler: apply the lock described by the
 * user's struct flock at `l` to open file `fd`.  F_SETLKW sleeps until
 * conflicting locks go away (or a signal arrives); F_SETLK fails with
 * -EAGAIN instead of sleeping.
 *
 * Returns 0 on success, or a negative errno:
 *   -EBADF       bad descriptor, or lock type incompatible with f_mode
 *   -EINVAL      malformed flock structure
 *   -EAGAIN      F_SETLK blocked by an existing lock
 *   -ERESTARTSYS signal pending before or after sleeping
 *   -EDEADLOCK   waiting would deadlock (DEADLOCK_DETECTION only)
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl,file_lock;
	struct flock flock;

	/* Fetch and sanity-check the user's request. */
	if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
		return -EBADF;
	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock))
		return -EINVAL;

	/* The lock type must be permitted by the open mode: f_mode bit 0
	 * is read permission, bit 1 write.  F_SHLCK/F_EXLCK are accepted
	 * and mapped onto the POSIX read/write lock types. */
	switch (file_lock.fl_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return -EBADF;
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return -EBADF;
		break;
	case F_SHLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;
		break;
	case F_UNLCK :
		break;
	}

	/* Unless unlocking, scan for conflicting locks; an unlock can
	 * never conflict and goes straight to lock_it(). */
	if (file_lock.fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!conflict(&file_lock, fl))
				continue;
			/* Conflict found.  For F_SETLKW: sleep on the
			 * blocking lock's wait queue, then rescan from
			 * scratch on wakeup, since the lock list may have
			 * changed while we slept. */
			if (cmd == F_SETLKW) {
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
#ifdef DEADLOCK_DETECTION
				/* Refuse to sleep if waiting on fl's owner
				 * would close a wait-for cycle. */
				if (locks_deadlocked(file_lock.fl_owner->pid,fl->fl_owner->pid))
					return -EDEADLOCK;
#endif
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				goto repeat;
			}
			return -EAGAIN;
		}
	}

	/* No conflicts (or an unlock request): merge it into the
	 * inode's lock list. */
	return lock_it(filp, &file_lock);
}
175
176 #ifdef DEADLOCK_DETECTION
177
178
179
180
181
182
/*
 * Return nonzero (-EDEADLOCK) if letting process `my_pid` sleep while
 * waiting on process `blocked_pid` would create a cycle in the
 * wait-for graph, i.e. if `blocked_pid` is already (directly or
 * transitively) sleeping on a lock that `my_pid` holds.
 *
 * Scans the global lock table for locks owned by `my_pid`, walks each
 * lock's circular wait queue, and recurses on every sleeper found.
 *
 * NOTE(review): recursion depth is bounded only by the length of the
 * wait-for chain — presumably fine for typical locker counts, but
 * worth confirming against kernel stack limits.
 */
int locks_deadlocked(int my_pid,int blocked_pid)
{
	int ret_val;
	struct wait_queue *dlock_wait;
	struct file_lock *fl;
	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
		/* Only locks held by my_pid with sleepers are relevant. */
		if (fl->fl_owner == NULL) continue;
		if (fl->fl_owner->pid != my_pid) continue;
		if (fl->fl_wait == NULL) continue;
		dlock_wait = fl->fl_wait;
		/* The wait queue is circular: traverse until we are back
		 * at the starting entry. */
		do {
			if (dlock_wait->task != NULL) {
				if (dlock_wait->task->pid == blocked_pid)
					return -EDEADLOCK;
				/* Would this sleeper itself end up waiting
				 * on blocked_pid? */
				ret_val = locks_deadlocked(dlock_wait->task->pid,blocked_pid);
				if (ret_val)
					return -EDEADLOCK;
			}
			dlock_wait = dlock_wait->next;
		} while (dlock_wait != fl->fl_wait);
	}
	return 0;
}
206 #endif
207
208
209
210
211
212 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
213 {
214 struct file_lock *fl;
215 struct file_lock **before;
216
217
218
219 before = &filp->f_inode->i_flock;
220 while ((fl = *before) && task != fl->fl_owner)
221 before = &fl->fl_next;
222
223
224
225 while ((fl = *before) && task == fl->fl_owner)
226 free_lock(before);
227 }
228
229
230
231
232
233
234 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
235 {
236 off_t start;
237
238 if (!filp->f_inode)
239 return 0;
240 if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
241 && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
242 return 0;
243 switch (l->l_whence) {
244 case 0 : start = 0; break;
245 case 1 : start = filp->f_pos; break;
246 case 2 : start = filp->f_inode->i_size; break;
247 default : return 0;
248 }
249 if ((start += l->l_start) < 0 || l->l_len < 0)
250 return 0;
251 fl->fl_type = l->l_type;
252 fl->fl_start = start;
253 fl->fl_whence = 0;
254 if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
255 fl->fl_end = OFFSET_MAX;
256 fl->fl_owner = current;
257 fl->fl_wait = NULL;
258 return 1;
259 }
260
261
262
263
264
265 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
266 {
267 if (caller_fl->fl_owner == sys_fl->fl_owner)
268 return 0;
269 if (!overlap(caller_fl, sys_fl))
270 return 0;
271 switch (caller_fl->fl_type) {
272 case F_RDLCK :
273 return sys_fl->fl_type != F_RDLCK;
274 case F_WRLCK :
275 return 1;
276 }
277 return 0;
278 }
279
280 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
281 {
282 return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
283 }
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
/*
 * Install, merge, or remove the lock described by `caller` in the
 * per-inode lock list of `filp`, implementing POSIX record-lock
 * semantics: adjacent/overlapping locks of the same type and owner are
 * coalesced; overlapped locks of a different type are replaced or
 * trimmed; a request overlapping only the middle of an old lock splits
 * it in two; F_UNLCK removes/trims the overlapped region.
 *
 * The per-owner run in the list is kept sorted by fl_start, which the
 * early-`break` tests below rely on.
 *
 * Returns 0 on success, -ENOLCK if a required allocation fails.
 */
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl;
	struct file_lock *left = 0;   /* old lock protruding on the left */
	struct file_lock *right = 0;  /* old lock protruding on the right */
	struct file_lock **before;
	int added = 0;  /* set once caller's range is represented in the list */

	/* Skip to this owner's locks; one owner's locks are contiguous
	 * in the per-inode list. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && caller->fl_owner != fl->fl_owner)
		before = &fl->fl_next;

	/* Walk the owner's locks, merging/trimming against the request. */
	while ((fl = *before) && caller->fl_owner == fl->fl_owner) {

		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce touching or overlapping
			 * ranges.  Old lock entirely before the request
			 * (with a gap): not mergeable, keep walking. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/* Old lock entirely after the request (with a
			 * gap): sorted order means nothing further can
			 * merge either. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* Expand both ranges to their union. */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Request already represented: this old
				 * lock is now redundant. */
				free_lock(before);
				continue;
			}
			/* Reuse this old lock as the merged result. */
			caller = fl;
			added = 1;
			goto next_lock;
		}

		/* Different type: skip old locks that don't overlap. */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;
		if (fl->fl_start > caller->fl_end)
			break;
		/* An unlock request never inserts a lock of its own. */
		if (caller->fl_type == F_UNLCK)
			added = 1;
		if (fl->fl_start < caller->fl_start)
			left = fl;

		/* Old lock extends past the request on the right: it must
		 * survive (possibly as the right half of a split); fixed
		 * up after the loop. */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {

			/* Old lock's remainder lies entirely inside the
			 * request; if the request is already in the list,
			 * just delete the old lock. */
			if (added) {
				free_lock(before);
				continue;
			}

			/* Otherwise convert the old lock into the request
			 * in place.  Its type changes, so wake sleepers to
			 * let them re-evaluate their conflicts. */
			wake_up(&fl->fl_wait);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			caller = fl;
			added = 1;
		}

		/* Advance to the next lock in the list. */
	next_lock:
		before = &(*before)->fl_next;
	}

	if (! added) {
		if (caller->fl_type == F_UNLCK) {

			/* Unlocking a range nobody had locked: POSIX says
			 * this succeeds, hence the disabled -EINVAL. */
#if 0
			return -EINVAL;
#else
			return 0;
#endif
		}
		if (! (caller = alloc_lock(before, caller)))
			return -ENOLCK;
	}
	if (right) {
		if (left == right) {

			/* The request punched a hole in the middle of a
			 * single old lock: clone it so one copy can cover
			 * each side.  On allocation failure undo the
			 * insertion made above, if any. */
			if (! (left = alloc_lock(before, right))) {
				if (! added)
					free_lock(before);
				return -ENOLCK;
			}
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return 0;
}
447
448
449
450
451 static struct file_lock *alloc_lock(struct file_lock **pos,
452 struct file_lock *fl)
453 {
454 struct file_lock *tmp;
455
456
457 tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock), GFP_KERNEL);
458 if (!tmp)
459 return tmp;
460 tmp->fl_nextlink = file_lock_table;
461 tmp->fl_prevlink = NULL;
462 if (file_lock_table != NULL)
463 file_lock_table->fl_prevlink = tmp;
464 file_lock_table = tmp;
465
466 tmp->fl_next = *pos;
467 *pos = tmp;
468
469 tmp->fl_owner = current;
470 tmp->fl_wait = NULL;
471
472 tmp->fl_type = fl->fl_type;
473 tmp->fl_whence = fl->fl_whence;
474 tmp->fl_start = fl->fl_start;
475 tmp->fl_end = fl->fl_end;
476
477 return tmp;
478 }
479
480
481
482
483
484 static void free_lock(struct file_lock **fl_p)
485 {
486 struct file_lock *fl;
487
488 fl = *fl_p;
489 *fl_p = (*fl_p)->fl_next;
490
491 if (fl->fl_nextlink != NULL)
492 fl->fl_nextlink->fl_prevlink = fl->fl_prevlink;
493
494 if (fl->fl_prevlink != NULL)
495 fl->fl_prevlink->fl_nextlink = fl->fl_nextlink;
496 else
497 file_lock_table = fl->fl_nextlink;
498
499 wake_up(&fl->fl_wait);
500
501 kfree(fl);
502
503 return;
504 }