This source file includes the following definitions:
- fcntl_getlk
- fcntl_setlk
- locks_deadlocked
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
- free_list_garbage_collect

/* Define to enable detection of fcntl lock deadlocks. */
#define DEADLOCK_DETECTION

#include <asm/segment.h>

#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>

#define OFFSET_MAX ((off_t)0x7fffffff)

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
static int overlap(struct file_lock *fl1, struct file_lock *fl2);
static int lock_it(struct file *filp, struct file_lock *caller);
static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl);
static void free_lock(struct file_lock **fl);
static void free_list_garbage_collect(void);
#ifdef DEADLOCK_DETECTION
int locks_deadlocked(int my_pid, int blocked_pid);
#endif

/* Garbage-collect the free list once it has grown to this many entries. */
#define FREE_LIST_GARBAGE_COLLECT 20

static struct file_lock *file_lock_table = NULL;
static struct file_lock *file_lock_free_list = NULL;
static int free_list_cnt = 0;

int fcntl_getlk(unsigned int fd, struct flock *l)
{
	int error;
	struct flock flock;
	struct file *filp;
	struct file_lock *fl, file_lock;

	if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
		return -EBADF;
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (flock.l_type == F_UNLCK)
		return -EINVAL;
	if (!copy_flock(filp, &file_lock, &flock))
		return -EINVAL;

	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (conflict(&file_lock, fl)) {
			/* Report the first conflicting lock back to the caller. */
			flock.l_pid = fl->fl_owner->pid;
			flock.l_start = fl->fl_start;
			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
				fl->fl_end - fl->fl_start + 1;
			flock.l_whence = fl->fl_whence;
			flock.l_type = fl->fl_type;
			memcpy_tofs(l, &flock, sizeof(flock));
			return 0;
		}
	}

	/* No conflict: tell the caller the region is unlocked. */
	flock.l_type = F_UNLCK;
	memcpy_tofs(l, &flock, sizeof(flock));
	return 0;
}
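
/*
 * Example (userspace, not part of this file): probing a byte range with
 * F_GETLK. A minimal sketch assuming fd is an open descriptor and
 * <fcntl.h>/<stdio.h> are included; on return, l_type is F_UNLCK if the
 * range is free, otherwise the struct is overwritten with the details
 * of one conflicting lock, as the loop above shows:
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;	// would a write lock be possible?
 *	fl.l_whence = 0;	// l_start is relative to start of file
 *	fl.l_start = 0;
 *	fl.l_len = 0;		// zero length means "to end of file"
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("locked by pid %d\n", (int) fl.l_pid);
 */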

/*
 * Apply or remove a POSIX lock (F_SETLK), or block until it can be
 * applied (F_SETLKW).
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl, file_lock;
	struct flock flock;

	/*
	 * Fetch and validate the caller's arguments.
	 */
	if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
		return -EBADF;
	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock))
		return -EINVAL;
	switch (file_lock.fl_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))	/* file open for reading? */
			return -EBADF;
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))	/* file open for writing? */
			return -EBADF;
		break;
	case F_SHLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;	/* shared lock == read lock */
		break;
	case F_EXLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;	/* exclusive lock == write lock */
		break;
	case F_UNLCK :
		break;
	}

	/*
	 * Scan for a conflicting lock. An unlock request can never conflict.
	 */
	if (file_lock.fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!conflict(&file_lock, fl))
				continue;
			/*
			 * The region is locked by another process. For
			 * F_SETLKW, sleep until that lock is released and
			 * rescan from the start; otherwise fail at once.
			 */
			if (cmd == F_SETLKW) {
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
#ifdef DEADLOCK_DETECTION
				if (locks_deadlocked(file_lock.fl_owner->pid, fl->fl_owner->pid))
					return -EDEADLOCK;
#endif
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				goto repeat;
			}
			return -EAGAIN;
		}
	}

	/*
	 * No conflicts: install (or remove) the lock.
	 */
	return lock_it(filp, &file_lock);
}
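
/*
 * Example (userspace, not part of this file): taking a blocking write
 * lock and handling the errors the code above can produce. A sketch
 * only, assuming fd is open for writing and <errno.h> is included;
 * -ERESTARTSYS typically surfaces as EINTR (or a transparent restart),
 * and -EDEADLOCK as EDEADLK:
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = 0;
 *	fl.l_start = 0;
 *	fl.l_len = 100;		// lock bytes 0..99
 *	while (fcntl(fd, F_SETLKW, &fl) < 0) {
 *		if (errno == EINTR)	// interrupted by a signal
 *			continue;
 *		if (errno == EDEADLK)	// deadlock detection fired
 *			break;
 *		perror("fcntl");
 *		break;
 *	}
 */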

#ifdef DEADLOCK_DETECTION

/*
 * Deadlock detection: would it be safe for my_pid to block on a lock
 * held by blocked_pid? Walk every lock owned by my_pid, and for each
 * task already sleeping on one of those locks, recurse. If blocked_pid
 * turns up anywhere in that chain, blocking would close a cycle.
 */
int locks_deadlocked(int my_pid, int blocked_pid)
{
	int ret_val;
	struct wait_queue *dlock_wait;
	struct file_lock *fl;

	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
		if (fl->fl_owner == NULL)
			continue;	/* entry is on the free list */
		if (fl->fl_owner->pid != my_pid)
			continue;	/* not one of our locks */
		if (fl->fl_wait == NULL)
			continue;	/* no process sleeping on this lock */
		dlock_wait = fl->fl_wait;
		do {
			if (dlock_wait->task != NULL) {
				if (dlock_wait->task->pid == blocked_pid)
					return -EDEADLOCK;
				ret_val = locks_deadlocked(dlock_wait->task->pid, blocked_pid);
				if (ret_val)
					return -EDEADLOCK;
			}
			dlock_wait = dlock_wait->next;	/* the wait queue is circular */
		} while (dlock_wait != fl->fl_wait);
	}
	return 0;
}
#endif
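
/*
 * Illustration (not code from this file): the cycle detected above.
 * Process B holds lock L1 and process A holds lock L2; A is already
 * sleeping on L1's wait queue. When B now requests L2, fcntl_setlk()
 * calls locks_deadlocked(B, A): walking B's locks finds A sleeping on
 * L1, so B's F_SETLKW fails with -EDEADLOCK instead of sleeping
 * forever. The recursion also catches longer chains (A waits on C,
 * C waits on B).
 */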

/*
 * Remove all the locks that a given task holds on a file, e.g. when
 * the file is being closed.
 */
void fcntl_remove_locks(struct task_struct *task, struct file *filp)
{
	struct file_lock *fl;
	struct file_lock **before;

	/* Skip locks owned by other tasks: each owner's locks are kept
	   in one contiguous run of the i_flock list. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && task != fl->fl_owner)
		before = &fl->fl_next;

	/* Free the whole run belonging to this task. */
	while ((fl = *before) && task == fl->fl_owner)
		free_lock(before);
}

/*
 * Verify a "struct flock" from userspace and turn it into an absolute
 * byte range in a "struct file_lock". Returns 0 if the flock is
 * invalid, 1 on success.
 */
static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
{
	off_t start;

	if (!filp->f_inode)
		return 0;
	if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
	    && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
		return 0;
	switch (l->l_whence) {
	case 0 : start = 0; break;			/* SEEK_SET */
	case 1 : start = filp->f_pos; break;		/* SEEK_CUR */
	case 2 : start = filp->f_inode->i_size; break;	/* SEEK_END */
	default : return 0;
	}
	if ((start += l->l_start) < 0 || l->l_len < 0)
		return 0;
	fl->fl_type = l->l_type;
	fl->fl_start = start;	/* offsets are absolute from here on */
	fl->fl_whence = 0;
	if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
		fl->fl_end = OFFSET_MAX;	/* l_len == 0 means "to end of file" */
	fl->fl_owner = current;
	fl->fl_wait = NULL;
	return 1;
}
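
/*
 * Worked example (illustrative only): with f_pos == 1000, a caller
 * passing l_whence == 1, l_start == -200, l_len == 50 asks for absolute
 * bytes 800 through 849. copy_flock() computes start = 1000 - 200 = 800
 * and stores fl_start == 800, fl_end == 800 + 50 - 1 == 849, with
 * fl_whence forced to 0.
 */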

/*
 * Determine whether the caller's requested lock collides with an
 * existing one: same-owner locks never conflict, non-overlapping
 * ranges never conflict, and two read locks may share a range.
 */
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (caller_fl->fl_owner == sys_fl->fl_owner)
		return 0;	/* a process never conflicts with itself */
	if (!overlap(caller_fl, sys_fl))
		return 0;
	switch (caller_fl->fl_type) {
	case F_RDLCK :
		return sys_fl->fl_type != F_RDLCK;	/* read locks share */
	case F_WRLCK :
		return 1;	/* a write lock conflicts with anything */
	}
	return 0;	/* F_UNLCK never conflicts */
}

/* Two inclusive ranges overlap iff neither ends before the other starts. */
static int overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
}
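
/*
 * Example (illustrative only): [0,99] and [100,199] do not overlap
 * (99 >= 100 is false), while [0,100] and [100,199] do. Because the
 * ranges are inclusive, adjacent same-type locks such as [0,99] and
 * [100,199] still get merged by lock_it() below through its "+ 1" and
 * "- 1" adjacency checks, even though overlap() returns 0 for them.
 */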

/*
 * Install or remove the caller's lock, merging and splitting existing
 * locks as required. Overlapping or adjacent locks of the same type
 * are coalesced into one; a lock (or unlock) covering the middle of an
 * existing lock of a different type splits it into a left and a right
 * piece, which can require allocating one extra file_lock -- so even
 * an unlock can fail with -ENOLCK.
 */
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl;
	struct file_lock *left = 0;
	struct file_lock *right = 0;
	struct file_lock **before;
	int added = 0;

	/*
	 * Skip to the start of this owner's run of locks.
	 */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && caller->fl_owner != fl->fl_owner)
		before = &fl->fl_next;

	/*
	 * Walk this owner's locks, merging or splitting against the
	 * caller's range.
	 */
	while ((fl = *before) && caller->fl_owner == fl->fl_owner) {
		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce overlapping or adjacent ranges. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;	/* old lock ends before ours */
			if (fl->fl_start > caller->fl_end + 1)
				break;		/* old lock starts after ours */

			/*
			 * The ranges overlap or touch: grow both to their
			 * union, then keep only one of them.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				free_lock(before);
				continue;
			}
			caller = fl;
			added = 1;
			goto next_lock;
		}

		/* Different lock types: only true overlap matters. */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;
		if (fl->fl_start > caller->fl_end)
			break;
		if (caller->fl_type == F_UNLCK)
			added = 1;	/* an unlock never inserts a new lock */
		if (fl->fl_start < caller->fl_start)
			left = fl;	/* a left piece of the old lock survives */

		/*
		 * A right piece of the old lock survives; nothing further in
		 * the sorted run can overlap, so stop scanning.
		 */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {
			/* The old lock is completely covered by the new one. */
			if (added) {
				free_lock(before);
				continue;
			}

			/*
			 * Replace the old lock with the new one in place and
			 * wake up anybody waiting on it: the changed type may
			 * no longer conflict with what they want.
			 */
			wake_up(&fl->fl_wait);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			caller = fl;
			added = 1;
		}

	next_lock:
		before = &(*before)->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK) {
			/*
			 * Unlocking a range that was not locked: treat it as
			 * success, as POSIX requires (the old -EINVAL
			 * behaviour is kept under "#if 0" for reference).
			 */
#if 0
			return -EINVAL;
#else
			return 0;
#endif
		}
		if (!(caller = alloc_lock(before, caller)))
			return -ENOLCK;
	}
	if (right) {
		if (left == right) {
			/*
			 * The new lock punches a hole in the middle of one
			 * old lock, which must be split in two; allocate the
			 * second piece (this is why even an unlock can run
			 * out of memory).
			 */
			if (!(left = alloc_lock(before, right))) {
				if (!added)
					free_lock(before);
				return -ENOLCK;
			}
		}
		right->fl_start = caller->fl_end + 1;	/* trim the right piece */
	}
	if (left)
		left->fl_end = caller->fl_start - 1;	/* trim the left piece */
	return 0;
}
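
/*
 * Worked example (illustrative only): a process holds a read lock on
 * bytes [0,199] and now write-locks [50,149]. The types differ and the
 * new range sits inside the old one, so left == right == the old lock:
 * one file_lock is allocated for the caller and one more for the split,
 * leaving a read lock [0,49], a write lock [50,149], and a read lock
 * [150,199]. Unlocking [50,149] instead would leave read locks [0,49]
 * and [150,199], still needing one allocation -- which is why unlocking
 * can return -ENOLCK.
 */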

/*
 * Get a file_lock structure -- from the free list if possible,
 * otherwise freshly allocated and linked into file_lock_table -- copy
 * the caller's type and range into it, and insert it at *pos.
 */
static struct file_lock *alloc_lock(struct file_lock **pos,
				    struct file_lock *fl)
{
	struct file_lock *tmp;

	tmp = file_lock_free_list;
	if (tmp == NULL) {
		/* The free list is empty: allocate a new entry and link it
		   into the global lock table. */
		tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock), GFP_KERNEL);
		if (tmp == NULL)
			return NULL;	/* out of memory */
		tmp->fl_owner = NULL;
		tmp->fl_next = file_lock_free_list;
		tmp->fl_nextlink = file_lock_table;
		file_lock_table = tmp;
	} else {
		/* Take the first entry off the free list. */
		file_lock_free_list = tmp->fl_next;
		free_list_cnt--;
	}

	if (tmp->fl_owner != NULL)
		panic("alloc_lock: broken free list\n");

	/* Insert into the inode's lock list at the requested position. */
	tmp->fl_next = *pos;
	*pos = tmp;

	tmp->fl_owner = current;
	tmp->fl_wait = NULL;

	tmp->fl_type = fl->fl_type;
	tmp->fl_whence = fl->fl_whence;
	tmp->fl_start = fl->fl_start;
	tmp->fl_end = fl->fl_end;

	return tmp;
}

/*
 * Unlink a lock from its inode's list, put it on the free list, and
 * wake up anybody who was waiting for it.
 */
static void free_lock(struct file_lock **fl_p)
{
	struct file_lock *fl;

	fl = *fl_p;
	if (fl->fl_owner == NULL)	/* sanity check: it must be in use */
		panic("free_lock: broken lock list\n");

	*fl_p = (*fl_p)->fl_next;

	fl->fl_next = file_lock_free_list;
	file_lock_free_list = fl;
	fl->fl_owner = NULL;	/* mark it free */

	free_list_cnt++;
	if (free_list_cnt == FREE_LIST_GARBAGE_COLLECT)
		free_list_garbage_collect();

	wake_up(&fl->fl_wait);
}

static void free_list_garbage_collect(void)
{
	/* Do nothing for now: a placeholder for a collector that would
	   return free-list entries to the kernel allocator. */
	return;
}
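
/*
 * Note on the allocator design (illustrative summary): file_lock
 * entries live on two chains at once. fl_nextlink links every entry
 * ever kmalloc'd into file_lock_table, which locks_deadlocked() walks,
 * while fl_next threads an entry either into some inode's i_flock list
 * (fl_owner != NULL) or into file_lock_free_list (fl_owner == NULL).
 * free_lock() therefore recycles entries rather than kfree()ing them,
 * and free_list_garbage_collect() above is the hook where real
 * reclamation would go.
 */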