This source file includes the following definitions:
- fcntl_getlk
- fcntl_setlk
- locks_deadlocked
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
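
These routines implement advisory record locking for the fcntl() system call. For orientation, here is a minimal user-space sketch of the interface they serve, assuming a hypothetical file "data" (F_SETLKW, handled by fcntl_setlk below, waits until the region is free; F_SETLK would fail with EAGAIN instead):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct flock fl;
        int fd = open("data", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        fl.l_type = F_WRLCK;            /* exclusive (write) lock */
        fl.l_whence = 0;                /* SEEK_SET: l_start is absolute */
        fl.l_start = 0;
        fl.l_len = 100;                 /* bytes 0-99; 0 means "to end of file" */
        if (fcntl(fd, F_SETLKW, &fl) == -1) {
            perror("F_SETLKW");
            return 1;
        }
        /* ... region locked; update the file ... */
        fl.l_type = F_UNLCK;            /* release the region */
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return 0;
    }

The locks are advisory: they constrain only processes that also call fcntl(), not ordinary read() and write().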

/*
 *  linux/fs/locks.c
 *
 *  POSIX-style fcntl() advisory record locking (F_GETLK, F_SETLK,
 *  F_SETLKW), with optional deadlock detection.
 */

#define DEADLOCK_DETECTION      /* make F_SETLKW fail with -EDEADLOCK on wait cycles */

#include <asm/segment.h>

#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>

#define OFFSET_MAX ((off_t)0x7fffffff)  /* largest representable file offset */

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
                      unsigned int fd);
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
static int overlap(struct file_lock *fl1, struct file_lock *fl2);
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd);
static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl,
                                    unsigned int fd);
static void free_lock(struct file_lock **fl);
#ifdef DEADLOCK_DETECTION
int locks_deadlocked(int my_pid, int blocked_pid);
#endif

static struct file_lock *file_lock_table = NULL;
static struct file_lock *file_lock_free_list = NULL;

/*
 * Implements the F_GETLK command of fcntl(): report the first lock that
 * would block the caller's request, or set l_type to F_UNLCK if the
 * region is free.
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
        int error;
        struct flock flock;
        struct file *filp;
        struct file_lock *fl, file_lock;

        if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
                return -EBADF;
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (flock.l_type == F_UNLCK)    /* cannot ask about an unlock */
                return -EINVAL;
        if (!copy_flock(filp, &file_lock, &flock, fd))
                return -EINVAL;

        /* Look for the first lock that conflicts with the request. */
        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (conflict(&file_lock, fl)) {
                        flock.l_pid = fl->fl_owner->pid;
                        flock.l_start = fl->fl_start;
                        flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
                                fl->fl_end - fl->fl_start + 1;
                        flock.l_whence = fl->fl_whence;
                        flock.l_type = fl->fl_type;
                        memcpy_tofs(l, &flock, sizeof(flock));
                        return 0;
                }
        }

        /* No conflicting lock: tell the caller the region is free. */
        flock.l_type = F_UNLCK;
        memcpy_tofs(l, &flock, sizeof(flock));
        return 0;
}

/*
 * Implements the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
        int error;
        struct file *filp;
        struct file_lock *fl, file_lock;
        struct flock flock;

        /*
         * Get arguments and validate them.
         */
        if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
                return -EBADF;
        error = verify_area(VERIFY_READ, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (!copy_flock(filp, &file_lock, &flock, fd))
                return -EINVAL;
        switch (file_lock.fl_type) {
        case F_RDLCK:
                if (!(filp->f_mode & 1))        /* open for reading? */
                        return -EBADF;
                break;
        case F_WRLCK:
                if (!(filp->f_mode & 2))        /* open for writing? */
                        return -EBADF;
                break;
        case F_SHLCK:
                if (!(filp->f_mode & 3))        /* open for reading or writing? */
                        return -EBADF;
                file_lock.fl_type = F_RDLCK;    /* treated as a shared (read) lock */
                break;
        case F_EXLCK:
                if (!(filp->f_mode & 3))
                        return -EBADF;
                file_lock.fl_type = F_WRLCK;    /* treated as an exclusive (write) lock */
                break;
        case F_UNLCK:
                break;
        }

        /*
         * Scan for a conflicting lock.  For F_SETLKW, sleep until the
         * conflict goes away, then rescan from the start of the list.
         */
        if (file_lock.fl_type != F_UNLCK) {
repeat:
                for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                        if (!conflict(&file_lock, fl))
                                continue;
                        /*
                         * The file is locked by another process.  If this
                         * is F_SETLKW, wait for the lock to be released.
                         */
                        if (cmd == F_SETLKW) {
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
#ifdef DEADLOCK_DETECTION
                                if (locks_deadlocked(file_lock.fl_owner->pid,
                                                     fl->fl_owner->pid))
                                        return -EDEADLOCK;
#endif
                                interruptible_sleep_on(&fl->fl_wait);
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
                                goto repeat;
                        }
                        return -EAGAIN;
                }
        }

        /*
         * No conflicts (or an unlock request): add or remove the lock.
         */
        return lock_it(filp, &file_lock, fd);
}

#ifdef DEADLOCK_DETECTION
/*
 * Deadlock check, called before my_pid goes to sleep on a lock held by
 * blocked_pid: returns non-zero if blocked_pid is already waiting,
 * directly or through a chain of waiters, on a lock that my_pid holds.
 */
int locks_deadlocked(int my_pid, int blocked_pid)
{
        int ret_val;
        struct wait_queue *dlock_wait;
        struct file_lock *fl;

        for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
                if (fl->fl_owner == NULL)
                        continue;       /* lock is on the free list */
                if (fl->fl_owner->pid != my_pid)
                        continue;
                if (fl->fl_wait == NULL)
                        continue;       /* no processes sleeping on this lock */
                /* Check every task sleeping on this lock's wait queue. */
                dlock_wait = fl->fl_wait;
                do {
                        if (dlock_wait->task != NULL) {
                                if (dlock_wait->task->pid == blocked_pid)
                                        return -EDEADLOCK;
                                ret_val = locks_deadlocked(dlock_wait->task->pid,
                                                           blocked_pid);
                                if (ret_val)
                                        return -EDEADLOCK;
                        }
                        dlock_wait = dlock_wait->next;
                } while (dlock_wait != fl->fl_wait);
        }
        return 0;
}
#endif
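
With DEADLOCK_DETECTION enabled, an F_SETLKW that would complete a cycle of waiters fails instead of sleeping forever. A two-process sketch of the situation the check catches, assuming a hypothetical file "lockfile" (user space may see the error spelled EDEADLK or EDEADLOCK depending on the headers):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    /* Lock, unlock, or wait for one byte at the given offset. */
    static int region(int fd, int type, long start, int cmd)
    {
        struct flock fl;

        fl.l_type = type;
        fl.l_whence = 0;                /* SEEK_SET */
        fl.l_start = start;
        fl.l_len = 1;
        return fcntl(fd, cmd, &fl);
    }

    int main(void)
    {
        int fd = open("lockfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        region(fd, F_WRLCK, 0, F_SETLK);        /* parent owns byte 0 */
        if (fork() == 0) {
            region(fd, F_WRLCK, 1, F_SETLK);    /* child owns byte 1 */
            region(fd, F_WRLCK, 0, F_SETLKW);   /* child blocks on byte 0 */
            _exit(0);
        }
        sleep(1);                               /* let the child block first */
        if (region(fd, F_WRLCK, 1, F_SETLKW) == -1)
            perror("F_SETLKW");                 /* expected: deadlock error */
        region(fd, F_UNLCK, 0, F_SETLK);        /* unblock the child */
        wait(NULL);
        return 0;
    }

The child inherits the descriptor, but lock ownership in this code is the (task, fd) pair, so parent and child count as different owners.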

/*
 * Called when a file descriptor is closed: remove every lock that this
 * task placed on the file through this descriptor.
 */
void fcntl_remove_locks(struct task_struct *task, struct file *filp,
                        unsigned int fd)
{
        struct file_lock *fl;
        struct file_lock **before;

        /* Skip locks belonging to other owners or other descriptors. */
        before = &filp->f_inode->i_flock;
        while ((fl = *before) && (task != fl->fl_owner || fd != fl->fl_fd))
                before = &fl->fl_next;

        /* This owner's locks sit together in the list: free the whole run. */
        while ((fl = *before) && task == fl->fl_owner && fd == fl->fl_fd)
                free_lock(before);
}

/*
 * Verify a "struct flock" and copy it into a "struct file_lock",
 * resolving l_whence/l_start/l_len into an absolute [fl_start, fl_end]
 * byte range.  Returns 0 if the flock is invalid.
 */
static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
                      unsigned int fd)
{
        off_t start;

        if (!filp->f_inode)     /* just in case */
                return 0;
        if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
         && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
                return 0;
        switch (l->l_whence) {
        case 0: start = 0; break;                       /* SEEK_SET */
        case 1: start = filp->f_pos; break;             /* SEEK_CUR */
        case 2: start = filp->f_inode->i_size; break;   /* SEEK_END */
        default: return 0;
        }
        if ((start += l->l_start) < 0 || l->l_len < 0)
                return 0;
        fl->fl_type = l->l_type;
        fl->fl_start = start;   /* we record the absolute position... */
        fl->fl_whence = 0;      /* ...so the stored offset is always 0 */
        if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
                fl->fl_end = OFFSET_MAX;        /* l_len == 0 means "to EOF" */
        fl->fl_owner = current;
        fl->fl_fd = fd;
        fl->fl_wait = NULL;
        return 1;
}

/*
 * Determine whether lock sys_fl blocks lock caller_fl.  Locks placed by
 * the same task through the same descriptor never conflict.
 */
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        if (caller_fl->fl_owner == sys_fl->fl_owner
         && caller_fl->fl_fd == sys_fl->fl_fd)
                return 0;
        if (!overlap(caller_fl, sys_fl))
                return 0;
        switch (caller_fl->fl_type) {
        case F_RDLCK:
                return sys_fl->fl_type != F_RDLCK;      /* reads conflict only with writes */
        case F_WRLCK:
                return 1;       /* writes conflict with everything */
        }
        return 0;       /* F_UNLCK conflicts with nothing */
}

/* Two ranges overlap if neither ends before the other starts. */
static int overlap(struct file_lock *fl1, struct file_lock *fl2)
{
        return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
}

/*
 * Add a lock to a file's lock list (or remove one: fl_type == F_UNLCK).
 *
 * All locks belonging to one (owner, fd) pair sit together in the list,
 * sorted by starting offset.  A new lock is merged with any overlapping
 * or adjacent lock of the same type; an overlapping lock of a different
 * type is shrunk, replaced, or split into a "left" and a "right"
 * fragment around the new range.  No conflicts can arise here: locks of
 * other owners were dealt with by the caller, and an owner may always
 * replace its own locks.
 *
 * Returns 0 on success, -ENOLCK if no memory is left for a new lock.
 */
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
        struct file_lock *fl;
        struct file_lock *left = NULL;
        struct file_lock *right = NULL;
        struct file_lock **before;
        int added = 0;

        /*
         * Skip past locks belonging to other owners or other descriptors.
         */
        before = &filp->f_inode->i_flock;
        while ((fl = *before) &&
               (caller->fl_owner != fl->fl_owner ||
                caller->fl_fd != fl->fl_fd))
                before = &fl->fl_next;

        /*
         * Walk this owner's locks and reconcile each with the request.
         */
        while ((fl = *before)
            && caller->fl_owner == fl->fl_owner
            && caller->fl_fd == fl->fl_fd) {
                /*
                 * Locks of the same type can simply be merged.
                 */
                if (caller->fl_type == fl->fl_type) {
                        if (fl->fl_end < caller->fl_start - 1)
                                goto next_lock; /* not even adjacent yet */
                        /*
                         * The list is sorted, so once a lock starts past
                         * the end of the request, we are done.
                         */
                        if (fl->fl_start > caller->fl_end + 1)
                                break;

                        /*
                         * The ranges overlap or touch: widen both the
                         * request and this lock to their union.  The
                         * first lock merged this way absorbs the request;
                         * any later lock swallowed by the union is freed.
                         */
                        if (fl->fl_start > caller->fl_start)
                                fl->fl_start = caller->fl_start;
                        else
                                caller->fl_start = fl->fl_start;
                        if (fl->fl_end < caller->fl_end)
                                fl->fl_end = caller->fl_end;
                        else
                                caller->fl_end = fl->fl_end;
                        if (added) {
                                free_lock(before);
                                continue;
                        }
                        caller = fl;
                        added = 1;
                        goto next_lock;
                }

                /*
                 * Different types: carve the request's range out of the
                 * existing lock.
                 */
                if (fl->fl_end < caller->fl_start)
                        goto next_lock;
                if (fl->fl_start > caller->fl_end)
                        break;
                if (caller->fl_type == F_UNLCK)
                        added = 1;      /* an unlock installs no new lock */
                if (fl->fl_start < caller->fl_start)
                        left = fl;      /* a fragment survives on the left */
                /*
                 * If the old lock reaches past the request, a fragment
                 * survives on the right as well, and no later lock can
                 * overlap the request.
                 */
                if (fl->fl_end > caller->fl_end) {
                        right = fl;
                        break;
                }
                if (fl->fl_start >= caller->fl_start) {
                        /*
                         * The old lock is completely covered: free it if
                         * the request has already been installed...
                         */
                        if (added) {
                                free_lock(before);
                                continue;
                        }
                        /*
                         * ...otherwise convert it in place into the new
                         * lock.  Its type changes, so wake any sleepers
                         * to let them re-check for conflicts.
                         */
                        wake_up(&fl->fl_wait);
                        fl->fl_start = caller->fl_start;
                        fl->fl_end = caller->fl_end;
                        fl->fl_type = caller->fl_type;
                        caller = fl;
                        added = 1;
                }

                /*
                 * Go on to the next lock.
                 */
next_lock:
                before = &(*before)->fl_next;
        }

        if (!added) {
                if (caller->fl_type == F_UNLCK) {
                        /*
                         * Unlock of a region nobody had locked.  POSIX
                         * says this is not an error, so return 0 rather
                         * than -EINVAL.
                         */
#if 0
                        return -EINVAL;
#else
                        return 0;
#endif
                }
                if (!(caller = alloc_lock(before, caller, fd)))
                        return -ENOLCK;
        }
        if (right) {
                if (left == right) {
                        /*
                         * One old lock surrounds the request on both
                         * sides: clone it for the left fragment, then
                         * trim the original down to the right fragment.
                         */
                        if (!(left = alloc_lock(before, right, fd))) {
                                if (!added)
                                        free_lock(before);
                                return -ENOLCK;
                        }
                }
                right->fl_start = caller->fl_end + 1;
        }
        if (left)
                left->fl_end = caller->fl_start - 1;
        return 0;
}
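
The splitting logic above is easiest to observe from user space: write-locking a range and then unlocking its middle leaves two fragments. A sketch assuming a hypothetical file "data"; the probing runs in a child process because F_GETLK never reports the caller's own locks (conflict() skips them):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    /* Ask the kernel which lock, if any, blocks a write lock on the range. */
    static void probe(int fd, long start, long len)
    {
        struct flock fl;

        fl.l_type = F_WRLCK;
        fl.l_whence = 0;                /* SEEK_SET */
        fl.l_start = start;
        fl.l_len = len;
        if (fcntl(fd, F_GETLK, &fl) == -1)
            perror("F_GETLK");
        else if (fl.l_type == F_UNLCK)
            printf("bytes %ld..%ld: free\n", start, start + len - 1);
        else
            printf("bytes %ld..%ld: locked from %ld, len %ld, pid %d\n",
                   start, start + len - 1, (long)fl.l_start,
                   (long)fl.l_len, (int)fl.l_pid);
    }

    int main(void)
    {
        struct flock fl;
        int fd = open("data", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        fl.l_type = F_WRLCK;
        fl.l_whence = 0;
        fl.l_start = 0;
        fl.l_len = 100;
        fcntl(fd, F_SETLK, &fl);        /* lock bytes 0-99 */

        fl.l_type = F_UNLCK;
        fl.l_start = 40;
        fl.l_len = 20;
        fcntl(fd, F_SETLK, &fl);        /* punch a hole: bytes 40-59 */

        if (fork() == 0) {
            probe(fd, 0, 40);           /* expect: locked 0..39 */
            probe(fd, 40, 20);          /* expect: free */
            probe(fd, 60, 40);          /* expect: locked 60..99 */
            _exit(0);
        }
        wait(NULL);
        return 0;
    }

The unlock takes the left == right path in lock_it: the original 0-99 lock is cloned, the clone is trimmed to 0-39, and the original becomes 60-99.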

/*
 * Take a lock structure from the free list, or allocate a fresh one and
 * link it into the global lock table; then insert it at *pos in the
 * inode's lock list, initialized from fl.  Returns NULL if out of memory.
 */
static struct file_lock *alloc_lock(struct file_lock **pos,
                                    struct file_lock *fl,
                                    unsigned int fd)
{
        struct file_lock *tmp;

        tmp = file_lock_free_list;
        if (tmp == NULL) {
                /* The free list is empty: allocate a new structure. */
                tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
                                                  GFP_KERNEL);
                if (tmp == NULL)
                        return NULL;    /* caller turns this into -ENOLCK */
                tmp->fl_owner = NULL;
                tmp->fl_next = file_lock_free_list;
                tmp->fl_nextlink = file_lock_table;
                file_lock_table = tmp;
        } else {
                /* Recycle the head of the free list. */
                file_lock_free_list = tmp->fl_next;
        }

        if (tmp->fl_owner != NULL)
                panic("alloc_lock: broken free list\n");

        tmp->fl_next = *pos;    /* insert into the inode's lock list */
        *pos = tmp;

        tmp->fl_owner = current;
        tmp->fl_fd = fd;
        tmp->fl_wait = NULL;

        tmp->fl_type = fl->fl_type;
        tmp->fl_whence = fl->fl_whence;
        tmp->fl_start = fl->fl_start;
        tmp->fl_end = fl->fl_end;

        return tmp;
}

/*
 * Unlink a lock from the inode's list and put it back on the free list,
 * waking up any task that was sleeping on it.
 */
static void free_lock(struct file_lock **fl_p)
{
        struct file_lock *fl;

        fl = *fl_p;
        if (fl->fl_owner == NULL)
                panic("free_lock: broken lock list\n");

        *fl_p = (*fl_p)->fl_next;

        fl->fl_next = file_lock_free_list;
        file_lock_free_list = fl;
        fl->fl_owner = NULL;    /* marks the structure as free */

        wake_up(&fl->fl_wait);
}