This source file includes the following definitions.
- fcntl_init_locks
- fcntl_getlk
- fcntl_setlk
- locks_deadlocked
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #define DEADLOCK_DETECTION
16
17 #include <asm/segment.h>
18
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/stat.h>
23 #include <linux/fcntl.h>
24
25 #define OFFSET_MAX ((off_t)0x7fffffff)
26
27 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
28 unsigned int fd);
29 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
30 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
31 static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd);
32 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl,
33 unsigned int fd);
34 static void free_lock(struct file_lock **fl);
35 #ifdef DEADLOCK_DETECTION
36 int locks_deadlocked(int my_pid,int blocked_pid);
37 #endif
38
39 static struct file_lock file_lock_table[NR_FILE_LOCKS];
40 static struct file_lock *file_lock_free_list;
41
42
43
44
45
46 void fcntl_init_locks(void)
47 {
48 struct file_lock *fl;
49
50 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
51 fl->fl_next = fl + 1;
52 fl->fl_owner = NULL;
53 }
54 file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
55 file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
56 file_lock_free_list = &file_lock_table[0];
57 }
58
59 int fcntl_getlk(unsigned int fd, struct flock *l)
60 {
61 int error;
62 struct flock flock;
63 struct file *filp;
64 struct file_lock *fl,file_lock;
65
66 if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
67 return -EBADF;
68 error = verify_area(VERIFY_WRITE,l, sizeof(*l));
69 if (error)
70 return error;
71 memcpy_fromfs(&flock, l, sizeof(flock));
72 if (flock.l_type == F_UNLCK)
73 return -EINVAL;
74 if (!copy_flock(filp, &file_lock, &flock, fd))
75 return -EINVAL;
76
77 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
78 if (conflict(&file_lock, fl)) {
79 flock.l_pid = fl->fl_owner->pid;
80 flock.l_start = fl->fl_start;
81 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
82 fl->fl_end - fl->fl_start + 1;
83 flock.l_whence = fl->fl_whence;
84 flock.l_type = fl->fl_type;
85 memcpy_tofs(l, &flock, sizeof(flock));
86 return 0;
87 }
88 }
89
90 flock.l_type = F_UNLCK;
91 memcpy_tofs(l, &flock, sizeof(flock));
92 return 0;
93 }
94
95
96
97
98
/*
 * F_SETLK / F_SETLKW: apply or remove the lock described by *l on fd.
 *
 * F_SETLK fails with -EAGAIN when a conflicting lock exists;
 * F_SETLKW sleeps until the conflicting lock goes away (or a signal
 * arrives, in which case the syscall is restarted via -ERESTARTSYS).
 * Returns 0 on success, -EBADF for a bad descriptor or a lock type the
 * file's open mode does not permit, -EINVAL for a malformed request.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl,file_lock;
	struct flock flock;

	/* Validate the descriptor and translate the user's request
	 * into the kernel's internal file_lock form. */
	if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
		return -EBADF;
	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock, fd))
		return -EINVAL;

	/* The requested lock type must match the file's open mode
	 * (f_mode bit 1 = readable, bit 2 = writable).  F_SHLCK and
	 * F_EXLCK are compatibility aliases mapped onto read/write
	 * locks. */
	switch (file_lock.fl_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return -EBADF;
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return -EBADF;
		break;
	case F_SHLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;
		break;
	case F_UNLCK :
		break;
	}

	/* Scan the inode's lock list for conflicts.  An unlock request
	 * never conflicts, so it skips straight to lock_it(). */
	if (file_lock.fl_type != F_UNLCK) {
	repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!conflict(&file_lock, fl))
				continue;
			/* Conflict found.  F_SETLKW waits on the
			 * blocking lock's wait queue; after waking the
			 * whole list must be re-scanned because it may
			 * have changed while we slept. */
			if (cmd == F_SETLKW) {
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
#ifdef DEADLOCK_DETECTION
				if (locks_deadlocked(file_lock.fl_owner->pid,fl->fl_owner->pid)) return -EDEADLOCK;
#endif
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				goto repeat;
			}
			return -EAGAIN;
		}
	}

	/* No conflicts (or unlock): merge the request into the list. */
	return lock_it(filp, &file_lock, fd);
}
175
176 #ifdef DEADLOCK_DETECTION
177
178
179
180
181
182
183 int locks_deadlocked(int my_pid,int blocked_pid)
184 {
185 int ret_val;
186 struct wait_queue *dlock_wait;
187 struct file_lock *fl;
188 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
189 if (fl->fl_owner == NULL) continue;
190 if (fl->fl_owner->pid != my_pid) continue;
191 if (fl->fl_wait == NULL) continue;
192 dlock_wait = fl->fl_wait;
193 do {
194 if (dlock_wait->task != NULL) {
195 if (dlock_wait->task->pid == blocked_pid) return -EDEADLOCK;
196 ret_val = locks_deadlocked(dlock_wait->task->pid,blocked_pid);
197 if (ret_val) return -EDEADLOCK;
198 }
199 dlock_wait = dlock_wait->next;
200 } while (dlock_wait != fl->fl_wait);
201 }
202 return 0;
203 }
204 #endif
205
206
207
208
209
210 void fcntl_remove_locks(struct task_struct *task, struct file *filp,
211 unsigned int fd)
212 {
213 struct file_lock *fl;
214 struct file_lock **before;
215
216
217
218 before = &filp->f_inode->i_flock;
219 while ((fl = *before) && (task != fl->fl_owner || fd != fl->fl_fd))
220 before = &fl->fl_next;
221
222
223
224 while ((fl = *before) && task == fl->fl_owner && fd == fl->fl_fd)
225 free_lock(before);
226 }
227
228
229
230
231
232
233 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
234 unsigned int fd)
235 {
236 off_t start;
237
238 if (!filp->f_inode)
239 return 0;
240 if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
241 && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
242 return 0;
243 switch (l->l_whence) {
244 case 0 : start = 0; break;
245 case 1 : start = filp->f_pos; break;
246 case 2 : start = filp->f_inode->i_size; break;
247 default : return 0;
248 }
249 if ((start += l->l_start) < 0 || l->l_len < 0)
250 return 0;
251 fl->fl_type = l->l_type;
252 fl->fl_start = start;
253 fl->fl_whence = 0;
254 if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
255 fl->fl_end = OFFSET_MAX;
256 fl->fl_owner = current;
257 fl->fl_fd = fd;
258 fl->fl_wait = NULL;
259 return 1;
260 }
261
262
263
264
265
266 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
267 {
268 if ( caller_fl->fl_owner == sys_fl->fl_owner
269 && caller_fl->fl_fd == sys_fl->fl_fd)
270 return 0;
271 if (!overlap(caller_fl, sys_fl))
272 return 0;
273 switch (caller_fl->fl_type) {
274 case F_RDLCK :
275 return sys_fl->fl_type != F_RDLCK;
276 case F_WRLCK :
277 return 1;
278 }
279 return 0;
280 }
281
282 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
283 {
284 return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
285 }
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
/*
 * Merge the request *caller into filp's inode lock list: coalesce with
 * adjacent/overlapping locks of the same type, and carve up existing
 * locks of a different type that the new range overlaps (possibly
 * splitting one lock into a "left" and "right" remnant).  An F_UNLCK
 * request removes the covered range.  Returns 0 on success, -ENOLCK
 * when the lock table is exhausted.
 *
 * NOTE(review): *caller may be re-aliased onto an existing list node
 * partway through — the statement order below depends on that.
 */
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
	struct file_lock *fl;
	struct file_lock *left = 0;	/* remnant below the new range */
	struct file_lock *right = 0;	/* remnant above the new range */
	struct file_lock **before;
	int added = 0;	/* set once the request occupies a list node */

	/* Skip past locks belonging to other owners; one owner's locks
	 * are contiguous in the list. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) &&
	    (caller->fl_owner != fl->fl_owner ||
	     caller->fl_fd != fl->fl_fd))
		before = &fl->fl_next;

	/* Walk this owner's run of locks, merging the request in. */
	while (	  (fl = *before)
	       && caller->fl_owner == fl->fl_owner
	       && caller->fl_fd == fl->fl_fd) {

		if (caller->fl_type == fl->fl_type) {
			/* Same lock type: coalesce touching/overlapping
			 * ranges.  (fl entirely below the request,
			 * not even adjacent: keep scanning.) */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/* fl entirely above and not adjacent: the list
			 * is ordered, so nothing further can merge. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* Grow both descriptions to the union of the
			 * two ranges. */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Request already lives in an earlier
				 * node; this one is now redundant. */
				free_lock(before);
				continue;
			}
			/* Reuse this node as the request's node. */
			caller = fl;
			added = 1;
			goto next_lock;
		}

		/* Different lock type: no interaction unless the
		 * ranges actually overlap. */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;
		if (fl->fl_start > caller->fl_end)
			break;
		/* An unlock never needs a node of its own. */
		if (caller->fl_type == F_UNLCK)
			added = 1;
		/* fl sticks out below the request: its low part
		 * survives as the left remnant. */
		if (fl->fl_start < caller->fl_start)
			left = fl;

		/* fl sticks out above the request: its high part
		 * survives as the right remnant, and nothing beyond it
		 * can overlap. */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {
			/* fl is fully covered by the request. */
			if (added) {
				free_lock(before);
				continue;
			}

			/* Replace fl in place with the request; wake
			 * its waiters since its type/range change. */
			wake_up(&fl->fl_wait);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			caller = fl;
			added = 1;
		}

	next_lock:
		before = &(*before)->fl_next;
	}

	if (! added) {
		if (caller->fl_type == F_UNLCK) {
			/* Unlocking a range nobody locked: historically
			 * debated; treated as success here. */
#if 0
			return -EINVAL;
#else
			return 0;
#endif
		}
		if (! (caller = alloc_lock(before, caller, fd)))
			return -ENOLCK;
	}
	if (right) {
		if (left == right) {
			/* One lock straddles the request on both sides:
			 * it must split in two, so allocate a node for
			 * the right remnant. */
			if (! (left = alloc_lock(before, right, fd))) {
				if (! added)
					free_lock(before);
				return -ENOLCK;
			}
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return 0;
}
453
454
455
456
457
458 static struct file_lock *alloc_lock(struct file_lock **pos,
459 struct file_lock *fl,
460 unsigned int fd)
461 {
462 struct file_lock *tmp;
463
464 tmp = file_lock_free_list;
465 if (tmp == NULL)
466 return NULL;
467 if (tmp->fl_owner != NULL)
468 panic("alloc_lock: broken free list\n");
469
470
471 file_lock_free_list = tmp->fl_next;
472
473 *tmp = *fl;
474
475 tmp->fl_next = *pos;
476 *pos = tmp;
477
478 tmp->fl_owner = current;
479 tmp->fl_fd = fd;
480 tmp->fl_wait = NULL;
481 return tmp;
482 }
483
484
485
486
487
488 static void free_lock(struct file_lock **fl_p)
489 {
490 struct file_lock *fl;
491
492 fl = *fl_p;
493 if (fl->fl_owner == NULL)
494 panic("free_lock: broken lock list\n");
495
496 *fl_p = (*fl_p)->fl_next;
497
498 fl->fl_next = file_lock_free_list;
499 file_lock_free_list = fl;
500 fl->fl_owner = NULL;
501
502 wake_up(&fl->fl_wait);
503 }