This source file includes following definitions.
- locks_free_lock
- locks_insert_block
- locks_delete_block
- sys_flock
- fcntl_getlk
- fcntl_setlk
- locks_remove_locks
- posix_make_lock
- flock_make_lock
- posix_locks_conflict
- flock_locks_conflict
- locks_conflict
- locks_overlap
- posix_locks_deadlock
- flock_lock_file
- posix_lock_file
- locks_alloc_lock
- locks_insert_lock
- locks_delete_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67 #include <asm/segment.h>
68
69 #include <linux/malloc.h>
70 #include <linux/sched.h>
71 #include <linux/kernel.h>
72 #include <linux/errno.h>
73 #include <linux/stat.h>
74 #include <linux/fcntl.h>
75
76
77 #define OFFSET_MAX ((off_t)0x7fffffff)
78
79 static int flock_make_lock(struct file *filp, struct file_lock *fl,
80 unsigned int cmd);
81 static int posix_make_lock(struct file *filp, struct file_lock *fl,
82 struct flock *l);
83 static int flock_locks_conflict(struct file_lock *caller_fl,
84 struct file_lock *sys_fl);
85 static int posix_locks_conflict(struct file_lock *caller_fl,
86 struct file_lock *sys_fl);
87 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
88 static int flock_lock_file(struct file *filp, struct file_lock *caller,
89 unsigned int wait);
90 static int posix_lock_file(struct file *filp, struct file_lock *caller,
91 unsigned int wait);
92 static int posix_locks_deadlock(struct task_struct *my_task,
93 struct task_struct *blocked_task);
94 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);
95
96 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
97 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
98 static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
99
100 static struct file_lock *file_lock_table = NULL;
101
102
103 static inline void locks_free_lock(struct file_lock **fl)
104 {
105 kfree(*fl);
106 *fl = NULL;
107 }
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123 static inline void locks_insert_block(struct file_lock **block,
124 struct file_lock *fl)
125 {
126 struct file_lock *bfl;
127
128 while ((bfl = *block) != NULL) {
129 block = &bfl->fl_block;
130 }
131
132 *block = fl;
133 fl->fl_block = NULL;
134
135 return;
136 }
137
138 static inline void locks_delete_block(struct file_lock **block,
139 struct file_lock *fl)
140 {
141 struct file_lock *bfl;
142
143 while ((bfl = *block) != NULL) {
144 if (bfl == fl) {
145 *block = fl->fl_block;
146 fl->fl_block = NULL;
147 return;
148 }
149 block = &bfl->fl_block;
150 }
151 }
152
153
154
155
156 asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
157 {
158 struct file_lock file_lock;
159 struct file *filp;
160
161 if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
162 return (-EBADF);
163
164 if (!flock_make_lock(filp, &file_lock, cmd))
165 return (-EINVAL);
166
167 if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
168 return (-EBADF);
169
170 return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
171 }
172
173
174
175
176 int fcntl_getlk(unsigned int fd, struct flock *l)
177 {
178 int error;
179 struct flock flock;
180 struct file *filp;
181 struct file_lock *fl,file_lock;
182
183 if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
184 return (-EBADF);
185 error = verify_area(VERIFY_WRITE, l, sizeof(*l));
186 if (error)
187 return (error);
188
189 memcpy_fromfs(&flock, l, sizeof(flock));
190 if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
191 (flock.l_type == F_SHLCK))
192 return (-EINVAL);
193
194 if (!posix_make_lock(filp, &file_lock, &flock))
195 return (-EINVAL);
196
197 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
198 if (posix_locks_conflict(&file_lock, fl)) {
199 flock.l_pid = fl->fl_owner->pid;
200 flock.l_start = fl->fl_start;
201 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
202 fl->fl_end - fl->fl_start + 1;
203 flock.l_whence = 0;
204 flock.l_type = fl->fl_type;
205 memcpy_tofs(l, &flock, sizeof(flock));
206 return (0);
207 }
208 }
209
210 flock.l_type = F_UNLCK;
211 memcpy_tofs(l, &flock, sizeof(flock));
212 return (0);
213 }
214
215
216
217
218
219
220 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
221 {
222 int error;
223 struct file *filp;
224 struct file_lock file_lock;
225 struct flock flock;
226
227
228
229
230
231 if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
232 return (-EBADF);
233
234 error = verify_area(VERIFY_READ, l, sizeof(*l));
235 if (error)
236 return (error);
237
238 memcpy_fromfs(&flock, l, sizeof(flock));
239 if (!posix_make_lock(filp, &file_lock, &flock))
240 return (-EINVAL);
241
242 switch (flock.l_type) {
243 case F_RDLCK :
244 if (!(filp->f_mode & 1))
245 return -EBADF;
246 break;
247 case F_WRLCK :
248 if (!(filp->f_mode & 2))
249 return -EBADF;
250 break;
251 case F_SHLCK :
252 case F_EXLCK :
253 if (!(filp->f_mode & 3))
254 return -EBADF;
255 break;
256 case F_UNLCK :
257 break;
258 }
259
260 return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
261 }
262
263
264
265 void locks_remove_locks(struct task_struct *task, struct file *filp)
266 {
267 struct file_lock *fl;
268 struct file_lock **before;
269
270
271
272
273
274 before = &filp->f_inode->i_flock;
275 while ((fl = *before) != NULL) {
276 if (((fl->fl_flags == F_POSIX) && (fl->fl_owner == task)) ||
277 ((fl->fl_flags == F_FLOCK) && (fl->fl_file == filp) &&
278 (filp->f_count == 1)))
279 locks_delete_lock(before, 0);
280 else
281 before = &fl->fl_next;
282 }
283
284 return;
285 }
286
287
288
289
290 static int posix_make_lock(struct file *filp, struct file_lock *fl,
291 struct flock *l)
292 {
293 off_t start;
294
295 if (!filp->f_inode)
296 return (0);
297
298 switch (l->l_type) {
299 case F_RDLCK :
300 case F_WRLCK :
301 case F_UNLCK :
302 fl->fl_type = l->l_type;
303 break;
304 case F_SHLCK :
305 fl->fl_type = F_RDLCK;
306 break;
307 case F_EXLCK :
308 fl->fl_type = F_WRLCK;
309 break;
310 default :
311 return (0);
312 }
313
314 switch (l->l_whence) {
315 case 0 :
316 start = 0;
317 break;
318 case 1 :
319 start = filp->f_pos;
320 break;
321 case 2 :
322 start = filp->f_inode->i_size;
323 break;
324 default :
325 return (0);
326 }
327
328 if (((start += l->l_start) < 0) || (l->l_len < 0))
329 return (0);
330 fl->fl_start = start;
331 if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
332 fl->fl_end = OFFSET_MAX;
333
334 fl->fl_flags = F_POSIX;
335 fl->fl_file = filp;
336 fl->fl_owner = current;
337 fl->fl_wait = NULL;
338
339 return (1);
340 }
341
342
343
344
345 static int flock_make_lock(struct file *filp, struct file_lock *fl,
346 unsigned int cmd)
347 {
348 if (!filp->f_inode)
349 return (0);
350
351 switch (cmd & ~LOCK_NB) {
352 case LOCK_SH :
353 fl->fl_type = F_RDLCK;
354 break;
355 case LOCK_EX :
356 fl->fl_type = F_WRLCK;
357 break;
358 case LOCK_UN :
359 fl->fl_type = F_UNLCK;
360 break;
361 default :
362 return (0);
363 }
364
365 fl->fl_flags = F_FLOCK;
366 fl->fl_start = 0;
367 fl->fl_end = OFFSET_MAX;
368 fl->fl_file = filp;
369 fl->fl_owner = current;
370 fl->fl_wait = NULL;
371
372 return (1);
373 }
374
375
376
377
378 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
379 {
380
381
382
383 if ((sys_fl->fl_flags == F_POSIX) &&
384 (caller_fl->fl_owner == sys_fl->fl_owner))
385 return (0);
386
387 return (locks_conflict(caller_fl, sys_fl));
388 }
389
390
391
392
393 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
394 {
395
396
397
398 if ((sys_fl->fl_flags == F_FLOCK) &&
399 (caller_fl->fl_file == sys_fl->fl_file))
400 return (0);
401
402 return (locks_conflict(caller_fl, sys_fl));
403 }
404
405
406
407
408 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
409 {
410 if (!locks_overlap(caller_fl, sys_fl))
411 return (0);
412
413 switch (caller_fl->fl_type) {
414 case F_RDLCK :
415 return (sys_fl->fl_type == F_WRLCK);
416
417 case F_WRLCK :
418 return (1);
419
420 default:
421 printk("locks_conflict(): impossible lock type - %d\n",
422 caller_fl->fl_type);
423 break;
424 }
425 return (0);
426 }
427
428
429
430 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
431 {
432 return ((fl1->fl_end >= fl2->fl_start) &&
433 (fl2->fl_end >= fl1->fl_start));
434 }
435
436
437
438
439
440
441
442
443
444
445
446 static int posix_locks_deadlock(struct task_struct *my_task,
447 struct task_struct *blocked_task)
448 {
449 struct wait_queue *dlock_wait;
450 struct file_lock *fl;
451
452 next_task:
453 for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
454 if (fl->fl_owner == NULL || fl->fl_wait == NULL)
455 continue;
456 dlock_wait = fl->fl_wait;
457 do {
458 if (dlock_wait->task == blocked_task) {
459 if (fl->fl_owner == my_task) {
460 return(-EDEADLOCK);
461 }
462 blocked_task = fl->fl_owner;
463 goto next_task;
464 }
465 dlock_wait = dlock_wait->next;
466 } while (dlock_wait != fl->fl_wait);
467 }
468 return (0);
469 }
470
471
472
473
474
475 static int flock_lock_file(struct file *filp, struct file_lock *caller,
476 unsigned int wait)
477 {
478 struct file_lock *fl;
479 struct file_lock *new_fl;
480 struct file_lock **before;
481 int change = 0;
482
483
484
485
486 before = &filp->f_inode->i_flock;
487 while ((fl = *before) && (fl->fl_flags == F_FLOCK)) {
488 if (caller->fl_file == fl->fl_file) {
489 if (caller->fl_type == fl->fl_type)
490 return (0);
491 change = 1;
492 break;
493 }
494 before = &fl->fl_next;
495 }
496
497
498
499 if (change)
500 locks_delete_lock(before, caller->fl_type != F_UNLCK);
501 if (caller->fl_type == F_UNLCK)
502 return (0);
503 if ((new_fl = locks_alloc_lock(caller)) == NULL)
504 return (-ENOLCK);
505 repeat:
506 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
507 if (!flock_locks_conflict(new_fl, fl))
508 continue;
509
510 if (wait) {
511 if (current->signal & ~current->blocked) {
512
513
514
515
516
517 locks_free_lock(&new_fl);
518 return (-ERESTARTSYS);
519 }
520 locks_insert_block(&fl->fl_block, new_fl);
521 interruptible_sleep_on(&new_fl->fl_wait);
522 wake_up(&new_fl->fl_wait);
523 if (current->signal & ~current->blocked) {
524
525
526
527
528
529
530
531 locks_delete_block(&fl->fl_block, new_fl);
532 locks_free_lock(&new_fl);
533 return (-ERESTARTSYS);
534 }
535 goto repeat;
536 }
537
538 locks_free_lock(&new_fl);
539 return (-EAGAIN);
540 }
541 locks_insert_lock(&filp->f_inode->i_flock, new_fl);
542 return (0);
543 }
544
545
546
547
548
549
550
551
552
553
554
555
556
/*
 * Apply a POSIX record lock (or unlock) to a file.
 *
 * Phase 1: if the caller is locking (not unlocking), scan the inode's
 * lock list for conflicts.  With 'wait' set, sleep on each conflicting
 * lock (after a deadlock check) and rescan; otherwise fail with -EAGAIN.
 *
 * Phase 2: merge the caller's range into the owner's existing locks.
 * Same-type locks that touch or overlap are coalesced; different-type
 * overlaps shrink, replace, or split the existing locks.  'left' and
 * 'right' remember existing locks that stick out past the new range on
 * either side and must be trimmed (or, when one lock sticks out on both
 * sides, duplicated) at the end.
 *
 * Returns 0 on success, or -ERESTARTSYS / -EDEADLOCK / -EAGAIN / -ENOLCK.
 */
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int added = 0;	/* set once caller's range is represented in the list */

	if (caller->fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!posix_locks_conflict(caller, fl))
				continue;
			if (wait) {
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				/* Deadlock check only applies between POSIX
				 * lock owners. */
				if (fl->fl_flags == F_POSIX)
					if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
						return (-EDEADLOCK);
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				/* The list may have changed while asleep. */
				goto repeat;
			}
			return (-EAGAIN);
		}
	}

	/* Skip past flock() locks and other owners' POSIX locks; locks of
	 * the same owner are kept together after that point. */
	before = &filp->f_inode->i_flock;

	while ((fl = *before) && ((fl->fl_flags == F_FLOCK) ||
	    (caller->fl_owner != fl->fl_owner))) {
		before = &fl->fl_next;
	}

	/* Walk this owner's locks, merging/splitting against the caller's
	 * range. */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {
		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce if the ranges touch or
			 * overlap. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;
			if (fl->fl_start > caller->fl_end + 1)
				break;
			/* Grow the existing lock to the union of the two
			 * ranges (and widen 'caller' to match). */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Already represented: this one is now
				 * redundant. */
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Different type: only overlapping ranges matter. */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;	/* unlock needs no new lock */
			if (fl->fl_start < caller->fl_start)
				left = fl;	/* sticks out on the left */
			if (fl->fl_end > caller->fl_end) {
				right = fl;	/* sticks out on the right */
				break;
			}
			if (fl->fl_start >= caller->fl_start) {
				/* Fully covered by the new range. */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}
				/* Reuse this entry for the new lock; wake
				 * waiters since its type is changing. */
				wake_up(&fl->fl_wait);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}

next_lock:
		before = &(*before)->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK)
			return (0);	/* nothing overlapped the unlock */
		if ((new_fl = locks_alloc_lock(caller)) == NULL)
			return (-ENOLCK);
		locks_insert_lock(before, new_fl);

	}
	if (right) {
		if (left == right) {
			/* One lock overhangs on both sides: duplicate it so
			 * the left and right remainders are separate. */
			if ((left = locks_alloc_lock(right)) == NULL) {
				if (!added)
					locks_delete_lock(before, 0);
				return (-ENOLCK);
			}
			locks_insert_lock(before, left);
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return (0);
}
707
708
709
710
711
712
713 static struct file_lock *locks_alloc_lock(struct file_lock *fl)
714 {
715 struct file_lock *tmp;
716
717
718 if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
719 GFP_ATOMIC)) == NULL)
720 return (tmp);
721
722 tmp->fl_nextlink = NULL;
723 tmp->fl_prevlink = NULL;
724 tmp->fl_next = NULL;
725 tmp->fl_block = NULL;
726 tmp->fl_flags = fl->fl_flags;
727 tmp->fl_owner = fl->fl_owner;
728 tmp->fl_file = fl->fl_file;
729 tmp->fl_wait = NULL;
730 tmp->fl_type = fl->fl_type;
731 tmp->fl_start = fl->fl_start;
732 tmp->fl_end = fl->fl_end;
733
734 return (tmp);
735 }
736
737
738
739
740
741 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
742 {
743 fl->fl_nextlink = file_lock_table;
744 fl->fl_prevlink = NULL;
745 if (file_lock_table != NULL)
746 file_lock_table->fl_prevlink = fl;
747 file_lock_table = fl;
748 fl->fl_next = *pos;
749 *pos = fl;
750
751 return;
752 }
753
754
755
756
757
758
759
760
761
762
763 static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
764 {
765 struct file_lock *fl;
766 struct file_lock *bfl;
767
768 fl = *fl_p;
769 *fl_p = (*fl_p)->fl_next;
770
771 if (fl->fl_nextlink != NULL)
772 fl->fl_nextlink->fl_prevlink = fl->fl_prevlink;
773
774 if (fl->fl_prevlink != NULL)
775 fl->fl_prevlink->fl_nextlink = fl->fl_nextlink;
776 else {
777 file_lock_table = fl->fl_nextlink;
778 }
779
780 while ((bfl = fl->fl_block) != NULL) {
781 fl->fl_block = bfl->fl_block;
782 bfl->fl_block = NULL;
783 wake_up(&bfl->fl_wait);
784 if (wait)
785 sleep_on(&bfl->fl_wait);
786 }
787
788 wake_up(&fl->fl_wait);
789 kfree(fl);
790
791 return;
792 }