This source file includes the following definitions.
- locks_free_lock
- locks_insert_block
- locks_delete_block
- sys_flock
- fcntl_getlk
- fcntl_setlk
- locks_remove_locks
- locks_verify_locked
- locks_mandatory_locked
- locks_verify_area
- locks_mandatory_area
- posix_make_lock
- flock_make_lock
- posix_locks_conflict
- flock_locks_conflict
- locks_conflict
- locks_overlap
- posix_locks_deadlock
- flock_lock_file
- posix_lock_file
- locks_alloc_lock
- locks_insert_lock
- locks_delete_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76 #include <linux/malloc.h>
77 #include <linux/sched.h>
78 #include <linux/kernel.h>
79 #include <linux/errno.h>
80 #include <linux/stat.h>
81 #include <linux/fcntl.h>
82
83 #include <asm/segment.h>
84
85 #define OFFSET_MAX ((off_t)0x7fffffff)
86
87 static int flock_make_lock(struct file *filp, struct file_lock *fl,
88 unsigned int cmd);
89 static int posix_make_lock(struct file *filp, struct file_lock *fl,
90 struct flock *l);
91 static int flock_locks_conflict(struct file_lock *caller_fl,
92 struct file_lock *sys_fl);
93 static int posix_locks_conflict(struct file_lock *caller_fl,
94 struct file_lock *sys_fl);
95 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
96 static int flock_lock_file(struct file *filp, struct file_lock *caller,
97 unsigned int wait);
98 static int posix_lock_file(struct file *filp, struct file_lock *caller,
99 unsigned int wait);
100 static int posix_locks_deadlock(struct task_struct *my_task,
101 struct task_struct *blocked_task);
102 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);
103
104 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
105 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
106 static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
107
108 static struct file_lock *file_lock_table = NULL;
109
110
/* Release the memory held by a file lock that is no longer in use. */
static inline void locks_free_lock(struct file_lock *fl)
{
	kfree(fl);
}
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131 static inline void locks_insert_block(struct file_lock *bfl,
132 struct file_lock *fl)
133 {
134 while (bfl->fl_block != NULL) {
135 bfl = bfl->fl_block;
136 }
137
138 bfl->fl_block = fl;
139 fl->fl_block = NULL;
140
141 return;
142 }
143
144 static inline void locks_delete_block(struct file_lock *bfl,
145 struct file_lock *fl)
146 {
147 struct file_lock *tfl;
148
149 while ((tfl = bfl->fl_block) != NULL) {
150 if (tfl == fl) {
151 bfl->fl_block = fl->fl_block;
152 fl->fl_block = NULL;
153 return;
154 }
155 bfl = tfl;
156 }
157 return;
158 }
159
160
161
162
/* flock() system call: apply or remove an advisory BSD-style lock
 * covering the entire file referenced by fd.
 *
 * Returns 0 on success, -EBADF for a bad descriptor or wrong open
 * mode, -EINVAL for an unrecognized cmd.
 */
asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file_lock file_lock;
	struct file *filp;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);

	if (!flock_make_lock(filp, &file_lock, cmd))
		return (-EINVAL);

	/* Taking a lock (as opposed to releasing one) requires the file
	 * to be open for reading and/or writing. */
	if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
		return (-EBADF);

	/* Block only when actually acquiring a lock without LOCK_NB set. */
	return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
}
179
180
181
182
/* fcntl F_GETLK: report the first lock that would block the caller's
 * probe lock described by *l, or set l_type = F_UNLCK if none would.
 *
 * Returns 0 on success, -EBADF for a bad descriptor, -EINVAL for a
 * malformed flock structure, or a verify_area() fault code.
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
	int error;
	struct flock flock;
	struct file *filp;
	struct file_lock *fl,file_lock;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return (error);

	memcpy_fromfs(&flock, l, sizeof(flock));
	/* F_UNLCK makes no sense as a probe type, and the old
	 * flock()-emulation types are not accepted here either. */
	if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
	    (flock.l_type == F_SHLCK))
		return (-EINVAL);

	if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* Scan the inode's lock list for the first conflicting lock and
	 * copy its description back to user space. */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (posix_locks_conflict(&file_lock, fl)) {
			flock.l_pid = fl->fl_owner->pid;
			flock.l_start = fl->fl_start;
			/* l_len == 0 encodes "to end of file". */
			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
				fl->fl_end - fl->fl_start + 1;
			flock.l_whence = 0;
			flock.l_type = fl->fl_type;
			memcpy_tofs(l, &flock, sizeof(flock));
			return (0);
		}
	}

	/* No conflict: tell the caller the lock could be placed. */
	flock.l_type = F_UNLCK;
	memcpy_tofs(l, &flock, sizeof(flock));
	return (0);
}
221
222
223
224
225
226
/* fcntl F_SETLK / F_SETLKW: apply or remove the POSIX lock described
 * by *l.  F_SETLKW sleeps until the lock can be granted; F_SETLK
 * fails with -EAGAIN instead.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock file_lock;
	struct flock flock;
	struct inode *inode;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);

	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return (error);

	if (!(inode = filp->f_inode))
		return (-EINVAL);

	/* Mandatory locking (setgid set, group-execute clear) cannot be
	 * combined with shared writable mappings of the same file. */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) {
		struct vm_area_struct *vma = inode->i_mmap;
		do {
			if (vma->vm_flags & VM_MAYSHARE)
				return (-EAGAIN);
			vma = vma->vm_next_share;
		} while (vma != inode->i_mmap);
	}

	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* The requested lock type must match the file's open mode:
	 * read locks need read access, write locks need write access. */
	switch (flock.l_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return (-EBADF);
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return (-EBADF);
		break;
	case F_SHLCK :
	case F_EXLCK :
#if 1
/* Warn a few times about callers still using the old flock()
 * emulation types through fcntl(). */
{
	static int count = 0;
	if (count < 5) {
		count++;
		printk(KERN_WARNING
		       "fcntl_setlk() called by process %d with broken flock() emulation\n",
		       current->pid);
	}
}
#endif
		if (!(filp->f_mode & 3))
			return (-EBADF);
		break;
	case F_UNLCK :
		break;
	}

	return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}
297
298
299
/* Called when a file is being closed: remove all POSIX locks owned by
 * task, and, on the final close of filp, all flock()-style locks that
 * were placed through it.
 */
void locks_remove_locks(struct task_struct *task, struct file *filp)
{
	struct file_lock *fl;
	struct file_lock **before;

	/* POSIX locks die with the owning task's close; FLOCK locks
	 * survive until the last reference to the file is dropped
	 * (f_count == 1 here, since we are inside the close). */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) != NULL) {
		if (((fl->fl_flags == F_POSIX) && (fl->fl_owner == task)) ||
		    ((fl->fl_flags == F_FLOCK) && (fl->fl_file == filp) &&
		     (filp->f_count == 1)))
			locks_delete_lock(before, 0);
		else
			before = &fl->fl_next;
	}

	return;
}
321
322 int locks_verify_locked(struct inode *inode)
323 {
324
325
326
327 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
328 return (locks_mandatory_locked(inode));
329 return (0);
330 }
331
332 int locks_mandatory_locked(struct inode *inode)
333 {
334 struct file_lock *fl;
335
336
337
338 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
339 if (fl->fl_flags == F_POSIX && fl->fl_owner != current)
340 return (-EAGAIN);
341 }
342 return (0);
343 }
344
345 int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
346 unsigned int offset, unsigned int count)
347 {
348
349
350
351 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
352 return (locks_mandatory_area(read_write, inode, filp, offset,
353 count));
354 return (0);
355 }
356
/* Enforce mandatory locking over the byte range [offset, offset+count)
 * for a read or write.  Sleeps until conflicting POSIX locks placed by
 * other tasks are released, unless the file is open O_NONBLOCK.
 *
 * Returns 0 when the access may proceed, -EAGAIN for a non-blocking
 * conflict, -ERESTARTSYS if interrupted by a signal, or -EDEADLOCK if
 * waiting would deadlock.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, unsigned int offset,
			 unsigned int count)
{
	struct file_lock *fl;

repeat:
	/* The lock list can change while we sleep, so the scan restarts
	 * from the head after every wakeup. */
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		/* FLOCK locks and our own POSIX locks never block us. */
		if (fl->fl_flags == F_FLOCK ||
		    (fl->fl_flags == F_POSIX && fl->fl_owner == current))
			continue;
		/* Skip locks that do not overlap the requested range. */
		if (fl->fl_end < offset ||
		    fl->fl_start >= offset + count)
			continue;

		/* A write conflicts with any lock; a read conflicts
		 * only with a write lock. */
		if (read_write == FLOCK_VERIFY_WRITE ||
		    fl->fl_type == F_WRLCK) {
			if (filp && (filp->f_flags & O_NONBLOCK))
				return (-EAGAIN);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);
			if (posix_locks_deadlock(current, fl->fl_owner))
				return (-EDEADLOCK);
			interruptible_sleep_on(&fl->fl_wait);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);
			/* If the mode bits no longer request mandatory
			 * locking, stop enforcing it. */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
				break;
			goto repeat;
		}
	}
	return (0);
}
401
402
403
404
/* Translate a user-space struct flock into the kernel's file_lock
 * representation, resolving l_whence into an absolute offset.
 * Returns 1 on success, 0 if the request is malformed (bad type, bad
 * whence, negative resulting offset or length).
 */
static int posix_make_lock(struct file *filp, struct file_lock *fl,
			   struct flock *l)
{
	off_t start;

	switch (l->l_type) {
	case F_RDLCK :
	case F_WRLCK :
	case F_UNLCK :
		fl->fl_type = l->l_type;
		break;
	/* The old flock()-emulation types map onto their POSIX
	 * equivalents. */
	case F_SHLCK :
		fl->fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		fl->fl_type = F_WRLCK;
		break;
	default :
		return (0);
	}

	/* Resolve the base the offset is relative to: start of file
	 * (SEEK_SET), current position (SEEK_CUR), or EOF (SEEK_END). */
	switch (l->l_whence) {
	case 0 :
		start = 0;
		break;
	case 1 :
		start = filp->f_pos;
		break;
	case 2 :
		start = filp->f_inode->i_size;
		break;
	default :
		return (0);
	}

	if (((start += l->l_start) < 0) || (l->l_len < 0))
		return (0);
	fl->fl_start = start;	/* absolute byte offset of first locked byte */
	/* Zero length (or an end computation that wraps negative) means
	 * "lock to end of file". */
	if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
		fl->fl_end = OFFSET_MAX;

	fl->fl_flags = F_POSIX;
	fl->fl_file = filp;
	fl->fl_owner = current;
	fl->fl_wait = NULL;

	return (1);
}
453
454
455
456
457 static int flock_make_lock(struct file *filp, struct file_lock *fl,
458 unsigned int cmd)
459 {
460 if (!filp->f_inode)
461 return (0);
462
463 switch (cmd & ~LOCK_NB) {
464 case LOCK_SH :
465 fl->fl_type = F_RDLCK;
466 break;
467 case LOCK_EX :
468 fl->fl_type = F_WRLCK;
469 break;
470 case LOCK_UN :
471 fl->fl_type = F_UNLCK;
472 break;
473 default :
474 return (0);
475 }
476
477 fl->fl_flags = F_FLOCK;
478 fl->fl_start = 0;
479 fl->fl_end = OFFSET_MAX;
480 fl->fl_file = filp;
481 fl->fl_owner = current;
482 fl->fl_wait = NULL;
483
484 return (1);
485 }
486
487
488
489
490 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
491 {
492
493
494
495 if ((sys_fl->fl_flags == F_POSIX) &&
496 (caller_fl->fl_owner == sys_fl->fl_owner))
497 return (0);
498
499 return (locks_conflict(caller_fl, sys_fl));
500 }
501
502
503
504
505 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
506 {
507
508
509
510 if ((sys_fl->fl_flags == F_FLOCK) &&
511 (caller_fl->fl_file == sys_fl->fl_file))
512 return (0);
513
514 return (locks_conflict(caller_fl, sys_fl));
515 }
516
517
518
519
520 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
521 {
522 if (!locks_overlap(caller_fl, sys_fl))
523 return (0);
524
525 switch (caller_fl->fl_type) {
526 case F_RDLCK :
527 return (sys_fl->fl_type == F_WRLCK);
528
529 case F_WRLCK :
530 return (1);
531
532 default:
533 printk("locks_conflict(): impossible lock type - %d\n",
534 caller_fl->fl_type);
535 break;
536 }
537 return (0);
538 }
539
540
541
542 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
543 {
544 return ((fl1->fl_end >= fl2->fl_start) &&
545 (fl2->fl_end >= fl1->fl_start));
546 }
547
548
549
550
551
552
553
554
555
556
557
/* Detect whether blocking behind blocked_task would create a cycle of
 * waiting tasks leading back to my_task.  Walks the global lock table
 * following wait queues transitively: each time blocked_task is found
 * waiting on some lock, the search continues with that lock's owner.
 * Returns -EDEADLOCK if the chain closes on my_task, 0 otherwise.
 */
static int posix_locks_deadlock(struct task_struct *my_task,
				struct task_struct *blocked_task)
{
	struct wait_queue *dlock_wait;
	struct file_lock *fl;

next_task:
	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
		if (fl->fl_owner == NULL || fl->fl_wait == NULL)
			continue;
		/* Is blocked_task among the waiters on this lock? */
		dlock_wait = fl->fl_wait;
		do {
			if (dlock_wait->task == blocked_task) {
				if (fl->fl_owner == my_task) {
					/* The wait chain closes on us. */
					return(-EDEADLOCK);
				}
				/* Follow the chain one link further. */
				blocked_task = fl->fl_owner;
				goto next_task;
			}
			dlock_wait = dlock_wait->next;
		} while (dlock_wait != fl->fl_wait);
	}
	return (0);
}
582
583
584
585
586
/* Apply or remove a BSD flock()-style lock on filp's inode.  These
 * locks always cover the whole file and at most one exists per struct
 * file; requesting a different type replaces the existing lock.  If
 * wait is non-zero, sleep until conflicting locks are released.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock **before;
	int change = 0;

	/* FLOCK locks are kept at the front of the inode's lock list,
	 * so stop scanning at the first non-FLOCK entry. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && (fl->fl_flags == F_FLOCK)) {
		if (caller->fl_file == fl->fl_file) {
			if (caller->fl_type == fl->fl_type)
				return (0);	/* same lock already held */
			change = 1;	/* replace the existing lock */
			break;
		}
		before = &fl->fl_next;
	}

	/* When converting a lock (not plain unlocking), have the delete
	 * wait for woken blockers so they can retry in order. */
	if (change)
		locks_delete_lock(before, caller->fl_type != F_UNLCK);
	if (caller->fl_type == F_UNLCK)
		return (0);
	if ((new_fl = locks_alloc_lock(caller)) == NULL)
		return (-ENOLCK);
repeat:
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!flock_locks_conflict(new_fl, fl))
			continue;

		if (wait) {
			if (current->signal & ~current->blocked) {
				/* A signal is already pending: bail out
				 * without blocking. */
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}
			locks_insert_block(fl, new_fl);
			interruptible_sleep_on(&new_fl->fl_wait);
			wake_up(&new_fl->fl_wait);
			if (current->signal & ~current->blocked) {
				/* Interrupted while blocked: make sure
				 * we are off fl's blocked-lock chain
				 * before freeing our lock. */
				locks_delete_block(fl, new_fl);
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}
			/* The lock list may have changed while we
			 * slept, so rescan from the start. */
			goto repeat;
		}

		locks_free_lock(new_fl);
		return (-EAGAIN);
	}
	/* No conflicts remain: insert at the head of the list. */
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	return (0);
}
656
657
658
659
660
661
662
663
664
665
666
667
668
/* Apply a POSIX (fcntl) lock to filp's inode, coalescing and splitting
 * the owner's existing locks so the per-owner list stays sorted and
 * non-overlapping.  An F_UNLCK request removes or shrinks locks over
 * the given range.  If wait is non-zero, sleep until conflicting locks
 * held by other owners are released.
 *
 * Returns 0 on success, -EAGAIN for a non-blocking conflict,
 * -ERESTARTSYS if interrupted, -EDEADLOCK if sleeping would deadlock,
 * or -ENOLCK if lock memory is exhausted.
 */
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int added = 0;

	/* Unlock requests never conflict; anything else must first wait
	 * out (or fail on) locks held by other owners. */
	if (caller->fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!posix_locks_conflict(caller, fl))
				continue;
			if (wait) {
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				/* Only POSIX owners participate in
				 * deadlock detection. */
				if (fl->fl_flags == F_POSIX)
					if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
						return (-EDEADLOCK);
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				goto repeat;
			}
			return (-EAGAIN);
		}
	}

	/* Skip to the first lock owned by the caller; FLOCK locks and
	 * other owners' locks precede ours in the list. */
	before = &filp->f_inode->i_flock;

	while ((fl = *before) && ((fl->fl_flags == F_FLOCK) ||
				  (caller->fl_owner != fl->fl_owner))) {
		before = &fl->fl_next;
	}

	/* Walk our own locks (sorted by start offset) merging and
	 * splitting them against the new range. */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {

		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce adjacent or overlapping
			 * locks into one. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;	/* wholly before, skip */

			/* Wholly after (not even adjacent): done. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* Grow both ranges to their union. */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Already absorbed into an earlier
				 * entry: this one is now redundant. */
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Different type: skip locks wholly before the
			 * range, stop at locks wholly after it. */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < caller->fl_start)
				left = fl;	/* keep the left remnant */

			/* A lock extending past our end supplies the
			 * right remnant; nothing further can overlap. */
			if (fl->fl_end > caller->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= caller->fl_start) {
				/* Existing lock is fully covered by the
				 * new range. */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}
				/* Reuse this entry for the new lock,
				 * waking anyone blocked on it so they
				 * re-check for conflicts with the new
				 * type. */
				wake_up(&fl->fl_wait);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}

		/* Advance to the next lock in the owner's list. */
next_lock:
		before = &(*before)->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK)
			return (0);	/* nothing overlapped; nothing to do */
		if ((new_fl = locks_alloc_lock(caller)) == NULL)
			return (-ENOLCK);
		locks_insert_lock(before, new_fl);

	}
	if (right) {
		if (left == right) {
			/* One lock surrounded the new range: it must be
			 * split in two, so clone it to serve as the
			 * left-hand piece. */
			if ((left = locks_alloc_lock(right)) == NULL) {
				if (!added)
					locks_delete_lock(before, 0);
				return (-ENOLCK);
			}
			locks_insert_lock(before, left);
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return (0);
}
819
820
821
822
823
824
825 static struct file_lock *locks_alloc_lock(struct file_lock *fl)
826 {
827 struct file_lock *tmp;
828
829
830 if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
831 GFP_ATOMIC)) == NULL)
832 return (tmp);
833
834 tmp->fl_nextlink = NULL;
835 tmp->fl_prevlink = NULL;
836 tmp->fl_next = NULL;
837 tmp->fl_block = NULL;
838 tmp->fl_flags = fl->fl_flags;
839 tmp->fl_owner = fl->fl_owner;
840 tmp->fl_file = fl->fl_file;
841 tmp->fl_wait = NULL;
842 tmp->fl_type = fl->fl_type;
843 tmp->fl_start = fl->fl_start;
844 tmp->fl_end = fl->fl_end;
845
846 return (tmp);
847 }
848
849
850
851
852
853 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
854 {
855 fl->fl_nextlink = file_lock_table;
856 fl->fl_prevlink = NULL;
857 if (file_lock_table != NULL)
858 file_lock_table->fl_prevlink = fl;
859 file_lock_table = fl;
860 fl->fl_next = *pos;
861 *pos = fl;
862
863 return;
864 }
865
866
867
868
869
870
871
872
873
874
/* Remove the lock at *fl_p from both the inode list and the global
 * lock table, wake every lock blocked on it, and free it.  If wait is
 * non-zero, sleep after each wakeup until the woken waiter has run
 * (used when a lock is being converted rather than simply dropped, so
 * waiters retry in order).
 */
static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *pfl;
	struct file_lock *nfl;

	/* Unlink from the inode's singly linked list... */
	fl = *fl_p;
	*fl_p = fl->fl_next;
	pfl = fl->fl_prevlink;
	nfl = fl->fl_nextlink;

	/* ...and from the doubly linked global table. */
	if (nfl != NULL)
		nfl->fl_prevlink = pfl;

	if (pfl != NULL)
		pfl->fl_nextlink = nfl;
	else
		file_lock_table = nfl;	/* fl was the table head */

	/* Detach each blocked lock and hand it back to its waiter. */
	while ((nfl = fl->fl_block) != NULL) {
		fl->fl_block = nfl->fl_block;
		nfl->fl_block = NULL;
		wake_up(&nfl->fl_wait);
		if (wait)
			sleep_on(&nfl->fl_wait);
	}

	wake_up(&fl->fl_wait);
	kfree(fl);

	return;
}