This source file includes following definitions.
- locks_free_lock
- locks_insert_block
- locks_delete_block
- sys_flock
- fcntl_getlk
- fcntl_setlk
- locks_remove_locks
- locks_verify_locked
- locks_mandatory_locked
- locks_verify_area
- locks_mandatory_area
- posix_make_lock
- flock_make_lock
- posix_locks_conflict
- flock_locks_conflict
- locks_conflict
- locks_overlap
- posix_locks_deadlock
- flock_lock_file
- posix_lock_file
- locks_alloc_lock
- locks_insert_lock
- locks_delete_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73 #include <linux/malloc.h>
74 #include <linux/sched.h>
75 #include <linux/kernel.h>
76 #include <linux/errno.h>
77 #include <linux/stat.h>
78 #include <linux/fcntl.h>
79
80 #include <asm/segment.h>
81
82 #define OFFSET_MAX ((off_t)0x7fffffff)
83
84 static int flock_make_lock(struct file *filp, struct file_lock *fl,
85 unsigned int cmd);
86 static int posix_make_lock(struct file *filp, struct file_lock *fl,
87 struct flock *l);
88 static int flock_locks_conflict(struct file_lock *caller_fl,
89 struct file_lock *sys_fl);
90 static int posix_locks_conflict(struct file_lock *caller_fl,
91 struct file_lock *sys_fl);
92 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
93 static int flock_lock_file(struct file *filp, struct file_lock *caller,
94 unsigned int wait);
95 static int posix_lock_file(struct file *filp, struct file_lock *caller,
96 unsigned int wait);
97 static int posix_locks_deadlock(struct task_struct *my_task,
98 struct task_struct *blocked_task);
99 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);
100
101 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
102 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
103 static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
104
105 static struct file_lock *file_lock_table = NULL;
106
107
108 static inline void locks_free_lock(struct file_lock **fl)
109 {
110 kfree(*fl);
111 *fl = NULL;
112 }
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128 static inline void locks_insert_block(struct file_lock **block,
129 struct file_lock *fl)
130 {
131 struct file_lock *bfl;
132
133 while ((bfl = *block) != NULL) {
134 block = &bfl->fl_block;
135 }
136
137 *block = fl;
138 fl->fl_block = NULL;
139
140 return;
141 }
142
143 static inline void locks_delete_block(struct file_lock **block,
144 struct file_lock *fl)
145 {
146 struct file_lock *bfl;
147
148 while ((bfl = *block) != NULL) {
149 if (bfl == fl) {
150 *block = fl->fl_block;
151 fl->fl_block = NULL;
152 return;
153 }
154 block = &bfl->fl_block;
155 }
156 }
157
158
159
160
161 asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
162 {
163 struct file_lock file_lock;
164 struct file *filp;
165
166 if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
167 return (-EBADF);
168
169 if (!flock_make_lock(filp, &file_lock, cmd))
170 return (-EINVAL);
171
172 if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
173 return (-EBADF);
174
175 return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
176 }
177
178
179
180
/* fcntl(F_GETLK): test whether the probe described by *l would be
 * blocked.  If a conflicting lock exists, its owner/range/type are
 * copied back to user space; otherwise l_type is set to F_UNLCK. */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
	int error;
	struct flock flock;
	struct file *filp;
	struct file_lock *fl,file_lock;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return (error);

	memcpy_fromfs(&flock, l, sizeof(flock));
	/* Only F_RDLCK/F_WRLCK probes make sense for F_GETLK. */
	if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
	    (flock.l_type == F_SHLCK))
		return (-EINVAL);

	if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* Report the first lock on the inode that conflicts with the probe. */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (posix_locks_conflict(&file_lock, fl)) {
			flock.l_pid = fl->fl_owner->pid;
			flock.l_start = fl->fl_start;
			/* l_len == 0 means "to end of file". */
			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
				fl->fl_end - fl->fl_start + 1;
			flock.l_whence = 0;
			flock.l_type = fl->fl_type;
			memcpy_tofs(l, &flock, sizeof(flock));
			return (0);
		}
	}

	/* No conflict: tell the caller the region is free. */
	flock.l_type = F_UNLCK;
	memcpy_tofs(l, &flock, sizeof(flock));
	return (0);
}
219
220
221
222
223
224
/* fcntl(F_SETLK/F_SETLKW): acquire, convert or release a POSIX record
 * lock described by *l on the open file fd.  F_SETLKW sleeps until the
 * lock can be granted; F_SETLK fails immediately with EAGAIN. */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock file_lock;
	struct flock flock;
	struct inode *inode;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);

	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return (error);

	if (!(inode = filp->f_inode))
		return (-EINVAL);

	/* Mandatory locking (setgid set, group execute clear) cannot
	 * coexist with a shared memory mapping of the file: refuse the
	 * lock if any vma in the inode's mapping ring may be shared. */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) {
		struct vm_area_struct *vma = inode->i_mmap;
		do {
			if (vma->vm_flags & VM_MAYSHARE)
				return (-EAGAIN);
			vma = vma->vm_next_share;
		} while (vma != inode->i_mmap);
	}

	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* The file must be open in a mode matching the lock type
	 * (f_mode bit 1 = read, bit 2 = write). */
	switch (flock.l_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return (-EBADF);
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return (-EBADF);
		break;
	case F_SHLCK :
	case F_EXLCK :
		if (!(filp->f_mode & 3))
			return (-EBADF);
		break;
	case F_UNLCK :
		break;
	}

	return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}
283
284
285
286 void locks_remove_locks(struct task_struct *task, struct file *filp)
287 {
288 struct file_lock *fl;
289 struct file_lock **before;
290
291
292
293
294
295 before = &filp->f_inode->i_flock;
296 while ((fl = *before) != NULL) {
297 if (((fl->fl_flags == F_POSIX) && (fl->fl_owner == task)) ||
298 ((fl->fl_flags == F_FLOCK) && (fl->fl_file == filp) &&
299 (filp->f_count == 1)))
300 locks_delete_lock(before, 0);
301 else
302 before = &fl->fl_next;
303 }
304
305 return;
306 }
307
308 int locks_verify_locked(struct inode *inode)
309 {
310
311
312
313 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
314 return (locks_mandatory_locked(inode));
315 return (0);
316 }
317
318 int locks_mandatory_locked(struct inode *inode)
319 {
320 struct file_lock *fl;
321
322
323
324 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
325 if (fl->fl_flags == F_POSIX && fl->fl_owner != current)
326 return (-EAGAIN);
327 }
328 return (0);
329 }
330
331 int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
332 unsigned int offset, unsigned int count)
333 {
334
335
336
337 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
338 return (locks_mandatory_area(read_write, inode, filp, offset,
339 count));
340 return (0);
341 }
342
/* Enforce mandatory locking on the range [offset, offset + count):
 * sleep (or fail, for O_NONBLOCK files) while another process holds a
 * POSIX lock that forbids the access.  Returns 0 when access may
 * proceed, -EAGAIN, -ERESTARTSYS or -EDEADLOCK otherwise. */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, unsigned int offset,
			 unsigned int count)
{
	struct file_lock *fl;

repeat:
	/* Look for another owner's POSIX lock overlapping the range. */
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (fl->fl_flags == F_FLOCK ||
		    (fl->fl_flags == F_POSIX && fl->fl_owner == current))
			continue;
		if (fl->fl_end < offset ||
		    fl->fl_start >= offset + count)
			continue;

		/* A write is blocked by any lock; a read only by a
		 * write lock. */
		if (read_write == FLOCK_VERIFY_WRITE ||
		    fl->fl_type == F_WRLCK) {
			if (filp && (filp->f_flags & O_NONBLOCK))
				return (-EAGAIN);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);
			if (posix_locks_deadlock(current, fl->fl_owner))
				return (-EDEADLOCK);
			interruptible_sleep_on(&fl->fl_wait);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);

			/* If the mandatory-locking mode bits were cleared
			 * while we slept, stop checking; otherwise the lock
			 * list may have changed, so rescan from the top. */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
				break;
			goto repeat;
		}
	}
	return (0);
}
387
388
389
390
391 static int posix_make_lock(struct file *filp, struct file_lock *fl,
392 struct flock *l)
393 {
394 off_t start;
395
396 switch (l->l_type) {
397 case F_RDLCK :
398 case F_WRLCK :
399 case F_UNLCK :
400 fl->fl_type = l->l_type;
401 break;
402 case F_SHLCK :
403 fl->fl_type = F_RDLCK;
404 break;
405 case F_EXLCK :
406 fl->fl_type = F_WRLCK;
407 break;
408 default :
409 return (0);
410 }
411
412 switch (l->l_whence) {
413 case 0 :
414 start = 0;
415 break;
416 case 1 :
417 start = filp->f_pos;
418 break;
419 case 2 :
420 start = filp->f_inode->i_size;
421 break;
422 default :
423 return (0);
424 }
425
426 if (((start += l->l_start) < 0) || (l->l_len < 0))
427 return (0);
428 fl->fl_start = start;
429 if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
430 fl->fl_end = OFFSET_MAX;
431
432 fl->fl_flags = F_POSIX;
433 fl->fl_file = filp;
434 fl->fl_owner = current;
435 fl->fl_wait = NULL;
436
437 return (1);
438 }
439
440
441
442
443 static int flock_make_lock(struct file *filp, struct file_lock *fl,
444 unsigned int cmd)
445 {
446 if (!filp->f_inode)
447 return (0);
448
449 switch (cmd & ~LOCK_NB) {
450 case LOCK_SH :
451 fl->fl_type = F_RDLCK;
452 break;
453 case LOCK_EX :
454 fl->fl_type = F_WRLCK;
455 break;
456 case LOCK_UN :
457 fl->fl_type = F_UNLCK;
458 break;
459 default :
460 return (0);
461 }
462
463 fl->fl_flags = F_FLOCK;
464 fl->fl_start = 0;
465 fl->fl_end = OFFSET_MAX;
466 fl->fl_file = filp;
467 fl->fl_owner = current;
468 fl->fl_wait = NULL;
469
470 return (1);
471 }
472
473
474
475
476 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
477 {
478
479
480
481 if ((sys_fl->fl_flags == F_POSIX) &&
482 (caller_fl->fl_owner == sys_fl->fl_owner))
483 return (0);
484
485 return (locks_conflict(caller_fl, sys_fl));
486 }
487
488
489
490
491 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
492 {
493
494
495
496 if ((sys_fl->fl_flags == F_FLOCK) &&
497 (caller_fl->fl_file == sys_fl->fl_file))
498 return (0);
499
500 return (locks_conflict(caller_fl, sys_fl));
501 }
502
503
504
505
506 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
507 {
508 if (!locks_overlap(caller_fl, sys_fl))
509 return (0);
510
511 switch (caller_fl->fl_type) {
512 case F_RDLCK :
513 return (sys_fl->fl_type == F_WRLCK);
514
515 case F_WRLCK :
516 return (1);
517
518 default:
519 printk("locks_conflict(): impossible lock type - %d\n",
520 caller_fl->fl_type);
521 break;
522 }
523 return (0);
524 }
525
526
527
528 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
529 {
530 return ((fl1->fl_end >= fl2->fl_start) &&
531 (fl2->fl_end >= fl1->fl_start));
532 }
533
534
535
536
537
538
539
540
541
542
543
/* Detect whether waiting on a lock held by blocked_task would create a
 * cycle of waiters involving my_task.  Follows the wait-for chain
 * through the global lock table: find a lock on whose wait queue
 * blocked_task is sleeping, then repeat with that lock's owner.  A
 * chain that reaches my_task means granting the wait would deadlock. */
static int posix_locks_deadlock(struct task_struct *my_task,
				struct task_struct *blocked_task)
{
	struct wait_queue *dlock_wait;
	struct file_lock *fl;

next_task:
	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
		if (fl->fl_owner == NULL || fl->fl_wait == NULL)
			continue;
		/* Walk this lock's circular wait queue looking for
		 * blocked_task among the sleepers. */
		dlock_wait = fl->fl_wait;
		do {
			if (dlock_wait->task == blocked_task) {
				if (fl->fl_owner == my_task) {
					return(-EDEADLOCK);
				}
				/* blocked_task is itself waiting on
				 * fl->fl_owner: follow the chain. */
				blocked_task = fl->fl_owner;
				goto next_task;
			}
			dlock_wait = dlock_wait->next;
		} while (dlock_wait != fl->fl_wait);
	}
	return (0);
}
568
569
570
571
572
/* Apply (or, for F_UNLCK, remove) a flock()-style lock on a file.
 * flock() locks are whole-file and belong to the open file (struct
 * file), so each struct file holds at most one; changing the type
 * means deleting the old lock and inserting a new one.  'wait'
 * selects blocking behaviour on conflict. */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock **before;
	int change = 0;

	/* FLOCK locks sit at the head of the inode's lock list; look for
	 * an existing lock belonging to this open file. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && (fl->fl_flags == F_FLOCK)) {
		if (caller->fl_file == fl->fl_file) {
			/* Same type already held: nothing to do. */
			if (caller->fl_type == fl->fl_type)
				return (0);
			change = 1;
			break;
		}
		before = &fl->fl_next;
	}

	/* Delete the old lock first.  When converting (not unlocking),
	 * pass wait != 0 so blocked lockers get a chance to run. */
	if (change)
		locks_delete_lock(before, caller->fl_type != F_UNLCK);
	if (caller->fl_type == F_UNLCK)
		return (0);
	if ((new_fl = locks_alloc_lock(caller)) == NULL)
		return (-ENOLCK);
repeat:
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!flock_locks_conflict(new_fl, fl))
			continue;

		if (wait) {
			if (current->signal & ~current->blocked) {
				/* Signal pending before we slept: back out
				 * and let the syscall restart. */
				locks_free_lock(&new_fl);
				return (-ERESTARTSYS);
			}
			/* Queue on the conflicting lock so its deletion
			 * wakes us, then sleep. */
			locks_insert_block(&fl->fl_block, new_fl);
			interruptible_sleep_on(&new_fl->fl_wait);
			/* Propagate the wakeup to anyone queued on us. */
			wake_up(&new_fl->fl_wait);
			if (current->signal & ~current->blocked) {
				/* Interrupted while asleep: unlink from the
				 * blocker's queue (if still there) and give
				 * up. */
				locks_delete_block(&fl->fl_block, new_fl);
				locks_free_lock(&new_fl);
				return (-ERESTARTSYS);
			}
			/* The lock list may have changed while sleeping:
			 * rescan for conflicts from the top. */
			goto repeat;
		}

		/* Non-blocking request with a conflict present. */
		locks_free_lock(&new_fl);
		return (-EAGAIN);
	}
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	return (0);
}
642
643
644
645
646
647
648
649
650
651
652
653
654
/* Apply a POSIX record lock described by 'caller' to a file, merging,
 * splitting and replacing the owner's existing locks as required.
 * Locks of the same owner never conflict: same-type overlapping or
 * adjacent regions are coalesced, and a new type simply replaces the
 * old over the overlap (possibly splitting a lock in two).  'wait'
 * selects F_SETLKW behaviour when another owner's lock conflicts. */
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int added = 0;

	/* Unless unlocking, first wait until no other owner's lock
	 * conflicts with the request. */
	if (caller->fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!posix_locks_conflict(caller, fl))
				continue;
			if (wait) {
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				if (fl->fl_flags == F_POSIX)
					if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
						return (-EDEADLOCK);
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				/* List may have changed while we slept. */
				goto repeat;
			}
			return (-EAGAIN);
		}
	}

	/* Skip FLOCK locks and other owners' POSIX locks to reach the
	 * first lock owned by this process. */
	before = &filp->f_inode->i_flock;

	while ((fl = *before) && ((fl->fl_flags == F_FLOCK) ||
	    (caller->fl_owner != fl->fl_owner))) {
		before = &fl->fl_next;
	}

	/* Walk this owner's locks, merging with or carving up each one
	 * that touches the requested range. */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {

		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce overlapping/adjacent ranges. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/* Locks are sorted, so once past the range no
			 * further merge is possible. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* Grow the existing lock to the union of the two
			 * ranges, keeping 'caller' in step so later
			 * iterations see the widened range. */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* A previous lock already absorbed the
				 * range; this one is now redundant. */
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Different type: the request replaces whatever
			 * part of this lock it overlaps. */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;
			/* A fragment survives on the left of the range. */
			if (fl->fl_start < caller->fl_start)
				left = fl;

			/* A fragment survives on the right: remember it and
			 * stop, nothing further can overlap. */
			if (fl->fl_end > caller->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= caller->fl_start) {

				/* The old lock is completely covered. */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}

				/* Reuse this entry for the new lock.  Wake
				 * sleepers first: the type change may
				 * unblock them. */
				wake_up(&fl->fl_wait);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}

		/* Advance past the entry we just processed. */
next_lock:
		before = &(*before)->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK)
			return (0);
		if ((new_fl = locks_alloc_lock(caller)) == NULL)
			return (-ENOLCK);
		locks_insert_lock(before, new_fl);

	}
	if (right) {
		if (left == right) {
			/* The request punched a hole in the middle of one
			 * lock: duplicate it so independent left and right
			 * fragments can survive. */
			if ((left = locks_alloc_lock(right)) == NULL) {
				if (!added)
					locks_delete_lock(before, 0);
				return (-ENOLCK);
			}
			locks_insert_lock(before, left);
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return (0);
}
805
806
807
808
809
810
811 static struct file_lock *locks_alloc_lock(struct file_lock *fl)
812 {
813 struct file_lock *tmp;
814
815
816 if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
817 GFP_ATOMIC)) == NULL)
818 return (tmp);
819
820 tmp->fl_nextlink = NULL;
821 tmp->fl_prevlink = NULL;
822 tmp->fl_next = NULL;
823 tmp->fl_block = NULL;
824 tmp->fl_flags = fl->fl_flags;
825 tmp->fl_owner = fl->fl_owner;
826 tmp->fl_file = fl->fl_file;
827 tmp->fl_wait = NULL;
828 tmp->fl_type = fl->fl_type;
829 tmp->fl_start = fl->fl_start;
830 tmp->fl_end = fl->fl_end;
831
832 return (tmp);
833 }
834
835
836
837
838
839 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
840 {
841 fl->fl_nextlink = file_lock_table;
842 fl->fl_prevlink = NULL;
843 if (file_lock_table != NULL)
844 file_lock_table->fl_prevlink = fl;
845 file_lock_table = fl;
846 fl->fl_next = *pos;
847 *pos = fl;
848
849 return;
850 }
851
852
853
854
855
856
857
858
859
860
/* Unlink the lock at *fl_p from the per-inode list and the global
 * file_lock_table, wake every process queued on it, and free it.
 * When 'wait' is nonzero each blocked waiter is allowed to run before
 * we continue (used when converting a flock() lock, so waiters get a
 * chance at the file before the new lock is taken). */
static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *pfl;
	struct file_lock *nfl;

	/* Unlink from the per-inode list. */
	fl = *fl_p;
	*fl_p = fl->fl_next;
	pfl = fl->fl_prevlink;
	nfl = fl->fl_nextlink;

	/* Unlink from the global doubly linked table. */
	if (nfl != NULL)
		nfl->fl_prevlink = pfl;

	if (pfl != NULL)
		pfl->fl_nextlink = nfl;
	else {
		/* fl was the head of the global table. */
		file_lock_table = nfl;
	}

	/* Detach and wake the blocked waiters one at a time. */
	while ((nfl = fl->fl_block) != NULL) {
		fl->fl_block = nfl->fl_block;
		nfl->fl_block = NULL;
		wake_up(&nfl->fl_wait);
		if (wait)
			sleep_on(&nfl->fl_wait);
	}

	wake_up(&fl->fl_wait);
	kfree(fl);

	return;
}