This source file includes following definitions.
- locks_free_lock
- locks_insert_block
- locks_delete_block
- sys_flock
- fcntl_getlk
- fcntl_setlk
- locks_remove_locks
- locks_verify_locked
- locks_mandatory_locked
- locks_verify_area
- locks_mandatory_area
- posix_make_lock
- flock_make_lock
- posix_locks_conflict
- flock_locks_conflict
- locks_conflict
- locks_overlap
- locks_deadlock
- flock_lock_file
- posix_lock_file
- locks_alloc_lock
- locks_insert_lock
- locks_delete_lock
- lock_get_status
- get_locks_status
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80 #include <linux/malloc.h>
81 #include <linux/sched.h>
82 #include <linux/kernel.h>
83 #include <linux/errno.h>
84 #include <linux/stat.h>
85 #include <linux/fcntl.h>
86
87 #include <asm/segment.h>
88
89 #define OFFSET_MAX ((off_t)0x7fffffff)
90
91 static int flock_make_lock(struct file *filp, struct file_lock *fl,
92 unsigned int cmd);
93 static int posix_make_lock(struct file *filp, struct file_lock *fl,
94 struct flock *l);
95 static int flock_locks_conflict(struct file_lock *caller_fl,
96 struct file_lock *sys_fl);
97 static int posix_locks_conflict(struct file_lock *caller_fl,
98 struct file_lock *sys_fl);
99 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
100 static int flock_lock_file(struct file *filp, struct file_lock *caller,
101 unsigned int wait);
102 static int posix_lock_file(struct file *filp, struct file_lock *caller,
103 unsigned int wait);
104 static int locks_deadlock(struct task_struct *my_task,
105 struct task_struct *blocked_task);
106 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);
107
108 static struct file_lock *locks_alloc_lock(struct file_lock *fl);
109 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
110 static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
111 static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx);
112
113 static struct file_lock *file_lock_table = NULL;
114
115
/*
 * Release the storage of a lock that has already been removed from
 * every list.
 */
static inline void locks_free_lock(struct file_lock *fl)
{
	kfree(fl);
}
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136 static inline void locks_insert_block(struct file_lock *bfl,
137 struct file_lock *fl)
138 {
139 while (bfl->fl_block != NULL) {
140 bfl = bfl->fl_block;
141 }
142
143 bfl->fl_block = fl;
144 fl->fl_block = NULL;
145
146 return;
147 }
148
149 static inline void locks_delete_block(struct file_lock *bfl,
150 struct file_lock *fl)
151 {
152 struct file_lock *tfl;
153
154 while ((tfl = bfl->fl_block) != NULL) {
155 if (tfl == fl) {
156 bfl->fl_block = fl->fl_block;
157 fl->fl_block = NULL;
158 return;
159 }
160 bfl = tfl;
161 }
162 return;
163 }
164
165
166
167
/* flock() system call entry point: build a BSD-style whole-file lock
 * from cmd and apply it to the open file fd.
 *
 * Returns 0 on success, -EBADF for a bad descriptor or access mode,
 * -EINVAL for an unrecognised cmd, or the error from flock_lock_file().
 */
asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file_lock file_lock;
	struct file *filp;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);

	if (!flock_make_lock(filp, &file_lock, cmd))
		return (-EINVAL);

	/* Taking a new lock requires the file open for read or write. */
	if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
		return (-EBADF);

	/* Wait only for a blocking lock request: an unlock or a LOCK_NB
	 * request never sleeps (either nested conditional yields 0). */
	return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
}
184
185
186
187
/* Implement the F_GETLK fcntl: report the first lock that would block
 * the caller's probe lock, or set l_type to F_UNLCK if none would.
 *
 * Returns 0 on success, -EBADF for a bad descriptor, -EINVAL for a
 * malformed request, or the verify_area() error.
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
	int error;
	struct flock flock;
	struct file *filp;
	struct file_lock *fl,file_lock;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return (error);

	memcpy_fromfs(&flock, l, sizeof(flock));
	/* F_UNLCK makes no sense as a probe type, and the obsolete
	 * flock()-emulation types are rejected here too. */
	if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
	    (flock.l_type == F_SHLCK))
		return (-EINVAL);

	if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* Find the first conflicting lock on the inode and report it. */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (posix_locks_conflict(&file_lock, fl)) {
			flock.l_pid = fl->fl_owner->pid;
			flock.l_start = fl->fl_start;
			/* l_len == 0 encodes "to end of file". */
			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
				fl->fl_end - fl->fl_start + 1;
			flock.l_whence = 0;
			flock.l_type = fl->fl_type;
			memcpy_tofs(l, &flock, sizeof(flock));
			return (0);
		}
	}

	/* No conflict: the requested lock could be placed. */
	flock.l_type = F_UNLCK;
	memcpy_tofs(l, &flock, sizeof(flock));
	return (0);
}
226
227
228
229
230
231
/* Implement the F_SETLK/F_SETLKW fcntls: apply, convert or remove a
 * POSIX lock on fd as described by *l.  F_SETLKW blocks until the lock
 * can be granted.
 *
 * Returns 0 on success, -EBADF for a bad descriptor or access mode,
 * -EINVAL for a malformed request, -EAGAIN when the file is mapped
 * shared while mandatory locking applies, or the error from
 * posix_lock_file()/verify_area().
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock file_lock;
	struct flock flock;
	struct inode *inode;

	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);

	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return (error);

	if (!(inode = filp->f_inode))
		return (-EINVAL);

	/* Mandatory locking (setgid without group-execute) may not be
	 * combined with a shared mapping of the same file: walk the
	 * circular i_mmap list looking for one. */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) {
		struct vm_area_struct *vma = inode->i_mmap;
		do {
			if (vma->vm_flags & VM_MAYSHARE)
				return (-EAGAIN);
			vma = vma->vm_next_share;
		} while (vma != inode->i_mmap);
	}

	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* The requested lock type must match the file's access mode. */
	switch (flock.l_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return (-EBADF);
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return (-EBADF);
		break;
	case F_SHLCK :
	case F_EXLCK :
#if 1
		/* Warn (at most five times per boot) about users of the
		 * obsolete flock()-emulation lock types. */
		{
			static int count = 0;
			if (count < 5) {
				count++;
				printk(KERN_WARNING
				       "fcntl_setlk() called by process %d with broken flock() emulation\n",
				       current->pid);
			}
		}
#endif
		if (!(filp->f_mode & 3))
			return (-EBADF);
		break;
	case F_UNLCK :
		break;
	}

	return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}
302
303
304
/* Remove locks when a file is being closed.  POSIX locks belonging to
 * task are always removed; flock()-style locks on filp go only when the
 * last reference to the file is being dropped (f_count == 1).
 */
void locks_remove_locks(struct task_struct *task, struct file *filp)
{
	struct file_lock *fl;
	struct file_lock **before;

	/* Walk the inode's lock list with a pointer-to-pointer so
	 * deletion does not restart the scan. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) != NULL) {
		if (((fl->fl_flags & F_POSIX) && (fl->fl_owner == task)) ||
		    ((fl->fl_flags & F_FLOCK) && (fl->fl_file == filp) &&
		     (filp->f_count == 1)))
			locks_delete_lock(before, 0);
		else
			before = &fl->fl_next;
	}

	return;
}
326
327 int locks_verify_locked(struct inode *inode)
328 {
329
330
331
332 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
333 return (locks_mandatory_locked(inode));
334 return (0);
335 }
336
337 int locks_mandatory_locked(struct inode *inode)
338 {
339 struct file_lock *fl;
340
341
342
343 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
344 if ((fl->fl_flags & F_POSIX) && (fl->fl_owner != current))
345 return (-EAGAIN);
346 }
347 return (0);
348 }
349
350 int locks_verify_area(int read_write, struct inode *inode, struct file *filp,
351 unsigned int offset, unsigned int count)
352 {
353
354
355
356 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
357 return (locks_mandatory_area(read_write, inode, filp, offset,
358 count));
359 return (0);
360 }
361
/* Enforce mandatory locking for an I/O request on [offset, offset+count).
 * Sleeps (unless the file is O_NONBLOCK) until no other process holds a
 * conflicting POSIX lock over the range.
 *
 * Returns 0 when the area is clear, -EAGAIN for a non-blocking file,
 * -ERESTARTSYS on a pending signal, or -EDEADLOCK when waiting would
 * deadlock.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, unsigned int offset,
			 unsigned int count)
{
	struct file_lock *fl;

repeat:
	/* Search for a conflicting lock.  flock() locks and our own
	 * POSIX locks never block mandatory I/O. */
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if ((fl->fl_flags & F_FLOCK) ||
		    ((fl->fl_flags & F_POSIX) && (fl->fl_owner == current)))
			continue;
		if (fl->fl_end < offset ||
		    fl->fl_start >= offset + count)
			continue;

		/* A write is blocked by any overlapping lock; a read
		 * only by a write lock. */
		if (read_write == FLOCK_VERIFY_WRITE ||
		    fl->fl_type == F_WRLCK) {
			if (filp && (filp->f_flags & O_NONBLOCK))
				return (-EAGAIN);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);
			if (locks_deadlock(current, fl->fl_owner))
				return (-EDEADLOCK);
			interruptible_sleep_on(&fl->fl_wait);
			if (current->signal & ~current->blocked)
				return (-ERESTARTSYS);

			/* If the mode bits no longer select mandatory
			 * locking, stop enforcing; otherwise the lock
			 * list may have changed while we slept, so
			 * rescan from the start. */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
				break;
			goto repeat;
		}
	}
	return (0);
}
406
407
408
409
/* Translate a user-space struct flock into a kernel file_lock.
 *
 * Returns 1 with *fl filled in on success, or 0 when the request is
 * malformed (unknown type or whence, or a negative resulting range).
 */
static int posix_make_lock(struct file *filp, struct file_lock *fl,
			   struct flock *l)
{
	off_t start;

	fl->fl_flags = F_POSIX;

	switch (l->l_type) {
	case F_RDLCK :
	case F_WRLCK :
	case F_UNLCK :
		fl->fl_type = l->l_type;
		break;
	/* The obsolete flock()-emulation types map to plain read/write
	 * locks but are tagged F_BROKEN so they can be told apart. */
	case F_SHLCK :
		fl->fl_type = F_RDLCK;
		fl->fl_flags |= F_BROKEN;
		break;
	case F_EXLCK :
		fl->fl_type = F_WRLCK;
		fl->fl_flags |= F_BROKEN;
		break;
	default :
		return (0);
	}

	/* l_whence: 0 = start of file, 1 = current position, 2 = EOF. */
	switch (l->l_whence) {
	case 0 :
		start = 0;
		break;
	case 1 :
		start = filp->f_pos;
		break;
	case 2 :
		start = filp->f_inode->i_size;
		break;
	default :
		return (0);
	}

	if (((start += l->l_start) < 0) || (l->l_len < 0))
		return (0);
	fl->fl_start = start;	/* range is stored absolute */
	/* l_len == 0 (or an end that overflows off_t) means lock to EOF. */
	if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
		fl->fl_end = OFFSET_MAX;

	fl->fl_file = filp;
	fl->fl_owner = current;
	fl->fl_wait = NULL;

	return (1);
}
461
462
463
464
465 static int flock_make_lock(struct file *filp, struct file_lock *fl,
466 unsigned int cmd)
467 {
468 if (!filp->f_inode)
469 return (0);
470
471 switch (cmd & ~LOCK_NB) {
472 case LOCK_SH :
473 fl->fl_type = F_RDLCK;
474 break;
475 case LOCK_EX :
476 fl->fl_type = F_WRLCK;
477 break;
478 case LOCK_UN :
479 fl->fl_type = F_UNLCK;
480 break;
481 default :
482 return (0);
483 }
484
485 fl->fl_flags = F_FLOCK;
486 fl->fl_start = 0;
487 fl->fl_end = OFFSET_MAX;
488 fl->fl_file = filp;
489 fl->fl_owner = current;
490 fl->fl_wait = NULL;
491
492 return (1);
493 }
494
495
496
497
498 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
499 {
500
501
502
503 if ((sys_fl->fl_flags & F_POSIX) &&
504 (caller_fl->fl_owner == sys_fl->fl_owner))
505 return (0);
506
507 return (locks_conflict(caller_fl, sys_fl));
508 }
509
510
511
512
513 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
514 {
515
516
517
518 if ((sys_fl->fl_flags & F_FLOCK) &&
519 (caller_fl->fl_file == sys_fl->fl_file))
520 return (0);
521
522 return (locks_conflict(caller_fl, sys_fl));
523 }
524
525
526
527
528 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
529 {
530 if (!locks_overlap(caller_fl, sys_fl))
531 return (0);
532
533 switch (caller_fl->fl_type) {
534 case F_RDLCK :
535 return (sys_fl->fl_type == F_WRLCK);
536
537 case F_WRLCK :
538 return (1);
539
540 default:
541 printk("locks_conflict(): impossible lock type - %d\n",
542 caller_fl->fl_type);
543 break;
544 }
545 return (0);
546 }
547
548
549
550 static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
551 {
552 return ((fl1->fl_end >= fl2->fl_start) &&
553 (fl2->fl_end >= fl1->fl_start));
554 }
555
556
557
558
559
560
561
562
563
564
565
/* Would my_task blocking on a lock held by blocked_task close a cycle
 * of waiters (deadlock)?  Walks the global lock table following the
 * chain "blocked_task sleeps on a lock whose owner sleeps on ..." and
 * returns 1 when the chain leads back to my_task, 0 otherwise.
 */
static int locks_deadlock(struct task_struct *my_task,
			  struct task_struct *blocked_task)
{
	struct wait_queue *dlock_wait;
	struct file_lock *fl;

next_task:
	if (my_task == blocked_task)
		return (1);
	for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
		if (fl->fl_owner == NULL || fl->fl_wait == NULL)
			continue;
		/* The wait queue is circular: scan every sleeper on this
		 * lock looking for blocked_task. */
		dlock_wait = fl->fl_wait;
		do {
			if (dlock_wait->task == blocked_task) {
				if (fl->fl_owner == my_task) {
					return (1);
				}
				/* Follow the chain through this lock's
				 * owner and restart the search. */
				blocked_task = fl->fl_owner;
				goto next_task;
			}
			dlock_wait = dlock_wait->next;
		} while (dlock_wait != fl->fl_wait);
	}
	return (0);
}
592
593
594
595
596
/* Apply or remove a flock()-style lock on filp.  An existing flock lock
 * taken through the same struct file is converted in place; if wait is
 * nonzero the caller sleeps until conflicting locks go away.
 *
 * Returns 0 on success, -ENOLCK when lock memory runs out, -EAGAIN
 * when the lock is busy (or a blocking conversion would deadlock), or
 * -ERESTARTSYS when interrupted by a signal.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock **before;
	int change = 0;

	/* flock locks sit at the front of the inode list; look for an
	 * existing lock taken through the same struct file. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && (fl->fl_flags & F_FLOCK)) {
		if (caller->fl_file == fl->fl_file) {
			if (caller->fl_type == fl->fl_type)
				return (0);
			change = 1;
			break;
		}
		before = &fl->fl_next;
	}

	/* Conversion: drop the old lock first, letting its waiters run
	 * unless this request is itself an unlock. */
	if (change)
		locks_delete_lock(before, caller->fl_type != F_UNLCK);
	if (caller->fl_type == F_UNLCK)
		return (0);
	if ((new_fl = locks_alloc_lock(caller)) == NULL)
		return (-ENOLCK);
repeat:
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!flock_locks_conflict(new_fl, fl))
			continue;

		if (wait) {
			if (current->signal & ~current->blocked) {
				/* A signal is already pending: give up
				 * before blocking. */
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}

			/* Sleeping here would create a cycle of
			 * waiters. */
			if (locks_deadlock(new_fl->fl_owner, fl->fl_owner)) {
				locks_free_lock(new_fl);
				return (-EAGAIN);
			}
			locks_insert_block(fl, new_fl);
			interruptible_sleep_on(&new_fl->fl_wait);
			wake_up(&new_fl->fl_wait);
			if (current->signal & ~current->blocked) {
				/* Interrupted while blocked: unhook
				 * ourselves from fl's block chain before
				 * bailing out. */
				locks_delete_block(fl, new_fl);
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}
			/* The lock list may have changed while asleep;
			 * rescan for conflicts from the start. */
			goto repeat;
		}

		locks_free_lock(new_fl);
		return (-EAGAIN);
	}
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	return (0);
}
673
674
675
676
677
678
679
680
681
682
683
684
685
/* Apply a POSIX byte-range lock.  Adjacent or overlapping locks held by
 * the same owner with the same type are merged; overlapping locks of a
 * different type are trimmed or split around the new range.  F_UNLCK
 * requests reuse the same machinery to carve holes.
 *
 * Returns 0 on success, -ENOLCK when lock memory runs out, -EAGAIN
 * when a conflicting lock exists and wait is 0, -EDEADLOCK when waiting
 * would deadlock, or -ERESTARTSYS on a signal.
 */
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int added = 0;

	/* First block until no other process holds a conflicting lock. */
	if (caller->fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!posix_locks_conflict(caller, fl))
				continue;
			if (wait) {
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				if (locks_deadlock(caller->fl_owner, fl->fl_owner))
					return (-EDEADLOCK);
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				goto repeat;
			}
			return (-EAGAIN);
		}
	}

	before = &filp->f_inode->i_flock;

	/* Skip flock locks and POSIX locks owned by other processes; the
	 * caller's own locks are kept contiguous after them. */
	while ((fl = *before) && ((fl->fl_flags & F_FLOCK) ||
				  (caller->fl_owner != fl->fl_owner))) {
		before = &fl->fl_next;
	}

	/* Walk the caller's own locks, merging or splitting around the
	 * new range. */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {

		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce touching ranges (note the
			 * -1/+1: adjacent ranges merge too). */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/* Locks are kept sorted, so once fl starts past
			 * the new range nothing further can merge. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* Grow both descriptions to the union of the two
			 * ranges, then let the existing entry stand for
			 * the new lock. */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Different type: only true overlaps matter. */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < caller->fl_start)
				left = fl;

			/* A lock extending past the new range keeps its
			 * tail; remember it for trimming at the end. */
			if (fl->fl_end > caller->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= caller->fl_start) {
				/* The old lock is completely covered by
				 * the new range. */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}

				/* Reuse the existing entry for the new
				 * lock, waking its sleepers since the
				 * lock type is changing. */
				wake_up(&fl->fl_wait);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}

		/* Advance to the owner's next lock. */
next_lock:
		before = &fl->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK)
			return (0);
		if ((new_fl = locks_alloc_lock(caller)) == NULL)
			return (-ENOLCK);
		locks_insert_lock(before, new_fl);
	}
	if (right) {
		if (left == right) {
			/* One old lock surrounds the new range: it must
			 * be split in two, one piece on each side. */
			if ((left = locks_alloc_lock(right)) == NULL) {
				if (!added)
					locks_delete_lock(before, 0);
				return (-ENOLCK);
			}
			locks_insert_lock(before, left);
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return (0);
}
834
835
836
837
838
839
840 static struct file_lock *locks_alloc_lock(struct file_lock *fl)
841 {
842 struct file_lock *tmp;
843
844
845 if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
846 GFP_ATOMIC)) == NULL)
847 return (tmp);
848
849 tmp->fl_nextlink = NULL;
850 tmp->fl_prevlink = NULL;
851 tmp->fl_next = NULL;
852 tmp->fl_block = NULL;
853 tmp->fl_flags = fl->fl_flags;
854 tmp->fl_owner = fl->fl_owner;
855 tmp->fl_file = fl->fl_file;
856 tmp->fl_wait = NULL;
857 tmp->fl_type = fl->fl_type;
858 tmp->fl_start = fl->fl_start;
859 tmp->fl_end = fl->fl_end;
860
861 return (tmp);
862 }
863
864
865
866
867
868 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
869 {
870 fl->fl_nextlink = file_lock_table;
871 fl->fl_prevlink = NULL;
872 if (file_lock_table != NULL)
873 file_lock_table->fl_prevlink = fl;
874 file_lock_table = fl;
875 fl->fl_next = *pos;
876 *pos = fl;
877
878 return;
879 }
880
881
882
883
884
885
886
887
888
889
/* Unlink the lock at *fl_p from both the inode list and the global
 * doubly-linked lock table, wake every process blocked on it, and free
 * it.  When wait is nonzero, sleep after waking each blocked waiter so
 * it gets a chance to run first.
 */
static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *pfl;
	struct file_lock *nfl;

	/* Unlink from the inode's singly-linked list. */
	fl = *fl_p;
	*fl_p = fl->fl_next;
	pfl = fl->fl_prevlink;
	nfl = fl->fl_nextlink;

	/* Unlink from the global doubly-linked table. */
	if (nfl != NULL)
		nfl->fl_prevlink = pfl;

	if (pfl != NULL)
		pfl->fl_nextlink = nfl;
	else
		file_lock_table = nfl;

	/* Detach and wake each lock blocked on this one, in order. */
	while ((nfl = fl->fl_block) != NULL) {
		fl->fl_block = nfl->fl_block;
		nfl->fl_block = NULL;
		wake_up(&nfl->fl_wait);
		if (wait)
			sleep_on(&nfl->fl_wait);
	}

	wake_up(&fl->fl_wait);
	kfree(fl);

	return;
}
922
923
/* Format one lock, its link pointers, and the pids sleeping on it into
 * the buffer at p (for the /proc locks listing).  id is the lock's
 * position in the table, pfx marks blocked entries.  Returns the
 * advanced buffer pointer.
 */
static char *lock_get_status(struct file_lock *fl, char *p, int id, char *pfx)
{
	struct wait_queue *wt;

	p += sprintf(p, "%d:%s ", id, pfx);
	if (fl->fl_flags & F_POSIX) {
		p += sprintf(p, "%s %s ",
			     (fl->fl_flags & F_BROKEN) ? "BROKEN" : "POSIX ",
			     ((fl->fl_file->f_inode->i_mode & (S_IXGRP | S_ISGID))
			      == S_ISGID) ? "MANDATORY" : "ADVISORY ");
	}
	else {
		p += sprintf(p, "FLOCK ADVISORY ");
	}
	p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
	p += sprintf(p, "%d %04x:%ld %ld %ld ",
		     fl->fl_owner->pid, fl->fl_file->f_inode->i_dev,
		     fl->fl_file->f_inode->i_ino, fl->fl_start,
		     fl->fl_end);
	/* Debug dump of the five link pointers, then a second line with
	 * the pids of processes sleeping on this lock. */
	p += sprintf(p, "%08lx %08lx %08lx %08lx %08lx\n%d:%s",
		     (long)fl, (long)fl->fl_prevlink, (long)fl->fl_nextlink,
		     (long)fl->fl_next, (long)fl->fl_block, id, pfx);
	if ((wt = fl->fl_wait) != NULL) {
		/* Circular wait queue: stop when back at the head. */
		do {
			p += sprintf(p, " %d", wt->task->pid);
			wt = wt->next;
		} while (wt != fl->fl_wait);
	}
	p += sprintf(p, "\n");
	return (p);
}
955
956 int get_locks_status(char *buf)
957 {
958 struct file_lock *fl;
959 struct file_lock *bfl;
960 char *p;
961 int i;
962
963 p = buf;
964 for (fl = file_lock_table, i = 1; fl != NULL; fl = fl->fl_nextlink, i++) {
965 p = lock_get_status(fl, p, i, "");
966 for (bfl = fl; bfl->fl_block != NULL; bfl = bfl->fl_block)
967 p = lock_get_status(bfl->fl_block, p, i, " ->");
968 }
969 return (p - buf);
970 }
971