/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limit on the number of active file locks.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and with
 * both BSD and SVR4 practice.
 * Andy Walker (andy@keo.kvaerner.no), February 14, 1995
 *
 * Scrapped the free list, which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@keo.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - F_FLOCK and F_POSIX.
 *
 * F_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * F_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * F_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * F_FLOCK locks never deadlock; an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 *
 * NOTE:
 * I do not intend to implement mandatory locks unless demand is *HUGE*.
 * They are not in BSD, and POSIX.1 does not require them. I have never
 * seen any public code that relied on them. As Kelly Carmichael suggests
 * above, mandatory locks require lots of changes elsewhere and I am
 * reluctant to start something so drastic for so little gain.
 * Andy Walker (andy@keo.kvaerner.no), June 09, 1995
 */
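/* A minimal userspace sketch (illustrative only, not part of this file's
 * build) of the F_FLOCK personality described above: the lock belongs to
 * the open file, so after fork() parent and child share it, and it only
 * disappears when the last descriptor referring to that open file is
 * closed. The file name and the lack of error handling are assumptions
 * made for the example.
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>

static void flock_demo(void)
{
        int fd = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return;
        if (flock(fd, LOCK_EX) == -1)   /* exclusive whole-file lock */
                return;
        if (fork() == 0) {
                /* The child shares the same open file, hence the same
                 * lock; closing its copy of the descriptor does not drop
                 * the lock while the parent still holds one.
                 */
                close(fd);
                _exit(0);
        }
        flock(fd, LOCK_UN);             /* or simply close(fd) */
        close(fd);
}
#endif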

#include <asm/segment.h>

#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>

#define OFFSET_MAX	((off_t)0x7fffffff)	/* FIXME: move elsewhere? */

static int flock_make_lock(struct file *filp, struct file_lock *fl,
                           unsigned int cmd);
static int posix_make_lock(struct file *filp, struct file_lock *fl,
                           struct flock *l);
static int flock_locks_conflict(struct file_lock *caller_fl,
                                struct file_lock *sys_fl);
static int posix_locks_conflict(struct file_lock *caller_fl,
                                struct file_lock *sys_fl);
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
static int flock_lock_file(struct file *filp, struct file_lock *caller,
                           unsigned int wait);
static int posix_lock_file(struct file *filp, struct file_lock *caller,
                           unsigned int wait);
static int posix_locks_deadlock(struct task_struct *my_task,
                                struct task_struct *blocked_task);
static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2);

static struct file_lock *locks_alloc_lock(struct file_lock *fl);
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
static void locks_delete_lock(struct file_lock **fl, unsigned int wait);
static void locks_insert_block(struct file_lock **block, struct file_lock *fl);

static struct file_lock *file_lock_table = NULL;

/* flock() system call entry point. Apply an FLOCK-style lock to
 * an open file descriptor.
 */
asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
{
        struct file_lock file_lock;
        struct file *filp;

        if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
                return (-EBADF);

        if (!flock_make_lock(filp, &file_lock, cmd))
                return (-EINVAL);

        if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3))
                return (-EBADF);

        return (flock_lock_file(filp, &file_lock,
                                cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
}

/* Report the first existing lock that would conflict with l. This implements
 * the F_GETLK command of fcntl().
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
        int error;
        struct flock flock;
        struct file *filp;
        struct file_lock *fl, file_lock;

        if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
                return (-EBADF);
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return (error);

        memcpy_fromfs(&flock, l, sizeof(flock));
        if ((flock.l_type == F_UNLCK) || (flock.l_type == F_EXLCK) ||
            (flock.l_type == F_SHLCK))
                return (-EINVAL);

        if (!posix_make_lock(filp, &file_lock, &flock))
                return (-EINVAL);

        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (posix_locks_conflict(&file_lock, fl)) {
                        flock.l_pid = fl->fl_owner->pid;
                        flock.l_start = fl->fl_start;
                        flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
                                fl->fl_end - fl->fl_start + 1;
                        flock.l_whence = 0;
                        flock.l_type = fl->fl_type;
                        memcpy_tofs(l, &flock, sizeof(flock));
                        return (0);
                }
        }

        flock.l_type = F_UNLCK;		/* no conflict found */
        memcpy_tofs(l, &flock, sizeof(flock));
        return (0);
}
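
/* Userspace sketch of F_GETLK as implemented above (illustrative only, not
 * built here). On return l_type is either F_UNLCK, meaning the probed range
 * could be locked right now, or it describes the first conflicting lock,
 * including its owner's pid. The helper name and output are assumptions.
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>
#include <stdio.h>

static void getlk_demo(int fd)
{
        struct flock probe;

        probe.l_type = F_WRLCK;         /* "could I write-lock ...     */
        probe.l_whence = 0;             /* SEEK_SET                    */
        probe.l_start = 0;
        probe.l_len = 0;                /*  ... the whole file?"       */

        if (fcntl(fd, F_GETLK, &probe) == -1)
                return;
        if (probe.l_type == F_UNLCK)
                printf("no conflicting lock\n");
        else
                printf("conflict: pid %d holds a lock starting at %ld\n",
                       (int) probe.l_pid, (long) probe.l_start);
}
#endif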

/* Apply the lock described by l to an open file descriptor. This implements
 * both the F_SETLK and F_SETLKW commands of fcntl(). It also emulates flock()
 * in a pretty broken way for older C libraries.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
        int error;
        struct file *filp;
        struct file_lock file_lock;
        struct flock flock;

        /*
         * Get arguments and validate them ...
         */

        if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
                return (-EBADF);

        error = verify_area(VERIFY_READ, l, sizeof(*l));
        if (error)
                return (error);

        memcpy_fromfs(&flock, l, sizeof(flock));
        if (!posix_make_lock(filp, &file_lock, &flock))
                return (-EINVAL);

        switch (flock.l_type) {
        case F_RDLCK :
                if (!(filp->f_mode & 1))
                        return (-EBADF);
                break;
        case F_WRLCK :
                if (!(filp->f_mode & 2))
                        return (-EBADF);
                break;
        case F_SHLCK :
        case F_EXLCK :
                if (!(filp->f_mode & 3))
                        return (-EBADF);
                break;
        case F_UNLCK :
                break;
        }

        return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}
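
/* Userspace sketch of F_SETLK/F_SETLKW as implemented above (illustrative
 * only, not built here). F_SETLK fails with EAGAIN if a conflicting lock
 * exists; F_SETLKW sleeps until the range is free and may return EDEADLOCK
 * if waiting would deadlock. The helper name and the 100-byte range are
 * assumptions made for the example.
 */
#if 0	/* example only, compiled out */
#include <fcntl.h>

static int setlk_demo(int fd)
{
        struct flock fl;

        fl.l_type = F_WRLCK;            /* exclusive record lock ...    */
        fl.l_whence = 0;                /* SEEK_SET                     */
        fl.l_start = 0;
        fl.l_len = 100;                 /* ... on the first 100 bytes   */

        if (fcntl(fd, F_SETLKW, &fl) == -1)     /* block until granted  */
                return -1;

        /* ... use the locked region ... */

        fl.l_type = F_UNLCK;                    /* release the range    */
        return fcntl(fd, F_SETLK, &fl);
}
#endif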

/* This function is called when the file is closed.
 */
void locks_remove_locks(struct task_struct *task, struct file *filp)
{
        struct file_lock *fl;
        struct file_lock **before;

        /* For POSIX locks we free all locks on this file for the given task.
         * For FLOCK we only free locks on this *open* file if it is the last
         * close on that file.
         */
        before = &filp->f_inode->i_flock;
        while ((fl = *before) != NULL) {
                if (((fl->fl_flags == F_POSIX) && (fl->fl_owner == task)) ||
                    ((fl->fl_flags == F_FLOCK) && (fl->fl_file == filp) &&
                     (filp->f_count == 1)))
                        locks_delete_lock(before, 0);
                else
                        before = &fl->fl_next;
        }

        return;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int posix_make_lock(struct file *filp, struct file_lock *fl,
                           struct flock *l)
{
        off_t start;

        if (!filp->f_inode)     /* just in case */
                return (0);

        switch (l->l_type) {
        case F_RDLCK :
        case F_WRLCK :
        case F_UNLCK :
                fl->fl_type = l->l_type;
                break;
        case F_SHLCK :
                fl->fl_type = F_RDLCK;
                break;
        case F_EXLCK :
                fl->fl_type = F_WRLCK;
                break;
        default :
                return (0);
        }

        switch (l->l_whence) {
        case 0 : /*SEEK_SET*/
                start = 0;
                break;
        case 1 : /*SEEK_CUR*/
                start = filp->f_pos;
                break;
        case 2 : /*SEEK_END*/
                start = filp->f_inode->i_size;
                break;
        default :
                return (0);
        }

        if (((start += l->l_start) < 0) || (l->l_len < 0))
                return (0);
        fl->fl_start = start;   /* we record the absolute position */
        if ((l->l_len == 0) || ((fl->fl_end = start + l->l_len - 1) < 0))
                fl->fl_end = OFFSET_MAX;

        fl->fl_flags = F_POSIX;
        fl->fl_file = filp;
        fl->fl_owner = current;
        fl->fl_wait = NULL;     /* just for cleanliness */

        return (1);
}
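
/* A worked example of the conversion above (illustrative numbers only):
 * with f_pos == 200, a caller passing l_whence == 1 (SEEK_CUR),
 * l_start == 50 and l_len == 10 yields fl_start == 250 and fl_end == 259.
 * Passing l_len == 0 instead yields fl_end == OFFSET_MAX, i.e. the lock
 * runs to the end of the file however much it grows.
 */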

/* Verify a call to flock() and fill in a file_lock structure with an
 * appropriate FLOCK lock.
 */
static int flock_make_lock(struct file *filp, struct file_lock *fl,
                           unsigned int cmd)
{
        if (!filp->f_inode)     /* just in case */
                return (0);

        switch (cmd & ~LOCK_NB) {
        case LOCK_SH :
                fl->fl_type = F_RDLCK;
                break;
        case LOCK_EX :
                fl->fl_type = F_WRLCK;
                break;
        case LOCK_UN :
                fl->fl_type = F_UNLCK;
                break;
        default :
                return (0);
        }

        fl->fl_flags = F_FLOCK;
        fl->fl_start = 0;
        fl->fl_end = OFFSET_MAX;
        fl->fl_file = filp;
        fl->fl_owner = current;
        fl->fl_wait = NULL;     /* just for cleanliness */

        return (1);
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific checking
 * before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* POSIX locks owned by the same process do not conflict with
         * each other.
         */
        if ((sys_fl->fl_flags == F_POSIX) &&
            (caller_fl->fl_owner == sys_fl->fl_owner))
                return (0);

        return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific checking
 * before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        /* FLOCK locks referring to the same filp do not conflict with
         * each other.
         */
        if ((sys_fl->fl_flags == F_FLOCK) &&
            (caller_fl->fl_file == sys_fl->fl_file))
                return (0);

        return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality:
 * check for overlapping ranges and shared/exclusive status.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        if (!locks_overlap(caller_fl, sys_fl))
                return (0);

        switch (caller_fl->fl_type) {
        case F_RDLCK :
                return (sys_fl->fl_type == F_WRLCK);

        case F_WRLCK :
                return (1);

        default:
                printk("locks_conflict(): impossible lock type - %d\n",
                       caller_fl->fl_type);
                break;
        }
        return (0);     /* This should never happen */
}

/* Check if two locks overlap each other.
 */
static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
        return ((fl1->fl_end >= fl2->fl_start) &&
                (fl2->fl_end >= fl1->fl_start));
}
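
/* For instance (illustrative numbers): a lock covering bytes 0-99 and one
 * covering 50-149 overlap (99 >= 50 and 149 >= 0), while 0-99 and 100-199
 * do not (99 >= 100 is false). Adjacent-but-disjoint ranges therefore never
 * conflict; merging adjacent POSIX locks of the same type is handled
 * separately in posix_lock_file().
 */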

/* This function tests for a deadlock condition before putting a process to
 * sleep. The detection scheme is recursive... we may need a test to make it
 * exit if the function gets stuck due to bad lock data. 4.4 BSD uses a
 * maximum depth of 50 for this.
 */
static int posix_locks_deadlock(struct task_struct *my_task,
                                struct task_struct *blocked_task)
{
        struct wait_queue *dlock_wait;
        struct file_lock *fl;

        for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
                if (fl->fl_owner == NULL)
                        continue;       /* Should never happen! */
                if (fl->fl_owner != my_task)
                        continue;
                if (fl->fl_wait == NULL)
                        continue;       /* no queues */
                dlock_wait = fl->fl_wait;
                do {
                        if (dlock_wait->task != NULL) {
                                if (dlock_wait->task == blocked_task)
                                        return (-EDEADLOCK);
                                if (posix_locks_deadlock(dlock_wait->task, blocked_task))
                                        return (-EDEADLOCK);
                        }
                        dlock_wait = dlock_wait->next;
                } while (dlock_wait != fl->fl_wait);
        }
        return (0);
}
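
/* Example of the cycle this catches (illustrative ranges): process A holds
 * a lock on bytes 0-9 and is asleep waiting for bytes 10-19, which B holds.
 * If B now asks to wait for bytes 0-9, the scan above finds A sleeping on
 * the wait queue of one of B's locks, so B's request fails with EDEADLOCK
 * instead of both processes sleeping forever.
 */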

/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorted
 * first in an inode's lock list, and always insert new locks at the head
 * of the list.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
                           unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *new_fl;
        struct file_lock **before;
        int change = 0;

        /* This is a compact little algorithm based on us always placing
         * FLOCK locks at the front of the list.
         */
        before = &filp->f_inode->i_flock;
        while ((fl = *before) && (fl->fl_flags == F_FLOCK)) {
                if (caller->fl_file == fl->fl_file) {
                        if (caller->fl_type == fl->fl_type)
                                return (0);
                        change = 1;
                        break;
                }
                before = &fl->fl_next;
        }
        /* change means that we are either changing the type of an existing
         * lock or unlocking it.
         */
        if (change)
                locks_delete_lock(before, caller->fl_type != F_UNLCK);
        if (caller->fl_type == F_UNLCK)
                return (0);
        if ((new_fl = locks_alloc_lock(caller)) == NULL)
                return (-ENOLCK);
repeat:
        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!flock_locks_conflict(new_fl, fl))
                        continue;

                if (wait) {
                        if (current->signal & ~current->blocked) {
                                locks_delete_lock(&new_fl, 0);
                                return (-ERESTARTSYS);
                        }
                        locks_insert_block(&fl->fl_block, new_fl);
                        interruptible_sleep_on(&new_fl->fl_wait);
                        wake_up(&new_fl->fl_wait);
                        if (current->signal & ~current->blocked) {
                                locks_delete_lock(&new_fl, 0);
                                return (-ERESTARTSYS);
                        }
                        goto repeat;
                }
                locks_delete_lock(&new_fl, 0);
                return (-EAGAIN);
        }
        locks_insert_lock(&filp->f_inode->i_flock, new_fl);
        return (0);
}
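
/* Userspace view of the upgrade path above (illustrative only, not built
 * here): converting a shared flock() lock to an exclusive one deletes the
 * old lock first, so the upgrade is not atomic and another process may
 * grab the lock in the window before the exclusive request is granted.
 * The helper name is an assumption made for the example.
 */
#if 0	/* example only, compiled out */
#include <sys/file.h>

static int flock_upgrade(int fd)
{
        if (flock(fd, LOCK_SH) == -1)           /* shared lock          */
                return -1;
        /* The shared lock is dropped before the exclusive one is applied,
         * so this call may block while some other holder runs first.
         */
        return flock(fd, LOCK_EX);              /* upgrade, may sleep   */
}
#endif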

/* Add a POSIX style lock to a file.
 * We merge adjacent locks whenever possible. POSIX locks come after FLOCK
 * locks in the list and are sorted by owner task, then by starting address.
 *
 * Kai Petzke writes:
 * To make freeing a lock much faster, we keep a pointer to the lock before
 * the actual one. But the real gain of the new coding was that lock_it()
 * and unlock_it() became one function.
 *
 * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 */

static int posix_lock_file(struct file *filp, struct file_lock *caller,
                           unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *new_fl;
        struct file_lock *left = NULL;
        struct file_lock *right = NULL;
        struct file_lock **before;
        int added = 0;

        if (caller->fl_type != F_UNLCK) {
repeat:
                for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                        if (!posix_locks_conflict(caller, fl))
                                continue;
                        if (wait) {
                                if (current->signal & ~current->blocked)
                                        return (-ERESTARTSYS);
                                if (fl->fl_flags == F_POSIX)
                                        if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
                                                return (-EDEADLOCK);
                                interruptible_sleep_on(&fl->fl_wait);
                                if (current->signal & ~current->blocked)
                                        return (-ERESTARTSYS);
                                goto repeat;
                        }
                        return (-EAGAIN);
                }
        }
        /*
         * Find the first old lock with the same owner as the new lock.
         */

        before = &filp->f_inode->i_flock;

        /* First skip FLOCK locks and locks owned by other processes.
         */
        while ((fl = *before) && ((fl->fl_flags == F_FLOCK) ||
                                  (caller->fl_owner != fl->fl_owner)))
                before = &fl->fl_next;

        /* Process locks with this owner.
         */
        while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {
                /* Detect adjacent or overlapping regions (if same lock type)
                 */
                if (caller->fl_type == fl->fl_type) {
                        if (fl->fl_end < caller->fl_start - 1)
                                goto next_lock;
                        /* If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
                        if (fl->fl_start > caller->fl_end + 1)
                                break;

                        /* If we come here, the new and old lock are of the
                         * same type and adjacent or overlapping. Make one
                         * lock yielding from the lower start address of both
                         * locks to the higher end address.
                         */
                        if (fl->fl_start > caller->fl_start)
                                fl->fl_start = caller->fl_start;
                        else
                                caller->fl_start = fl->fl_start;
                        if (fl->fl_end < caller->fl_end)
                                fl->fl_end = caller->fl_end;
                        else
                                caller->fl_end = fl->fl_end;
                        if (added) {
                                locks_delete_lock(before, 0);
                                continue;
                        }
                        caller = fl;
                        added = 1;
                        goto next_lock;
                }
                /* Processing for different lock types is a bit more complex.
                 */
                if (fl->fl_end < caller->fl_start)
                        goto next_lock;
                if (fl->fl_start > caller->fl_end)
                        break;
                if (caller->fl_type == F_UNLCK)
                        added = 1;
                if (fl->fl_start < caller->fl_start)
                        left = fl;
                /* If the next lock in the list has a higher end address than
                 * the new one, insert the new one here.
                 */
                if (fl->fl_end > caller->fl_end) {
                        right = fl;
                        break;
                }
                if (fl->fl_start >= caller->fl_start) {
                        /* The new lock completely replaces an old one (This
                         * may happen several times).
                         */
                        if (added) {
                                locks_delete_lock(before, 0);
                                continue;
                        }
                        /* Replace the old lock with the new one. Wake up
                         * anybody waiting for the old one, as the change in
                         * lock type might satisfy his needs.
                         */
                        wake_up(&fl->fl_wait);
                        fl->fl_start = caller->fl_start;
                        fl->fl_end = caller->fl_end;
                        fl->fl_type = caller->fl_type;
                        caller = fl;
                        added = 1;
                }
                /* Go on to next lock.
                 */
next_lock:
                before = &(*before)->fl_next;
        }

        if (!added) {
                if (caller->fl_type == F_UNLCK)
                        return (0);
                if ((new_fl = locks_alloc_lock(caller)) == NULL)
                        return (-ENOLCK);
                locks_insert_lock(before, new_fl);
        }
        if (right) {
                if (left == right) {
                        /* The new lock breaks the old one in two pieces, so
                         * we have to allocate one more lock (in this case,
                         * even F_UNLCK may fail!).
                         */
                        if ((left = locks_alloc_lock(right)) == NULL) {
                                if (!added)
                                        locks_delete_lock(before, 0);
                                return (-ENOLCK);
                        }
                        locks_insert_lock(before, left);
                }
                right->fl_start = caller->fl_end + 1;
        }
        if (left)
                left->fl_end = caller->fl_start - 1;
        return (0);
}
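
/* Worked examples of the list surgery above (illustrative ranges): if a
 * process holds a read lock on bytes 0-99 and asks for a read lock on
 * 100-199, the two are adjacent and of the same type, so they are merged
 * into a single 0-199 lock. If it instead unlocks bytes 40-59 out of a
 * 0-99 read lock, left == right, one extra lock is allocated, and the
 * original region is split into 0-39 and 60-99.
 */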

/* Allocate memory for a new lock and initialize its fields from
 * fl. The lock is not inserted into any lists until locks_insert_lock()
 * or locks_insert_block() is called.
 */

static struct file_lock *locks_alloc_lock(struct file_lock *fl)
{
        struct file_lock *tmp;

        /* Okay, let's make a new file_lock structure... */
        if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
                                               GFP_KERNEL)) == NULL)
                return (tmp);

        tmp->fl_nextlink = NULL;
        tmp->fl_prevlink = NULL;
        tmp->fl_next = NULL;
        tmp->fl_block = NULL;
        tmp->fl_flags = fl->fl_flags;
        tmp->fl_owner = fl->fl_owner;
        tmp->fl_file = fl->fl_file;
        tmp->fl_wait = NULL;
        tmp->fl_type = fl->fl_type;
        tmp->fl_start = fl->fl_start;
        tmp->fl_end = fl->fl_end;

        return (tmp);
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */

static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
        fl->fl_nextlink = file_lock_table;
        fl->fl_prevlink = NULL;
        if (file_lock_table != NULL)
                file_lock_table->fl_prevlink = fl;
        file_lock_table = fl;
        fl->fl_next = *pos;     /* insert into file's list */
        *pos = fl;

        return;
}

/* Delete a lock and free it.
 * First remove our lock from the lock lists. Then remove all the blocked
 * locks from our blocked list, waking up the processes that own them. If
 * told to wait, then sleep on each of these locks' wait queues. Each blocked
 * process will wake up and immediately wake up its own wait queue, allowing
 * us to be scheduled again. Lastly, wake up our own wait queue before freeing
 * the file_lock structure.
 */

static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
{
        struct file_lock *fl;
        struct file_lock *bfl;

        fl = *fl_p;
        *fl_p = (*fl_p)->fl_next;

        if (fl->fl_nextlink != NULL)
                fl->fl_nextlink->fl_prevlink = fl->fl_prevlink;

        if (fl->fl_prevlink != NULL)
                fl->fl_prevlink->fl_nextlink = fl->fl_nextlink;
        else
                file_lock_table = fl->fl_nextlink;

        while ((bfl = fl->fl_block) != NULL) {
                fl->fl_block = bfl->fl_block;
                bfl->fl_block = NULL;
                wake_up(&bfl->fl_wait);
                if (wait)
                        sleep_on(&bfl->fl_wait);
        }

        wake_up(&fl->fl_wait);
        kfree(fl);

        return;
}

/* Add lock fl to the blocked list pointed to by block.
 * We search to the end of the existing list and insert the new
 * struct. This ensures processes will be woken up in the order they
 * blocked.
 * NOTE: nowhere does the documentation insist that processes be woken
 * up in this order, but it seems like the reasonable thing to do.
 * If the blocked list gets long then this search could get expensive,
 * in which case we could consider waking the processes up in reverse
 * order, or making the blocked list a doubly linked circular list.
 */
static void locks_insert_block(struct file_lock **block, struct file_lock *fl)
{
        struct file_lock *bfl;

        while ((bfl = *block) != NULL)
                block = &bfl->fl_block;

        *block = fl;
        fl->fl_block = NULL;

        return;
}