root/fs/locks.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. fcntl_getlk
  2. fcntl_setlk
  3. locks_deadlocked
  4. fcntl_remove_locks
  5. copy_flock
  6. conflict
  7. overlap
  8. lock_it
  9. alloc_lock
  10. free_lock

   1 /*
   2  *  linux/fs/locks.c
   3  *
   4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5  *  Doug Evans, 92Aug07, dje@sspiff.uucp.
   6  *
   7  *  Deadlock Detection added by Kelly Carmichael, kelly@[142.24.8.65]
   8  *  September 17, 1994.
   9  *
  10  *  FIXME: one thing isn't handled yet:
  11  *      - mandatory locks (requires lots of changes elsewhere)
  12  *
  13  *  Edited by Kai Petzke, wpp@marie.physik.tu-berlin.de
  14  *
  15  *  Converted file_lock_table to a linked list from an array, which eliminates
  16  *  the limits on how many active file locks are open - Chad Page
  17  *  (pageone@netcom.com), November 27, 1994 
  18  * 
  19  *  Removed dependency on file descriptors. dup()'ed file descriptors now
  20  *  get the same locks as the original file descriptors, and a close() on
  21  *  any file descriptor removes ALL the locks on the file for the current
  22  *  process. Since locks still depend on the process id, locks are inherited
  23  *  after an exec() but not after a fork(). This agrees with POSIX, and both
  24  *  BSD and SVR4 practice.
  25  *  Andy Walker (andy@keo.kvaerner.no), February 14, 1995
  26  *
  27  *  Scrapped free list which is redundant now that we allocate locks
  28  *  dynamically with kmalloc()/kfree().
  29  *  Andy Walker (andy@keo.kvaerner.no), February 21, 1995
  30  *
  31  */
  32 
  33 #define DEADLOCK_DETECTION
  34 
  35 #include <asm/segment.h>
  36 
  37 #include <linux/malloc.h>
  38 #include <linux/sched.h>
  39 #include <linux/kernel.h>
  40 #include <linux/errno.h>
  41 #include <linux/stat.h>
  42 #include <linux/fcntl.h>
  43 
  44 #define OFFSET_MAX      ((off_t)0x7fffffff)     /* FIXME: move elsewhere? */
  45 
  46 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
  47 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
  48 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
  49 static int lock_it(struct file *filp, struct file_lock *caller);
  50 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl);
  51 static void free_lock(struct file_lock **fl);
  52 #ifdef DEADLOCK_DETECTION
  53 int locks_deadlocked(int my_pid,int blocked_pid);
  54 #endif
  55 
  56 static struct file_lock *file_lock_table = NULL;
  57 
/*
 * fcntl_getlk() - implement fcntl() F_GETLK.
 * Tests whether the lock described by the user's "struct flock" would
 * be blocked by an existing lock on the file.  If a conflicting lock
 * is found, its description (owner pid, absolute start, length, type)
 * is copied back to user space; otherwise l_type is set to F_UNLCK.
 * Returns 0 on success, -EBADF for a bad descriptor, -EINVAL for a
 * malformed request, or a verify_area() error.
 */
   58 int fcntl_getlk(unsigned int fd, struct flock *l)
      /* [previous][next][first][last][top][bottom][index][help] */
   59 {
   60         int error;
   61         struct flock flock;
   62         struct file *filp;
   63         struct file_lock *fl,file_lock;
   64 
   65         if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
   66                 return -EBADF;
   67         error = verify_area(VERIFY_WRITE,l, sizeof(*l));
   68         if (error)
   69                 return error;
   70         memcpy_fromfs(&flock, l, sizeof(flock));
        /* Probing with F_UNLCK makes no sense. */
   71         if (flock.l_type == F_UNLCK)
   72                 return -EINVAL;
   73         if (!copy_flock(filp, &file_lock, &flock))
   74                 return -EINVAL;
   75 
        /* Report the first existing lock that blocks the probe lock. */
   76         for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
   77                 if (conflict(&file_lock, fl)) {
   78                         flock.l_pid = fl->fl_owner->pid;
   79                         flock.l_start = fl->fl_start;
                        /* fl_end == OFFSET_MAX encodes "to end of file",
                         * which is reported back as l_len == 0. */
   80                         flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
   81                                 fl->fl_end - fl->fl_start + 1;
                        /* fl_whence is always 0: offsets are stored
                         * absolute (see copy_flock). */
   82                         flock.l_whence = fl->fl_whence;
   83                         flock.l_type = fl->fl_type;
   84                         memcpy_tofs(l, &flock, sizeof(flock));
   85                         return 0;
   86                 }
   87         }
   88 
   89         flock.l_type = F_UNLCK;                 /* no conflict found */
   90         memcpy_tofs(l, &flock, sizeof(flock));
   91         return 0;
   92 }
  93 
  94 /*
  95  * This function implements both F_SETLK and F_SETLKW.
  96  */
  97 
/*
 * fcntl_setlk() - implement fcntl() F_SETLK and F_SETLKW.
 * Validates the request against the file's open mode, scans the
 * inode's lock list for a conflicting lock held by another process,
 * and on success installs (or removes, for F_UNLCK) the lock via
 * lock_it().  F_SETLKW sleeps interruptibly until the conflicting
 * lock goes away; F_SETLK fails immediately with -EAGAIN.
 * Returns 0, -EBADF, -EINVAL, -EAGAIN, -ERESTARTSYS, -ENOLCK, a
 * verify_area() error, or (with DEADLOCK_DETECTION) -EDEADLOCK.
 */
   98 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
      /* [previous][next][first][last][top][bottom][index][help] */
   99 {
  100         int error;
  101         struct file *filp;
  102         struct file_lock *fl,file_lock;
  103         struct flock flock;
  104 
  105         /*
  106          * Get arguments and validate them ...
  107          */
  108 
  109         if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
  110                 return -EBADF;
  111         error = verify_area(VERIFY_READ, l, sizeof(*l));
  112         if (error)
  113                 return error;
  114         memcpy_fromfs(&flock, l, sizeof(flock));
  115         if (!copy_flock(filp, &file_lock, &flock))
  116                 return -EINVAL;
        /* f_mode bit 0 = open for reading, bit 1 = open for writing:
         * a read lock needs read access, a write lock write access. */
  117         switch (file_lock.fl_type) {
  118         case F_RDLCK :
  119                 if (!(filp->f_mode & 1))
  120                         return -EBADF;
  121                 break;
  122         case F_WRLCK :
  123                 if (!(filp->f_mode & 2))
  124                         return -EBADF;
  125                 break;
        /* F_SHLCK/F_EXLCK are accepted for compatibility (presumably
         * with Xenix-style applications -- TODO confirm); they require
         * read+write access and are mapped onto plain read/write locks. */
  126         case F_SHLCK :
  127                 if (!(filp->f_mode & 3))
  128                         return -EBADF;
  129                 file_lock.fl_type = F_RDLCK;
  130                 break;
  131         case F_EXLCK :
  132                 if (!(filp->f_mode & 3))
  133                         return -EBADF;
  134                 file_lock.fl_type = F_WRLCK;
  135                 break;
  136         case F_UNLCK :
  137                 break;
  138         }
  139 
  140         /*
  141          * Scan for a conflicting lock ...
  142          */
  143   
  144         if (file_lock.fl_type != F_UNLCK) {
  145 repeat:
  146                 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
  147                         if (!conflict(&file_lock, fl))
  148                                 continue;
  149                         /*
  150                          * File is locked by another process. If this is
  151                          * F_SETLKW wait for the lock to be released.
  152                          */
  153                         if (cmd == F_SETLKW) {
                                /* Bail out before sleeping if a signal is
                                 * already pending. */
  154                                 if (current->signal & ~current->blocked)
  155                                         return -ERESTARTSYS;
  156 #ifdef DEADLOCK_DETECTION
                                /* Refuse to sleep if waiting on this lock's
                                 * owner would close a cycle of waiters. */
  157                                 if (locks_deadlocked(file_lock.fl_owner->pid,fl->fl_owner->pid))
  158                                         return -EDEADLOCK;
  159 #endif
  160                                 interruptible_sleep_on(&fl->fl_wait);
  161                                 if (current->signal & ~current->blocked)
  162                                         return -ERESTARTSYS;
                                /*
                                 * We were woken up: the lock list may have
                                 * changed while we slept, so rescan it from
                                 * the beginning.
                                 */
  163                                 goto repeat;
  164                         }
  165                         return -EAGAIN;
  166                 }
  167         }
  168 
  169         /*
  170          * Lock doesn't conflict with any other lock ...
  171          */
  172 
  173         return lock_it(filp, &file_lock);
  174 }
 175 
 176 #ifdef DEADLOCK_DETECTION
 177 /*
 178  * This function tests for deadlock condition before putting a process to sleep
 179  * this detection scheme is recursive... we may need some test as to make it
 180  * exit if the function gets stuck due to bad lock data.
 181  */
 182 
 183 int locks_deadlocked(int my_pid,int blocked_pid)
     /* [previous][next][first][last][top][bottom][index][help] */
 184 {
 185         int ret_val;
 186         struct wait_queue *dlock_wait;
 187         struct file_lock *fl;
 188         for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
 189                 if (fl->fl_owner == NULL) continue;     /* not a used lock */
 190                 if (fl->fl_owner->pid != my_pid) continue;
 191                 if (fl->fl_wait == NULL) continue;      /* no queues */
 192                 dlock_wait = fl->fl_wait;
 193                 do {
 194                         if (dlock_wait->task != NULL) {
 195                                 if (dlock_wait->task->pid == blocked_pid)
 196                                         return -EDEADLOCK;
 197                                 ret_val = locks_deadlocked(dlock_wait->task->pid,blocked_pid);
 198                                 if (ret_val)
 199                                         return -EDEADLOCK;
 200                         }
 201                         dlock_wait = dlock_wait->next;
 202                 } while (dlock_wait != fl->fl_wait);
 203         }
 204         return 0;
 205 }
 206 #endif
 207 
 208 /*
 209  * This function is called when the file is closed.
 210  */
 211 
 212 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
     /* [previous][next][first][last][top][bottom][index][help] */
 213 {
 214         struct file_lock *fl;
 215         struct file_lock **before;
 216 
 217         /* Find first lock owned by caller ... */
 218 
 219         before = &filp->f_inode->i_flock;
 220         while ((fl = *before) && task != fl->fl_owner)
 221                 before = &fl->fl_next;
 222 
 223         /* The list is sorted by owner and fd ... */
 224 
 225         while ((fl = *before) && task == fl->fl_owner)
 226                 free_lock(before);
 227 }
 228 
 229 /*
 230  * Verify a "struct flock" and copy it to a "struct file_lock" ...
 231  * Result is a boolean indicating success.
 232  */
 233 
 234 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
     /* [previous][next][first][last][top][bottom][index][help] */
 235 {
 236         off_t start;
 237 
 238         if (!filp->f_inode)     /* just in case */
 239                 return 0;
 240         if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
 241          && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
 242                 return 0;
 243         switch (l->l_whence) {
 244         case 0 /*SEEK_SET*/ : start = 0; break;
 245         case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
 246         case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
 247         default : return 0;
 248         }
 249         if ((start += l->l_start) < 0 || l->l_len < 0)
 250                 return 0;
 251         fl->fl_type = l->l_type;
 252         fl->fl_start = start;   /* we record the absolute position */
 253         fl->fl_whence = 0;      /* FIXME: do we record {l_start} as passed? */
 254         if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
 255                 fl->fl_end = OFFSET_MAX;
 256         fl->fl_owner = current;
 257         fl->fl_wait = NULL;             /* just for cleanliness */
 258         return 1;
 259 }
 260 
 261 /*
 262  * Determine if lock {sys_fl} blocks lock {caller_fl} ...
 263  */
 264 
 265 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
     /* [previous][next][first][last][top][bottom][index][help] */
 266 {
 267         if (caller_fl->fl_owner == sys_fl->fl_owner)
 268                 return 0;
 269         if (!overlap(caller_fl, sys_fl))
 270                 return 0;
 271         switch (caller_fl->fl_type) {
 272         case F_RDLCK :
 273                 return sys_fl->fl_type != F_RDLCK;
 274         case F_WRLCK :
 275                 return 1;       /* overlapping region not owned by caller */
 276         }
 277         return 0;       /* shouldn't get here, but just in case */
 278 }
 279 
 280 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
     /* [previous][next][first][last][top][bottom][index][help] */
 281 {
 282         return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
 283 }
 284 
 285 /*
 286  * Add a lock to a file ...
 287  * Result is 0 for success or -ENOLCK.
 288  *
 289  * We merge adjacent locks whenever possible.
 290  *
 291  * WARNING: We assume the lock doesn't conflict with any other lock.
 292  */
 293   
 294 /*
 295  * Rewritten by Kai Petzke:
 296  * We sort the lock list first by owner, then by the starting address.
 297  *
 298  * To make freeing a lock much faster, we keep a pointer to the lock before the
 299  * actual one. But the real gain of the new coding was, that lock_it() and
 300  * unlock_it() became one function.
 301  *
 302  * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 303  */
 304 
/*
 * lock_it() - merge the new lock (or unlock) {caller} into the inode's
 * owner-sorted lock list; see the design notes in the comment block
 * above.  Returns 0 on success or -ENOLCK when a needed allocation
 * fails.  WARNING: assumes the caller has already established that
 * there is no conflict with locks of other owners.
 */
  305 static int lock_it(struct file *filp, struct file_lock *caller)
      /* [previous][next][first][last][top][bottom][index][help] */
  306 {
  307         struct file_lock *fl;
        /* {left}/{right}: old locks of a different type that overhang
         * the new region on the low/high side and must be trimmed. */
  308         struct file_lock *left = 0;
  309         struct file_lock *right = 0;
  310         struct file_lock **before;
        /* {added} is set once {caller} denotes a lock that is already
         * linked into the list (merged or replaced in place). */
  311         int added = 0;
  312 
  313         /*
  314          * Find the first old lock with the same owner as the new lock.
  315          */
  316 
  317         before = &filp->f_inode->i_flock;
  318         while ((fl = *before) && caller->fl_owner != fl->fl_owner)
  319                 before = &fl->fl_next;
  320 
  321         /*
  322          * Look up all locks of this owner.
  323          */
  324 
  325         while ((fl = *before) && caller->fl_owner == fl->fl_owner) {
  326                 /*
  327                  * Detect adjacent or overlapping regions (if same lock type)
  328                  */
  329                 if (caller->fl_type == fl->fl_type) {
  330                         if (fl->fl_end < caller->fl_start - 1)
  331                                 goto next_lock;
  332                         /*
  333                          * If the next lock in the list has entirely bigger
  334                          * addresses than the new one, insert the lock here.
  335                          */
  336                         if (fl->fl_start > caller->fl_end + 1)
  337                                 break;
  338 
  339                         /*
  340                          * If we come here, the new and old lock are of the
  341                          * same type and adjacent or overlapping. Make one
  342                          * lock yielding from the lower start address of both
  343                          * locks to the higher end address.
  344                          */
  345                         if (fl->fl_start > caller->fl_start)
  346                                 fl->fl_start = caller->fl_start;
  347                         else
  348                                 caller->fl_start = fl->fl_start;
  349                         if (fl->fl_end < caller->fl_end)
  350                                 fl->fl_end = caller->fl_end;
  351                         else
  352                                 caller->fl_end = fl->fl_end;
                        /* A second overlapping lock is now redundant:
                         * its range was absorbed above, so free it. */
  353                         if (added) {
  354                                 free_lock(before);
  355                                 continue;
  356                         }
                        /* From here on {caller} aliases the in-list lock. */
  357                         caller = fl;
  358                         added = 1;
  359                         goto next_lock;
  360                 }
  361                 /*
  362                  * Processing for different lock types is a bit more complex.
  363                  */
  364                 if (fl->fl_end < caller->fl_start)
  365                         goto next_lock;
  366                 if (fl->fl_start > caller->fl_end)
  367                         break;
                /* An unlock never needs a new list entry of its own. */
  368                 if (caller->fl_type == F_UNLCK)
  369                         added = 1;
  370                 if (fl->fl_start < caller->fl_start)
  371                         left = fl;
  372                 /*
  373                  * If the next lock in the list has a higher end address than
  374                  * the new one, insert the new one here.
  375                  */
  376                 if (fl->fl_end > caller->fl_end) {
  377                         right = fl;
  378                         break;
  379                 }
  380                 if (fl->fl_start >= caller->fl_start) {
  381                         /*
  382                          * The new lock completely replaces an old one (This may
  383                          * happen several times).
  384                          */
  385                         if (added) {
  386                                 free_lock(before);
  387                                 continue;
  388                         }
  389                         /*
  390                          * Replace the old lock with the new one. Wake up
  391                          * anybody waiting for the old one, as the change in
  392                          * lock type might satisfy his needs.
  393                          */
  394                         wake_up(&fl->fl_wait);
  395                         fl->fl_start = caller->fl_start;
  396                         fl->fl_end   = caller->fl_end;
  397                         fl->fl_type  = caller->fl_type;
  398                         caller = fl;
  399                         added = 1;
  400                 }
  401                 /*
  402                  * Go on to next lock.
  403                  */
  404 next_lock:
  405                 before = &(*before)->fl_next;
  406         }
  407 
  408         if (! added) {
  409                 if (caller->fl_type == F_UNLCK) {
  410 /*
  411  * XXX - under iBCS-2, attempting to unlock a not-locked region is 
  412  *      not considered an error condition, although I'm not sure if this 
  413  *      should be a default behavior (it makes porting to native Linux easy)
  414  *      or a personality option.
  415  *
  416  *      Does Xopen/1170 say anything about this?
  417  *      - drew@Colorado.EDU
  418  */
  419 #if 0
  420                         return -EINVAL;
  421 #else
  422                         return 0;
  423 #endif
  424                 }
  425                 if (! (caller = alloc_lock(before, caller)))
  426                         return -ENOLCK;
  427         }
        /*
         * Trim (or split) old locks of a different type that overhang
         * the new region on either side.
         */
  428         if (right) {
  429                 if (left == right) {
  430                         /*
  431                          * The new lock breaks the old one in two pieces, so we
  432                          * have to allocate one more lock (in this case, even
  433                          * F_UNLCK may fail!).
  434                          */
  435                         if (! (left = alloc_lock(before, right))) {
  436                                 if (! added)
  437                                         free_lock(before);
  438                                 return -ENOLCK;
  439                         }
  440                 }
  441                 right->fl_start = caller->fl_end + 1;
  442         }
  443         if (left)
  444                 left->fl_end = caller->fl_start - 1;
  445         return 0;
  446 }
 447 
 448 /*
 449  * File_lock() inserts a lock at the position pos of the linked list.
 450  */
 451 static struct file_lock *alloc_lock(struct file_lock **pos,
     /* [previous][next][first][last][top][bottom][index][help] */
 452                                     struct file_lock *fl)
 453 {
 454         struct file_lock *tmp;
 455 
 456         /* Okay, let's make a new file_lock structure... */
 457         tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock), GFP_KERNEL);
 458         if (!tmp)
 459                 return tmp;
 460         tmp->fl_nextlink = file_lock_table;
 461         tmp->fl_prevlink = NULL;
 462         if (file_lock_table != NULL)
 463                 file_lock_table->fl_prevlink = tmp;
 464         file_lock_table = tmp;
 465 
 466         tmp->fl_next = *pos;    /* insert into file's list */
 467         *pos = tmp;
 468 
 469         tmp->fl_owner = current;
 470         tmp->fl_wait = NULL;
 471 
 472         tmp->fl_type = fl->fl_type;
 473         tmp->fl_whence = fl->fl_whence;
 474         tmp->fl_start = fl->fl_start;
 475         tmp->fl_end = fl->fl_end;
 476 
 477         return tmp;
 478 }
 479 
 480 /*
 481  * Free up a lock...
 482  */
 483 
 484 static void free_lock(struct file_lock **fl_p)
     /* [previous][next][first][last][top][bottom][index][help] */
 485 {
 486         struct file_lock *fl;
 487 
 488         fl = *fl_p;
 489         *fl_p = (*fl_p)->fl_next;
 490 
 491         if (fl->fl_nextlink != NULL)
 492                 fl->fl_nextlink->fl_prevlink = fl->fl_prevlink;
 493 
 494         if (fl->fl_prevlink != NULL)
 495                 fl->fl_prevlink->fl_nextlink = fl->fl_nextlink;
 496         else
 497                 file_lock_table = fl->fl_nextlink;
 498 
 499         wake_up(&fl->fl_wait);
 500 
 501         kfree(fl);
 502 
 503         return;
 504 }

/* [previous][next][first][last][top][bottom][index][help] */