root/fs/locks.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. fcntl_init_locks
  2. fcntl_getlk
  3. fcntl_setlk
  4. locks_deadlocked
  5. fcntl_remove_locks
  6. copy_flock
  7. conflict
  8. overlap
  9. lock_it
  10. alloc_lock
  11. free_lock

   1 /*
   2  *  linux/fs/locks.c
   3  *
   4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5  *  Doug Evans, 92Aug07, dje@sspiff.uucp.
   6  *
   7  *  Deadlock Detection added by Kelly Carmichael, kelly@[142.24.8.65]
   8  *  September 17, 1994.
   9  *
  10  * FIXME: one thing isn't handled yet:
  11  *      - mandatory locks (requires lots of changes elsewhere)
  12  *
  13  *  Edited by Kai Petzke, wpp@marie.physik.tu-berlin.de
  14  */
  15 #define DEADLOCK_DETECTION
  16 
  17 #include <asm/segment.h>
  18 
  19 #include <linux/sched.h>
  20 #include <linux/kernel.h>
  21 #include <linux/errno.h>
  22 #include <linux/stat.h>
  23 #include <linux/fcntl.h>
  24 
  25 #define OFFSET_MAX      ((off_t)0x7fffffff)     /* FIXME: move elsewhere? */
  26 
  27 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
  28                       unsigned int fd);
  29 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
  30 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
  31 static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd);
  32 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl,
  33                                     unsigned int fd);
  34 static void free_lock(struct file_lock **fl);
  35 #ifdef DEADLOCK_DETECTION
  36 int locks_deadlocked(int my_pid,int blocked_pid);
  37 #endif
  38 
  39 static struct file_lock file_lock_table[NR_FILE_LOCKS];
  40 static struct file_lock *file_lock_free_list;
  41 
  42 /*
  43  * Called at boot time to initialize the lock table ...
  44  */
  45 
  46 void fcntl_init_locks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  47 {
  48         struct file_lock *fl;
  49 
  50         for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
  51                 fl->fl_next = fl + 1;
  52                 fl->fl_owner = NULL;
  53         }
  54         file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
  55         file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
  56         file_lock_free_list = &file_lock_table[0];
  57 }
  58 
  59 int fcntl_getlk(unsigned int fd, struct flock *l)
     /* [previous][next][first][last][top][bottom][index][help] */
  60 {
  61         int error;
  62         struct flock flock;
  63         struct file *filp;
  64         struct file_lock *fl,file_lock;
  65 
  66         if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
  67                 return -EBADF;
  68         error = verify_area(VERIFY_WRITE,l, sizeof(*l));
  69         if (error)
  70                 return error;
  71         memcpy_fromfs(&flock, l, sizeof(flock));
  72         if (flock.l_type == F_UNLCK)
  73                 return -EINVAL;
  74         if (!copy_flock(filp, &file_lock, &flock, fd))
  75                 return -EINVAL;
  76 
  77         for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
  78                 if (conflict(&file_lock, fl)) {
  79                         flock.l_pid = fl->fl_owner->pid;
  80                         flock.l_start = fl->fl_start;
  81                         flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
  82                                 fl->fl_end - fl->fl_start + 1;
  83                         flock.l_whence = fl->fl_whence;
  84                         flock.l_type = fl->fl_type;
  85                         memcpy_tofs(l, &flock, sizeof(flock));
  86                         return 0;
  87                 }
  88         }
  89 
  90         flock.l_type = F_UNLCK;                 /* no conflict found */
  91         memcpy_tofs(l, &flock, sizeof(flock));
  92         return 0;
  93 }
  94 
/*
 * This function implements both F_SETLK and F_SETLKW.
 *
 * fd:  descriptor the lock applies to (also stamped into the lock so
 *      close() can release only this descriptor's locks).
 * cmd: F_SETLK fails with -EAGAIN on conflict; F_SETLKW sleeps until
 *      the conflicting lock is dropped.
 * l:   user-space flock describing the requested region and type.
 */

int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
        int error;
        struct file *filp;
        struct file_lock *fl,file_lock;
        struct flock flock;

        /*
         * Get arguments and validate them ...
         */

        if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
                return -EBADF;
        error = verify_area(VERIFY_READ, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (!copy_flock(filp, &file_lock, &flock, fd))
                return -EINVAL;
        /* Lock type must be permitted by the file's open mode
         * (f_mode bit 1 = read, bit 2 = write). */
        switch (file_lock.fl_type) {
        case F_RDLCK :
                if (!(filp->f_mode & 1))
                        return -EBADF;
                break;
        case F_WRLCK :
                if (!(filp->f_mode & 2))
                        return -EBADF;
                break;
        case F_SHLCK :
                if (!(filp->f_mode & 3))
                        return -EBADF;
                file_lock.fl_type = F_RDLCK;    /* shared maps to read lock */
                break;
        case F_EXLCK :
                if (!(filp->f_mode & 3))
                        return -EBADF;
                file_lock.fl_type = F_WRLCK;    /* exclusive maps to write lock */
                break;
        case F_UNLCK :
                break;
        }

        /*
         * Scan for a conflicting lock ...
         */
  
        if (file_lock.fl_type != F_UNLCK) {
repeat:
                for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                        if (!conflict(&file_lock, fl))
                                continue;
                        /*
                         * File is locked by another process. If this is
                         * F_SETLKW wait for the lock to be released.
                         */
                        if (cmd == F_SETLKW) {
                                /* Bail out (restartably) if a signal is pending. */
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
#ifdef DEADLOCK_DETECTION
                                if (locks_deadlocked(file_lock.fl_owner->pid,fl->fl_owner->pid)) return -EDEADLOCK;
#endif
                                interruptible_sleep_on(&fl->fl_wait);
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
                                /* The lock list may have changed while we
                                 * slept: rescan it from the beginning. */
                                goto repeat;
                        }
                        return -EAGAIN;
                }
        }

        /*
         * Lock doesn't conflict with any other lock ...
         */

        return lock_it(filp, &file_lock, fd);
}
 175 
 176 #ifdef DEADLOCK_DETECTION
 177 /*
 178  * This function tests for deadlock condition before putting a process to sleep
 179  * this detection scheme is recursive... we may need some test as to make it
 180  * exit if the function gets stuck due to bad lock data.
 181  */
 182 
 183 int locks_deadlocked(int my_pid,int blocked_pid)
     /* [previous][next][first][last][top][bottom][index][help] */
 184 {
 185         int ret_val;
 186         struct wait_queue *dlock_wait;
 187         struct file_lock *fl;
 188         for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
 189                 if (fl->fl_owner == NULL) continue;     /* not a used lock */
 190                 if (fl->fl_owner->pid != my_pid) continue;
 191                 if (fl->fl_wait == NULL) continue;      /* no queues */
 192                 dlock_wait = fl->fl_wait;
 193                 do {
 194                         if (dlock_wait->task != NULL) {
 195                                 if (dlock_wait->task->pid == blocked_pid) return -EDEADLOCK;
 196                                 ret_val = locks_deadlocked(dlock_wait->task->pid,blocked_pid);
 197                                 if (ret_val) return -EDEADLOCK;
 198                         }
 199                         dlock_wait = dlock_wait->next;
 200                 } while (dlock_wait != fl->fl_wait);
 201         }
 202         return 0;
 203 }
 204 #endif
 205 
 206 /*
 207  * This function is called when the file is closed.
 208  */
 209 
 210 void fcntl_remove_locks(struct task_struct *task, struct file *filp,
     /* [previous][next][first][last][top][bottom][index][help] */
 211                         unsigned int fd)
 212 {
 213         struct file_lock *fl;
 214         struct file_lock **before;
 215 
 216         /* Find first lock owned by caller ... */
 217 
 218         before = &filp->f_inode->i_flock;
 219         while ((fl = *before) && (task != fl->fl_owner || fd != fl->fl_fd))
 220                 before = &fl->fl_next;
 221 
 222         /* The list is sorted by owner and fd ... */
 223 
 224         while ((fl = *before) && task == fl->fl_owner && fd == fl->fl_fd)
 225                 free_lock(before);
 226 }
 227 
 228 /*
 229  * Verify a "struct flock" and copy it to a "struct file_lock" ...
 230  * Result is a boolean indicating success.
 231  */
 232 
 233 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
     /* [previous][next][first][last][top][bottom][index][help] */
 234                       unsigned int fd)
 235 {
 236         off_t start;
 237 
 238         if (!filp->f_inode)     /* just in case */
 239                 return 0;
 240         if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
 241          && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
 242                 return 0;
 243         switch (l->l_whence) {
 244         case 0 /*SEEK_SET*/ : start = 0; break;
 245         case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
 246         case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
 247         default : return 0;
 248         }
 249         if ((start += l->l_start) < 0 || l->l_len < 0)
 250                 return 0;
 251         fl->fl_type = l->l_type;
 252         fl->fl_start = start;   /* we record the absolute position */
 253         fl->fl_whence = 0;      /* FIXME: do we record {l_start} as passed? */
 254         if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
 255                 fl->fl_end = OFFSET_MAX;
 256         fl->fl_owner = current;
 257         fl->fl_fd = fd;
 258         fl->fl_wait = NULL;             /* just for cleanliness */
 259         return 1;
 260 }
 261 
 262 /*
 263  * Determine if lock {sys_fl} blocks lock {caller_fl} ...
 264  */
 265 
 266 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
     /* [previous][next][first][last][top][bottom][index][help] */
 267 {
 268         if (   caller_fl->fl_owner == sys_fl->fl_owner
 269             && caller_fl->fl_fd == sys_fl->fl_fd)
 270                 return 0;
 271         if (!overlap(caller_fl, sys_fl))
 272                 return 0;
 273         switch (caller_fl->fl_type) {
 274         case F_RDLCK :
 275                 return sys_fl->fl_type != F_RDLCK;
 276         case F_WRLCK :
 277                 return 1;       /* overlapping region not owned by caller */
 278         }
 279         return 0;       /* shouldn't get here, but just in case */
 280 }
 281 
 282 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
     /* [previous][next][first][last][top][bottom][index][help] */
 283 {
 284         return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
 285 }
 286 
 287 /*
 288  * Add a lock to a file ...
 289  * Result is 0 for success or -ENOLCK.
 290  *
 291  * We merge adjacent locks whenever possible.
 292  *
 293  * WARNING: We assume the lock doesn't conflict with any other lock.
 294  */
 295   
 296 /*
 297  * Rewritten by Kai Petzke:
 298  * We sort the lock list first by owner, then by the starting address.
 299  *
 300  * To make freeing a lock much faster, we keep a pointer to the lock before the
 301  * actual one. But the real gain of the new coding was, that lock_it() and
 302  * unlock_it() became one function.
 303  *
 304  * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 305  */
 306 
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
        struct file_lock *fl;
        struct file_lock *left = 0;     /* old lock sticking out on the left */
        struct file_lock *right = 0;    /* old lock sticking out on the right */
        struct file_lock **before;
        int added = 0;                  /* set once {caller} is in the list */

        /*
         * Find the first old lock with the same owner as the new lock.
         */

        before = &filp->f_inode->i_flock;
        while ((fl = *before) &&
            (caller->fl_owner != fl->fl_owner ||
             caller->fl_fd != fl->fl_fd))
                before = &fl->fl_next;

        /*
         * Look up all locks of this owner.
         */

        while (   (fl = *before)
               && caller->fl_owner == fl->fl_owner
               && caller->fl_fd == fl->fl_fd) {
                /*
                 * Detect adjacent or overlapping regions (if same lock type)
                 */
                if (caller->fl_type == fl->fl_type) {
                        if (fl->fl_end < caller->fl_start - 1)
                                goto next_lock;
                        /*
                         * If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
                        if (fl->fl_start > caller->fl_end + 1)
                                break;

                        /*
                         * If we come here, the new and old lock are of the
                         * same type and adjacent or overlapping. Make one
                         * lock yielding from the lower start address of both
                         * locks to the higher end address.
                         */
                        if (fl->fl_start > caller->fl_start)
                                fl->fl_start = caller->fl_start;
                        else
                                caller->fl_start = fl->fl_start;
                        if (fl->fl_end < caller->fl_end)
                                fl->fl_end = caller->fl_end;
                        else
                                caller->fl_end = fl->fl_end;
                        if (added) {
                                /* Already merged into an earlier entry: this
                                 * old lock is now fully covered, drop it.
                                 * (free_lock advances *before for us.) */
                                free_lock(before);
                                continue;
                        }
                        /* Reuse the old entry in place as the merged lock. */
                        caller = fl;
                        added = 1;
                        goto next_lock;
                }
                /*
                 * Processing for different lock types is a bit more complex.
                 */
                if (fl->fl_end < caller->fl_start)
                        goto next_lock;
                if (fl->fl_start > caller->fl_end)
                        break;
                /* An unlock never inserts a new list entry. */
                if (caller->fl_type == F_UNLCK)
                        added = 1;
                if (fl->fl_start < caller->fl_start)
                        left = fl;
                /*
                 * If the next lock in the list has a higher end address than
                 * the new one, insert the new one here.
                 */
                if (fl->fl_end > caller->fl_end) {
                        right = fl;
                        break;
                }
                if (fl->fl_start >= caller->fl_start) {
                        /*
                         * The new lock completely replaces an old one (This may
                         * happen several times).
                         */
                        if (added) {
                                free_lock(before);
                                continue;
                        }
                        /*
                         * Replace the old lock with the new one. Wake up
                         * anybody waiting for the old one, as the change in
                         * lock type might satisfy his needs.
                         */
                        wake_up(&fl->fl_wait);
                        fl->fl_start = caller->fl_start;
                        fl->fl_end   = caller->fl_end;
                        fl->fl_type  = caller->fl_type;
                        caller = fl;
                        added = 1;
                }
                /*
                 * Go on to next lock.
                 */
next_lock:
                before = &(*before)->fl_next;
        }

        if (! added) {
                if (caller->fl_type == F_UNLCK) {
/*
 * XXX - under iBCS-2, attempting to unlock a not-locked region is 
 *      not considered an error condition, although I'm not sure if this 
 *      should be a default behavior (it makes porting to native Linux easy)
 *      or a personality option.
 *
 *      Does Xopen/1170 say anything about this?
 *      - drew@Colorado.EDU
 */
#if 0
                        return -EINVAL;
#else
                        return 0;
#endif
                }
                if (! (caller = alloc_lock(before, caller, fd)))
                        return -ENOLCK;
        }
        if (right) {
                if (left == right) {
                        /*
                         * The new lock breaks the old one in two pieces, so we
                         * have to allocate one more lock (in this case, even
                         * F_UNLCK may fail!).
                         */
                        if (! (left = alloc_lock(before, right, fd))) {
                                if (! added)
                                        free_lock(before);
                                return -ENOLCK;
                        }
                }
                /* Trim the right-hand remainder to start after our region. */
                right->fl_start = caller->fl_end + 1;
        }
        /* Trim the left-hand remainder to end before our region. */
        if (left)
                left->fl_end = caller->fl_start - 1;
        return 0;
}
 453 
 454 /*
 455  * File_lock() inserts a lock at the position pos of the linked list.
 456  */
 457 
 458 static struct file_lock *alloc_lock(struct file_lock **pos,
     /* [previous][next][first][last][top][bottom][index][help] */
 459                                     struct file_lock *fl,
 460                                     unsigned int     fd)
 461 {
 462         struct file_lock *tmp;
 463 
 464         tmp = file_lock_free_list;
 465         if (tmp == NULL)
 466                 return NULL;                    /* no available entry */
 467         if (tmp->fl_owner != NULL)
 468                 panic("alloc_lock: broken free list\n");
 469 
 470         /* remove from free list */
 471         file_lock_free_list = tmp->fl_next;
 472 
 473         *tmp = *fl;
 474 
 475         tmp->fl_next = *pos;    /* insert into file's list */
 476         *pos = tmp;
 477 
 478         tmp->fl_owner = current;        /* FIXME: needed? */
 479         tmp->fl_fd = fd;                /* FIXME: needed? */
 480         tmp->fl_wait = NULL;
 481         return tmp;
 482 }
 483 
 484 /*
 485  * Add a lock to the free list ...
 486  */
 487 
 488 static void free_lock(struct file_lock **fl_p)
     /* [previous][next][first][last][top][bottom][index][help] */
 489 {
 490         struct file_lock *fl;
 491 
 492         fl = *fl_p;
 493         if (fl->fl_owner == NULL)       /* sanity check */
 494                 panic("free_lock: broken lock list\n");
 495 
 496         *fl_p = (*fl_p)->fl_next;
 497 
 498         fl->fl_next = file_lock_free_list;      /* add to free list */
 499         file_lock_free_list = fl;
 500         fl->fl_owner = NULL;                    /* for sanity checks */
 501 
 502         wake_up(&fl->fl_wait);
 503 }

/* [previous][next][first][last][top][bottom][index][help] */