root/fs/locks.c

DEFINITIONS

This source file includes the following definitions.
  1. fcntl_init_locks
  2. fcntl_getlk
  3. fcntl_setlk
  4. fcntl_remove_locks
  5. copy_flock
  6. conflict
  7. overlap
  8. lock_it
  9. alloc_lock
  10. free_lock

/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans, 92Aug07, dje@sspiff.uucp.
 *
 * FIXME: two things aren't handled yet:
 *      - deadlock detection/avoidance (of dubious merit, but since it's in
 *        the definition, I guess it should be provided eventually)
 *      - mandatory locks (requires lots of changes elsewhere)
 *
 *  Edited by Kai Petzke, wpp@marie.physik.tu-berlin.de
 */
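
/*
 * A minimal user-space sketch (a separate program, not part of this
 * file) of the interface implemented below, assuming a made-up file
 * name: take a write lock on the first 100 bytes, waiting if another
 * process holds a conflicting lock, then release it. Everything shown
 * is the standard fcntl() locking API this file implements.
 */

#if 0   /* illustration only -- user-space code */
#include <fcntl.h>
#include <unistd.h>

int lock_example(void)
{
        struct flock fl;
        int fd = open("/tmp/somefile", O_RDWR);         /* hypothetical path */

        if (fd < 0)
                return -1;
        fl.l_type = F_WRLCK;            /* exclusive (write) lock */
        fl.l_whence = 0;                /* SEEK_SET: l_start is absolute */
        fl.l_start = 0;
        fl.l_len = 100;                 /* bytes 0..99; 0 would mean "to EOF" */
        if (fcntl(fd, F_SETLKW, &fl) < 0)       /* wait until region is free */
                return -1;
        /* ... critical section ... */
        fl.l_type = F_UNLCK;
        fcntl(fd, F_SETLK, &fl);        /* release the region */
        return close(fd);
}
#endif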

#include <asm/segment.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>

#define OFFSET_MAX      ((off_t)0x7fffffff)     /* FIXME: move elsewhere? */

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
static int overlap(struct file_lock *fl1, struct file_lock *fl2);
static int lock_it(struct file *filp, struct file_lock *caller);
static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl);
static void free_lock(struct file_lock **fl);

static struct file_lock file_lock_table[NR_FILE_LOCKS];
static struct file_lock *file_lock_free_list;

/*
 * Called at boot time to initialize the lock table ...
 */

void fcntl_init_locks(void)
{
        struct file_lock *fl;

        for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
                fl->fl_next = fl + 1;
                fl->fl_owner = NULL;
        }
        file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
        file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
        file_lock_free_list = &file_lock_table[0];
}

int fcntl_getlk(unsigned int fd, struct flock *l)
{
        int error;
        struct flock flock;
        struct file *filp;
        struct file_lock *fl, file_lock;

        if (fd >= NR_OPEN || !(filp = current->filp[fd]))
                return -EBADF;
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (flock.l_type == F_UNLCK)
                return -EINVAL;
        if (!copy_flock(filp, &file_lock, &flock))
                return -EINVAL;

        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (conflict(&file_lock, fl)) {
                        flock.l_pid = fl->fl_owner->pid;
                        flock.l_start = fl->fl_start;
                        flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
                                fl->fl_end - fl->fl_start + 1;
                        flock.l_whence = fl->fl_whence;
                        flock.l_type = fl->fl_type;
                        memcpy_tofs(l, &flock, sizeof(flock));
                        return 0;
                }
        }

        flock.l_type = F_UNLCK;                 /* no conflict found */
        memcpy_tofs(l, &flock, sizeof(flock));
        return 0;
}

/*
 * This function implements both F_SETLK and F_SETLKW.
 */

int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
        int error;
        struct file *filp;
        struct file_lock *fl, file_lock;
        struct flock flock;

        /*
         * Get arguments and validate them ...
         */

        if (fd >= NR_OPEN || !(filp = current->filp[fd]))
                return -EBADF;
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (!copy_flock(filp, &file_lock, &flock))
                return -EINVAL;
        switch (file_lock.fl_type) {
        case F_RDLCK :
                if (!(filp->f_mode & 1))
                        return -EBADF;
                break;
        case F_WRLCK :
                if (!(filp->f_mode & 2))
                        return -EBADF;
                break;
        case F_SHLCK :
                if (!(filp->f_mode & 3))
                        return -EBADF;
                file_lock.fl_type = F_RDLCK;
                break;
        case F_EXLCK :
                if (!(filp->f_mode & 3))
                        return -EBADF;
                file_lock.fl_type = F_WRLCK;
                break;
        case F_UNLCK :
                break;
        }

        /*
         * Scan for a conflicting lock ...
         */

        if (file_lock.fl_type != F_UNLCK) {
repeat:
                for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                        if (!conflict(&file_lock, fl))
                                continue;
                        /*
                         * File is locked by another process. If this is
                         * F_SETLKW, wait for the lock to be released.
                         * FIXME: We need to check for deadlocks here.
                         */
                        if (cmd == F_SETLKW) {
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
                                interruptible_sleep_on(&fl->fl_wait);
                                if (current->signal & ~current->blocked)
                                        return -ERESTARTSYS;
                                goto repeat;
                        }
                        return -EAGAIN;
                }
        }

        /*
         * Lock doesn't conflict with any other lock ...
         */

        return lock_it(filp, &file_lock);
}
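
/*
 * A hedged user-space sketch of the two commands handled above
 * (separate program; the fd is assumed open for reading): F_SETLK
 * fails at once with EAGAIN when the region is held by another
 * process, while F_SETLKW sleeps until the region is released or a
 * signal arrives.
 */

#if 0   /* illustration only -- user-space code */
#include <errno.h>
#include <fcntl.h>

int read_lock_whole_file(int fd, int wait)
{
        struct flock fl;

        fl.l_type = F_RDLCK;
        fl.l_whence = 0;                /* SEEK_SET */
        fl.l_start = 0;
        fl.l_len = 0;                   /* 0 means "through end of file" */
        if (fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl) == 0)
                return 0;               /* lock acquired */
        if (!wait && errno == EAGAIN)
                return 1;               /* somebody else holds it */
        return -1;                      /* error, or interrupted wait */
}
#endif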

/*
 * This function is called when the file is closed.
 */

void fcntl_remove_locks(struct task_struct *task, struct file *filp)
{
        struct file_lock *fl;
        struct file_lock **before;

        /* Find first lock owned by caller ... */

        before = &filp->f_inode->i_flock;
        while ((fl = *before) && task != fl->fl_owner)
                before = &fl->fl_next;

        /* The list is sorted by owner ... */

        while ((fl = *before) && task == fl->fl_owner)
                free_lock(before);
}

/*
 * Verify a "struct flock" and copy it to a "struct file_lock" ...
 * Result is a boolean indicating success.
 */

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
{
        off_t start;

        if (!filp->f_inode)     /* just in case */
                return 0;
        if (!S_ISREG(filp->f_inode->i_mode))
                return 0;
        if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
         && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
                return 0;
        switch (l->l_whence) {
        case 0 /*SEEK_SET*/ : start = 0; break;
        case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
        case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
        default : return 0;
        }
        if ((start += l->l_start) < 0 || l->l_len < 0)
                return 0;
        fl->fl_type = l->l_type;
        fl->fl_start = start;   /* we record the absolute position */
        fl->fl_whence = 0;      /* FIXME: do we record {l_start} as passed? */
        if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
                fl->fl_end = OFFSET_MAX;
        fl->fl_owner = current;
        fl->fl_wait = NULL;             /* just for cleanliness */
        return 1;
}
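
/*
 * Worked example of the conversion above (values made up for
 * illustration): for a 1000-byte file, a caller passing l_whence ==
 * SEEK_END, l_start == -10, l_len == 0 asks to lock from 10 bytes
 * before end-of-file onward. copy_flock() computes start = 1000 +
 * (-10) = 990, and since l_len == 0 it sets fl_end = OFFSET_MAX, so
 * the lock covers absolute byte offsets 990 through end-of-file.
 */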
 221 
 222 /*
 223  * Determine if lock {sys_fl} blocks lock {caller_fl} ...
 224  */
 225 
 226 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
     /* [previous][next][first][last][top][bottom][index][help] */
 227 {
 228         if (caller_fl->fl_owner == sys_fl->fl_owner)
 229                 return 0;
 230         if (!overlap(caller_fl, sys_fl))
 231                 return 0;
 232         switch (caller_fl->fl_type) {
 233         case F_RDLCK :
 234                 return sys_fl->fl_type != F_RDLCK;
 235         case F_WRLCK :
 236                 return 1;       /* overlapping region not owned by caller */
 237         }
 238         return 0;       /* shouldn't get here, but just in case */
 239 }
 240 
 241 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
     /* [previous][next][first][last][top][bottom][index][help] */
 242 {
 243         return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
 244 }
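
/*
 * A quick sanity check of the predicate above (ranges are inclusive
 * byte offsets, values made up for illustration): [0,99] and [100,199]
 * do not overlap, since 99 >= 100 fails; [0,100] and [100,199] do,
 * because both 100 >= 100 and 199 >= 0 hold. Adjacent-but-disjoint
 * regions therefore never conflict; merging of merely adjacent
 * same-type locks is handled separately in lock_it().
 */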

/*
 * Add a lock to a file ...
 * Result is 0 for success or -ENOLCK.
 *
 * We merge adjacent locks whenever possible.
 *
 * WARNING: We assume the lock doesn't conflict with any other lock.
 */

/*
 * Rewritten by Kai Petzke:
 * We sort the lock list first by owner, then by the starting address.
 *
 * To make freeing a lock much faster, we keep a pointer to the lock before the
 * actual one. But the real gain of the new coding was that lock_it() and
 * unlock_it() became one function.
 *
 * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 */

static int lock_it(struct file *filp, struct file_lock *caller)
{
        struct file_lock *fl;
        struct file_lock *left = 0;
        struct file_lock *right = 0;
        struct file_lock **before;
        int added = 0;

        /*
         * Find the first old lock with the same owner as the new lock.
         */

        before = &filp->f_inode->i_flock;
        while ((fl = *before) && caller->fl_owner != fl->fl_owner)
                before = &fl->fl_next;

        /*
         * Look up all locks of this owner.
         */

        while ((fl = *before) && caller->fl_owner == fl->fl_owner) {
                /*
                 * Detect adjacent or overlapping regions (if same lock type)
                 */
                if (caller->fl_type == fl->fl_type) {
                        if (fl->fl_end < caller->fl_start - 1)
                                goto next_lock;
                        /*
                         * If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
                        if (fl->fl_start > caller->fl_end + 1)
                                break;

                        /*
                         * If we come here, the new and old lock are of the
                         * same type and adjacent or overlapping. Make one
                         * lock yielding from the lower start address of both
                         * locks to the higher end address.
                         */
                        if (fl->fl_start > caller->fl_start)
                                fl->fl_start = caller->fl_start;
                        else
                                caller->fl_start = fl->fl_start;
                        if (fl->fl_end < caller->fl_end)
                                fl->fl_end = caller->fl_end;
                        else
                                caller->fl_end = fl->fl_end;
                        if (added) {
                                free_lock(before);
                                continue;
                        }
                        caller = fl;
                        added = 1;
                        goto next_lock;
                }
                /*
                 * Processing for different lock types is a bit more complex.
                 */
                if (fl->fl_end < caller->fl_start)
                        goto next_lock;
                if (fl->fl_start > caller->fl_end)
                        break;
                if (caller->fl_type == F_UNLCK)
                        added = 1;
                if (fl->fl_start < caller->fl_start)
                        left = fl;
                /*
                 * If the next lock in the list has a higher end address than
                 * the new one, insert the new one here.
                 */
                if (fl->fl_end > caller->fl_end) {
                        right = fl;
                        break;
                }
                if (fl->fl_start >= caller->fl_start) {
                        /*
                         * The new lock completely replaces an old one (This may
                         * happen several times).
                         */
                        if (added) {
                                free_lock(before);
                                continue;
                        }
                        /*
                         * Replace the old lock with the new one. Wake up
                         * anybody waiting for the old one, as the change in
                         * lock type might satisfy his needs.
                         */
                        wake_up(&fl->fl_wait);
                        fl->fl_start = caller->fl_start;
                        fl->fl_end   = caller->fl_end;
                        fl->fl_type  = caller->fl_type;
                        fl->fl_wait  = 0;
                        caller = fl;
                        added = 1;
                }
                /*
                 * Go on to next lock.
                 */
next_lock:
                before = &(*before)->fl_next;
        }

        if (! added) {
                if (caller->fl_type == F_UNLCK)
                        return -EINVAL;
                if (! (caller = alloc_lock(before, caller)))
                        return -ENOLCK;
        }
        if (right) {
                if (left == right) {
                        /*
                         * The new lock breaks the old one in two pieces, so we
                         * have to allocate one more lock (in this case, even
                         * F_UNLCK may fail!).
                         */
                        if (! (left = alloc_lock(before, right))) {
                                if (! added)
                                        free_lock(before);
                                return -ENOLCK;
                        }
                }
                right->fl_start = caller->fl_end + 1;
        }
        if (left)
                left->fl_end = caller->fl_start - 1;
        return 0;
}
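
/*
 * A worked example of the splitting case above (offsets made up for
 * illustration): suppose a process holds a write lock on bytes 0..99
 * and then unlocks bytes 40..59. The old lock both starts before and
 * ends after the unlocked region, so left == right: the old lock must
 * be split in two, and one extra table entry is allocated (which is
 * why even F_UNLCK can fail with -ENOLCK here). The result is two
 * write locks, on bytes 0..39 and 60..99. Conversely, write-locking
 * 40..59 again merges all three regions back into a single lock on
 * bytes 0..99.
 */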

/*
 * alloc_lock() inserts a lock at position pos of the linked list.
 */

static struct file_lock *alloc_lock(struct file_lock **pos,
                                    struct file_lock *fl)
{
        struct file_lock *tmp;

        tmp = file_lock_free_list;
        if (tmp == NULL)
                return NULL;                    /* no available entry */
        if (tmp->fl_owner != NULL)
                panic("alloc_lock: broken free list\n");

        /* remove from free list */
        file_lock_free_list = tmp->fl_next;

        *tmp = *fl;

        tmp->fl_next = *pos;    /* insert into file's list */
        *pos = tmp;

        tmp->fl_owner = current;        /* FIXME: needed? */
        tmp->fl_wait = NULL;
        return tmp;
}

/*
 * Add a lock to the free list ...
 */

static void free_lock(struct file_lock **fl_p)
{
        struct file_lock *fl;

        fl = *fl_p;
        if (fl->fl_owner == NULL)       /* sanity check */
                panic("free_lock: broken lock list\n");

        *fl_p = (*fl_p)->fl_next;

        fl->fl_next = file_lock_free_list;      /* add to free list */
        file_lock_free_list = fl;
        fl->fl_owner = NULL;                    /* for sanity checks */

        wake_up(&fl->fl_wait);
}
