root/fs/locks.c


DEFINITIONS

This source file includes the following definitions:
  1. fcntl_init_locks
  2. fcntl_getlk
  3. fcntl_setlk
  4. fcntl_remove_locks
  5. copy_flock
  6. conflict
  7. overlap
  8. lock_it
  9. unlock_it
  10. alloc_lock
  11. free_lock

/*
 *  linux/fs/locks.c
 *
 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans, 92Aug07, dje@sspiff.uucp.
 *
 * FIXME: two things aren't handled yet:
 *      - deadlock detection/avoidance (of dubious merit, but since it's in
 *        the definition, I guess it should be provided eventually)
 *      - mandatory locks (requires lots of changes elsewhere)
 */

#include <asm/segment.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>

#define OFFSET_MAX      0x7fffffff      /* FIXME: move elsewhere? */

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
static int overlap(struct file_lock *fl1, struct file_lock *fl2);
static int lock_it(struct file *filp, struct file_lock *caller);
static int unlock_it(struct file *filp, struct file_lock *caller);
static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template);
static void free_lock(struct file *filp, struct file_lock *fl);

static struct file_lock file_lock_table[NR_FILE_LOCKS];
static struct file_lock *file_lock_free_list;

/*
 * Called at boot time to initialize the lock table ...
 */

void fcntl_init_locks(void)
{
        struct file_lock *fl;

        for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
                fl->fl_next = fl + 1;
                fl->fl_owner = NULL;
        }
        file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
        file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
        file_lock_free_list = &file_lock_table[0];
}
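
/*
 * Illustrative sketch (not part of the original file): after
 * fcntl_init_locks() the static table is threaded into a singly linked
 * free list through {fl_next}, e.g. with NR_FILE_LOCKS == 4:
 *
 *      file_lock_free_list --> [0] --> [1] --> [2] --> [3] --> NULL
 *
 * alloc_lock() pops entries off the head and free_lock() pushes them back.
 */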

int fcntl_getlk(unsigned int fd, struct flock *l)
{
        int error;
        struct flock flock;
        struct file *filp;
        struct file_lock *fl,file_lock;

        if (fd >= NR_OPEN || !(filp = current->filp[fd]))
                return -EBADF;
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (flock.l_type == F_UNLCK)
                return -EINVAL;
        if (!copy_flock(filp, &file_lock, &flock))
                return -EINVAL;

        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (conflict(&file_lock, fl)) {
                        flock.l_pid = fl->fl_owner->pid;
                        flock.l_start = fl->fl_start;
                        flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
                                fl->fl_end - fl->fl_start + 1;
                        flock.l_whence = fl->fl_whence;
                        flock.l_type = fl->fl_type;
                        memcpy_tofs(l, &flock, sizeof(flock));
                        return 0;
                }
        }

        flock.l_type = F_UNLCK;                 /* no conflict found */
        memcpy_tofs(l, &flock, sizeof(flock));
        return 0;
}
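
/*
 * Illustrative user-space sketch (not part of the original file; assumes
 * a POSIX libc and the hypothetical helper name would_block()): querying
 * for a conflicting lock with F_GETLK via fcntl(2). If no conflict exists,
 * the kernel rewrites l_type to F_UNLCK, as implemented above.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>

/* Ask whether a write lock over the whole file would be blocked. */
static int would_block(int fd)
{
        struct flock fl;

        fl.l_type = F_WRLCK;            /* the lock we would like */
        fl.l_whence = 0;                /* SEEK_SET */
        fl.l_start = 0;
        fl.l_len = 0;                   /* zero length == to end of file */
        if (fcntl(fd, F_GETLK, &fl) == -1)
                return -1;
        if (fl.l_type == F_UNLCK)
                return 0;               /* no conflicting lock found */
        printf("conflict: pid %ld holds bytes %ld+%ld\n",
               (long) fl.l_pid, (long) fl.l_start, (long) fl.l_len);
        return 1;
}
#endif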

/*
 * This function implements both F_SETLK and F_SETLKW.
 */

int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
        int error;
        struct file *filp;
        struct file_lock *fl,file_lock;
        struct flock flock;

        /*
         * Get arguments and validate them ...
         */

        if (fd >= NR_OPEN || !(filp = current->filp[fd]))
                return -EBADF;
        error = verify_area(VERIFY_WRITE, l, sizeof(*l));
        if (error)
                return error;
        memcpy_fromfs(&flock, l, sizeof(flock));
        if (!copy_flock(filp, &file_lock, &flock))
                return -EINVAL;
        switch (file_lock.fl_type) {
        case F_RDLCK :
                if (!(filp->f_mode & 1))        /* open for reading? */
                        return -EBADF;
                break;
        case F_WRLCK :
                if (!(filp->f_mode & 2))        /* open for writing? */
                        return -EBADF;
                break;
        case F_SHLCK :
                if (!(filp->f_mode & 3))        /* open for reading or writing? */
                        return -EBADF;
                file_lock.fl_type = F_RDLCK;
                break;
        case F_EXLCK :
                if (!(filp->f_mode & 3))        /* open for reading or writing? */
                        return -EBADF;
                file_lock.fl_type = F_WRLCK;
                break;
        case F_UNLCK :
                break;
        }

        /*
         * F_UNLCK needs to be handled differently ...
         */

        if (file_lock.fl_type == F_UNLCK)
                return unlock_it(filp, &file_lock);

        /*
         * Scan for a conflicting lock ...
         */

repeat:
        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!conflict(&file_lock, fl))
                        continue;
                /*
                 * File is locked by another process. If this is F_SETLKW
                 * wait for the lock to be released.
                 * FIXME: We need to check for deadlocks here.
                 */
                if (cmd == F_SETLKW) {
                        if (current->signal & ~current->blocked)
                                return -ERESTARTSYS;
                        interruptible_sleep_on(&fl->fl_wait);
                        if (current->signal & ~current->blocked)
                                return -ERESTARTSYS;
                        goto repeat;
                }
                return -EAGAIN;
        }

        /*
         * Lock doesn't conflict with any other lock ...
         */

        return lock_it(filp, &file_lock);
}
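
/*
 * Illustrative user-space sketch (not part of the original file; assumes
 * a POSIX libc and the hypothetical helper name lock_first_100()): taking
 * and releasing a write lock on the first 100 bytes with F_SETLKW, which
 * sleeps in the loop above until any conflicting lock is dropped.
 */
#if 0
#include <fcntl.h>

static int lock_first_100(int fd)
{
        struct flock fl;

        fl.l_type = F_WRLCK;
        fl.l_whence = 0;                /* SEEK_SET */
        fl.l_start = 0;
        fl.l_len = 100;
        if (fcntl(fd, F_SETLKW, &fl) == -1)     /* blocks until granted */
                return -1;
        /* ... critical section ... */
        fl.l_type = F_UNLCK;
        return fcntl(fd, F_SETLK, &fl);
}
#endif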

/*
 * This function is called when the file is closed.
 */

void fcntl_remove_locks(struct task_struct *task, struct file *filp)
{
        struct file_lock *fl,*next;

        for (fl = filp->f_inode->i_flock; fl != NULL; ) {
                /*
                 * If this one is freed, {fl_next} gets clobbered when the
                 * entry is moved to the free list, so grab it now ...
                 */
                next = fl->fl_next;
                if (fl->fl_owner == task)
                        free_lock(filp, fl);
                fl = next;
        }
}

/*
 * Verify a "struct flock" and copy it to a "struct file_lock" ...
 * Result is a boolean indicating success.
 */

static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
{
        off_t start;

        if (!filp->f_inode)     /* just in case */
                return 0;
        if (!S_ISREG(filp->f_inode->i_mode))
                return 0;
        if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
         && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
                return 0;
        switch (l->l_whence) {
        case 0 /*SEEK_SET*/ : start = 0; break;
        case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
        case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
        default : return 0;
        }
        if ((start += l->l_start) < 0 || l->l_len < 0)
                return 0;
        fl->fl_type = l->l_type;
        fl->fl_start = start;   /* we record the absolute position */
        fl->fl_whence = 0;      /* FIXME: do we record {l_start} as passed? */
        if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
                fl->fl_end = OFFSET_MAX;
        fl->fl_owner = current;
        fl->fl_wait = NULL;             /* just for cleanliness */
        return 1;
}
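
/*
 * Worked example (illustrative, not part of the original file): with
 * l_whence == 1 (SEEK_CUR), f_pos == 100, l_start == -10 and l_len == 20,
 * copy_flock() yields the absolute, inclusive range fl_start == 90,
 * fl_end == 109. With l_len == 0 the lock runs to end of file, so
 * fl_end == OFFSET_MAX.
 */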

/*
 * Determine if lock {sys_fl} blocks lock {caller_fl} ...
 */

static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
        if (caller_fl->fl_owner == sys_fl->fl_owner)
                return 0;
        if (!overlap(caller_fl, sys_fl))
                return 0;
        switch (caller_fl->fl_type) {
        case F_RDLCK :
                return sys_fl->fl_type != F_RDLCK;
        case F_WRLCK :
                return 1;       /* overlapping region not owned by caller */
        }
        return 0;       /* shouldn't get here, but just in case */
}

static int overlap(struct file_lock *fl1, struct file_lock *fl2)
{
        if (fl1->fl_start <= fl2->fl_start) {
                return fl1->fl_end >= fl2->fl_start;
        } else {
                return fl2->fl_end >= fl1->fl_start;
        }
}
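
/*
 * Worked example (illustrative, not part of the original file): ranges
 * are inclusive, so locks on bytes 0..4 and 4..9 overlap (they share
 * byte 4), while 0..4 and 5..9 do not -- lock_it() treats the latter
 * pair as adjacent and merges them if the lock types match.
 */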

/*
 * Add a lock to a file ...
 * Result is 0 for success or -ENOLCK.
 *
 * We try to be real clever here and always minimize the number of table
 * entries we use. For example, we merge adjacent locks whenever possible.
 * This consumes a bit of cpu and code space; is it really worth it? Beats me.
 *
 * I've tried to keep the following as small and simple as possible. If you can
 * make it smaller or simpler, please do. /dje 92Aug11
 *
 * WARNING: We assume the lock doesn't conflict with any other lock.
 */

static int lock_it(struct file *filp, struct file_lock *caller)
{
        struct file_lock *fl,*new;

        /*
         * It's easier if we allocate a slot for the lock first, and then
         * release it later if we have to (IE: if it can be merged with
         * another). This way the for() loop always knows that {caller} is an
         * existing entry. This will cause the routine to fail unnecessarily
         * in rare cases, but perfection can be pushed too far. :-)
         */

        if ((caller = alloc_lock(filp, caller)) == NULL)
                return -ENOLCK;

        /*
         * First scan to see if we are changing/augmenting an existing lock ...
         */

        for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (caller->fl_owner != fl->fl_owner)
                        continue;
                if (caller == fl)
                        continue;
                if (!overlap(caller, fl)) {
                        /*
                         * Detect adjacent regions (if same lock type) ...
                         */
                        if (caller->fl_type != fl->fl_type)
                                continue;
                        if (caller->fl_end + 1 == fl->fl_start) {
                                fl->fl_start = caller->fl_start;
                                free_lock(filp, caller);
                                caller = fl;
                                /* must continue, may overlap others now */
                        } else if (caller->fl_start - 1 == fl->fl_end) {
                                fl->fl_end = caller->fl_end;
                                free_lock(filp, caller);
                                caller = fl;
                                /* must continue, may overlap others now */
                        }
                        continue;
                }
                /*
                 * We've found an overlapping region. Is it a change of lock
                 * type, or are we changing the size of the locked space?
                 */
                if (caller->fl_type != fl->fl_type) {
                        if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
                                /*
                                 * The new lock splits the old one in two ...
                                 * {fl} is the bottom piece, {caller} is the
                                 * new lock, and {new} is the top piece.
                                 */
                                if ((new = alloc_lock(filp, fl)) == NULL) {
                                        free_lock(filp, caller);
                                        return -ENOLCK;
                                }
                                fl->fl_end = caller->fl_start - 1;
                                new->fl_start = caller->fl_end + 1;
                                return 0;
                        }
                        if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
                                /*
                                 * The new lock completely replaces the old one ...
                                 */
                                free_lock(filp, fl);
                                return 0;
                        }
                        if (caller->fl_end < fl->fl_end) {
                                fl->fl_start = caller->fl_end + 1;
                                /* must continue, may be more overlaps */
                        } else if (caller->fl_start > fl->fl_start) {
                                fl->fl_end = caller->fl_start - 1;
                                /* must continue, may be more overlaps */
                        } else {
                                printk("VFS: lock_it: program bug: unanticipated overlap\n");
                                free_lock(filp, caller);
                                return -ENOLCK;
                        }
                } else {        /* The new lock augments an existing lock ... */
                        int grew = 0;

                        if (caller->fl_start < fl->fl_start) {
                                fl->fl_start = caller->fl_start;
                                grew = 1;
                        }
                        if (caller->fl_end > fl->fl_end) {
                                fl->fl_end = caller->fl_end;
                                grew = 1;
                        }
                        free_lock(filp, caller);
                        caller = fl;
                        if (!grew)
                                return 0;
                        /* must continue, may be more overlaps */
                }
        }

        /*
         * New lock doesn't overlap any regions ...
         * alloc_lock() has already been called, so we're done!
         */

        return 0;
}
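
/*
 * Worked example (illustrative, not part of the original file): if a
 * process holds a read lock on bytes 0..9 and then read-locks bytes
 * 10..19, the adjacency check above extends the existing entry to
 * 0..19 and frees the new slot, so the region costs one table entry.
 * Write-locking 5..14 on top of a read lock 0..19 instead splits it
 * into a read lock 0..4, a write lock 5..14, and a read lock 15..19.
 */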

/*
 * Handle F_UNLCK ...
 * Result is 0 for success, or -EINVAL or -ENOLCK.
 * -ENOLCK can happen when a lock must be split in two and no free table
 * entry is left for the second piece.
 */

static int unlock_it(struct file *filp, struct file_lock *caller)
{
        int one_unlocked = 0;
        struct file_lock *fl,*next;

        for (fl = filp->f_inode->i_flock; fl != NULL; ) {
                if (caller->fl_owner != fl->fl_owner || !overlap(caller, fl)) {
                        fl = fl->fl_next;
                        continue;
                }
                one_unlocked = 1;
                if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
                        /*
                         * Lock is split in two ...
                         * {fl} is the bottom piece, {next} is the top piece.
                         */
                        if ((next = alloc_lock(filp, fl)) == NULL)
                                return -ENOLCK;
                        fl->fl_end = caller->fl_start - 1;
                        next->fl_start = caller->fl_end + 1;
                        return 0;
                }
                /*
                 * At this point we know there is an overlap and we know the
                 * lock isn't split into two ...
                 *
                 * Unless the lock table is broken, entries will not overlap.
                 * IE: User X won't have an entry locking bytes 1-3 and another
                 * entry locking bytes 3-5. Therefore, if the area being
                 * unlocked is a subset of the total area, we don't need to
                 * traverse any more of the list. The code is a tad more
                 * complicated by this optimization. Perhaps it's not worth it.
                 *
                 * WARNING: We assume free_lock() does not alter
                 *      {fl_start, fl_end}.
                 *
                 * {fl_next} gets clobbered when the entry is moved to
                 * the free list, so grab it now ...
                 */
                next = fl->fl_next;
                if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
                        free_lock(filp, fl);
                } else if (caller->fl_start > fl->fl_start) {
                        fl->fl_end = caller->fl_start - 1;
                } else {
                        /* caller->fl_end < fl->fl_end */
                        fl->fl_start = caller->fl_end + 1;
                }
                if (caller->fl_start >= fl->fl_start && caller->fl_end <= fl->fl_end)
                        return 0;               /* no more to be found */
                fl = next;
                /* must continue, there may be more to unlock */
        }

        return one_unlocked ? 0 : -EINVAL;
}
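
/*
 * Worked example (illustrative, not part of the original file): unlocking
 * bytes 10..19 of a lock covering 0..29 takes the split path above and
 * leaves two entries, 0..9 and 20..29; this is the one case where an
 * unlock can fail with -ENOLCK, since it needs a fresh table slot.
 */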

static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template)
{
        struct file_lock *new;

        if (file_lock_free_list == NULL)
                return NULL;                    /* no available entry */
        if (file_lock_free_list->fl_owner != NULL)
                panic("VFS: alloc_lock: broken free list\n");

        new = file_lock_free_list;              /* remove from free list */
        file_lock_free_list = file_lock_free_list->fl_next;

        *new = *template;

        new->fl_next = filp->f_inode->i_flock;  /* insert into file's list */
        filp->f_inode->i_flock = new;

        new->fl_owner = current;        /* FIXME: needed? */
        new->fl_wait = NULL;
        return new;
}

/*
 * Add a lock to the free list ...
 *
 * WARNING: We must not alter {fl_start, fl_end}. See unlock_it().
 */

static void free_lock(struct file *filp, struct file_lock *fl)
{
        struct file_lock **fl_p;

        if (fl->fl_owner == NULL)       /* sanity check */
                panic("VFS: free_lock: broken lock list\n");

        /*
         * We only use a singly linked list to save some memory space
         * (the only place we'd use a doubly linked list is here).
         */

        for (fl_p = &filp->f_inode->i_flock; *fl_p != NULL; fl_p = &(*fl_p)->fl_next) {
                if (*fl_p == fl)
                        break;
        }
        if (*fl_p == NULL) {
                printk("VFS: free_lock: lock is not in file's lock list\n");
        } else {
                *fl_p = (*fl_p)->fl_next;
        }

        fl->fl_next = file_lock_free_list;      /* add to free list */
        file_lock_free_list = fl;
        fl->fl_owner = NULL;                    /* for sanity checks */

        wake_up(&fl->fl_wait);
}
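
/*
 * Illustrative sketch (not part of the original file): the same
 * pointer-to-pointer removal idiom used by free_lock(), on a generic
 * singly linked list. {head}, {victim} and unlink_node() are
 * hypothetical names. {np} always addresses the field that points at
 * the current entry, so one loop handles head and middle alike.
 */
#if 0
struct node { struct node *next; };

static void unlink_node(struct node **head, struct node *victim)
{
        struct node **np;

        for (np = head; *np != NULL; np = &(*np)->next) {
                if (*np == victim) {
                        *np = victim->next;     /* works for head or middle */
                        return;
                }
        }
}
#endif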
