root/fs/locks.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. fcntl_init_locks
  2. fcntl_getlk
  3. fcntl_setlk
  4. fcntl_remove_locks
  5. copy_flock
  6. conflict
  7. overlap
  8. lock_it
  9. unlock_it
  10. alloc_lock
  11. free_lock

   1 /*
   2  *  linux/fs/locks.c
   3  *
   4  *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
   5  *  Doug Evans, 92Aug07, dje@sspiff.uucp.
   6  *
   7  * FIXME: two things aren't handled yet:
   8  *      - deadlock detection/avoidance (of dubious merit, but since it's in
   9  *        the definition, I guess it should be provided eventually)
  10  *      - mandatory locks (requires lots of changes elsewhere)
  11  */
  12 
  13 #include <asm/segment.h>
  14 
  15 #include <linux/sched.h>
  16 #include <linux/kernel.h>
  17 #include <linux/errno.h>
  18 #include <linux/stat.h>
  19 #include <linux/fcntl.h>
  20 
  21 #define OFFSET_MAX      0x7fffffff      /* FIXME: move elsewhere? */
  22 
  23 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
  24 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
  25 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
  26 static int lock_it(struct file *filp, struct file_lock *caller);
  27 static int unlock_it(struct file *filp, struct file_lock *caller);
  28 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template);
  29 static void free_lock(struct file *filp, struct file_lock *fl);
  30 
  31 static struct file_lock file_lock_table[NR_FILE_LOCKS];
  32 static struct file_lock *file_lock_free_list;
  33 
  34 /*
  35  * Called at boot time to initialize the lock table ...
  36  */
  37 
  38 void fcntl_init_locks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  39 {
  40         struct file_lock *fl;
  41 
  42         for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
  43                 fl->fl_next = fl + 1;
  44                 fl->fl_owner = NULL;
  45         }
  46         file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
  47         file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
  48         file_lock_free_list = &file_lock_table[0];
  49 }
  50 
  51 int fcntl_getlk(unsigned int fd, struct flock *l)
     /* [previous][next][first][last][top][bottom][index][help] */
  52 {       
  53         struct flock flock;
  54         struct file *filp;
  55         struct file_lock *fl,file_lock;
  56 
  57         if (fd >= NR_OPEN || !(filp = current->filp[fd]))
  58                 return -EBADF;
  59         verify_area(l, sizeof(*l));
  60         memcpy_fromfs(&flock, l, sizeof(flock));
  61         if (flock.l_type == F_UNLCK)
  62                 return -EINVAL;
  63         if (!copy_flock(filp, &file_lock, &flock))
  64                 return -EINVAL;
  65 
  66         for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
  67                 if (conflict(&file_lock, fl)) {
  68                         flock.l_pid = fl->fl_owner->pid;
  69                         flock.l_start = fl->fl_start;
  70                         flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
  71                                 fl->fl_end - fl->fl_start + 1;
  72                         flock.l_whence = fl->fl_whence;
  73                         flock.l_type = fl->fl_type;
  74                         memcpy_tofs(l, &flock, sizeof(flock));
  75                         return 0;
  76                 }
  77         }
  78 
  79         flock.l_type = F_UNLCK;                 /* no conflict found */
  80         memcpy_tofs(l, &flock, sizeof(flock));
  81         return 0;
  82 }
  83 
  84 /*
  85  * This function implements both F_SETLK and F_SETLKW.
  86  */
  87 
  88 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
     /* [previous][next][first][last][top][bottom][index][help] */
  89 {       
  90         struct file *filp;
  91         struct file_lock *fl,file_lock;
  92         struct flock flock;
  93 
  94         /*
  95          * Get arguments and validate them ...
  96          */
  97 
  98         if (fd >= NR_OPEN || !(filp = current->filp[fd]))
  99                 return -EBADF;
 100         verify_area(l, sizeof(*l));
 101         memcpy_fromfs(&flock, l, sizeof(flock));
 102         if (!copy_flock(filp, &file_lock, &flock))
 103                 return -EINVAL;
 104         switch (file_lock.fl_type) {
 105         case F_RDLCK :
 106                 if (!(filp->f_mode & 1))
 107                         return -EBADF;
 108                 break;
 109         case F_WRLCK :
 110                 if (!(filp->f_mode & 2))
 111                         return -EBADF;
 112                 break;
 113         case F_SHLCK :
 114                 if (!(filp->f_mode & 3))
 115                         return -EBADF;
 116                 file_lock.fl_type = F_RDLCK;
 117                 break;
 118         case F_EXLCK :
 119                 if (!(filp->f_mode & 3))
 120                         return -EBADF;
 121                 file_lock.fl_type = F_WRLCK;
 122                 break;
 123         case F_UNLCK :
 124                 break;
 125         }
 126 
 127         /*
 128          * F_UNLCK needs to be handled differently ...
 129          */
 130 
 131         if (file_lock.fl_type == F_UNLCK)
 132                 return unlock_it(filp, &file_lock);
 133 
 134         /*
 135          * Scan for a conflicting lock ...
 136          */
 137 
 138 repeat:
 139         for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
 140                 if (!conflict(&file_lock, fl))
 141                         continue;
 142                 /*
 143                  * File is locked by another process. If this is F_SETLKW
 144                  * wait for the lock to be released.
 145                  * FIXME: We need to check for deadlocks here.
 146                  */
 147                 if (cmd == F_SETLKW) {
 148                         interruptible_sleep_on(&fl->fl_wait);
 149                         goto repeat;
 150                 }
 151                 return -EAGAIN;
 152         }
 153 
 154         /*
 155          * Lock doesn't conflict with any other lock ...
 156          */
 157 
 158         return lock_it(filp, &file_lock);
 159 }
 160 
 161 /*
 162  * This function is called when the file is closed.
 163  */
 164 
 165 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
     /* [previous][next][first][last][top][bottom][index][help] */
 166 {
 167         struct file_lock *fl,*next;
 168 
 169         for (fl = filp->f_inode->i_flock; fl != NULL; ) {
 170                 /*
 171                  * If this one is freed, {fl_next} gets clobbered when the
 172                  * entry is moved to the free list, so grab it now ...
 173                  */
 174                 next = fl->fl_next;
 175                 if (fl->fl_owner == task)
 176                         free_lock(filp, fl);
 177                 fl = next;
 178         }
 179 }
 180 
 181 /*
 182  * Verify a "struct flock" and copy it to a "struct file_lock" ...
 183  * Result is a boolean indicating success.
 184  */
 185 
 186 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
     /* [previous][next][first][last][top][bottom][index][help] */
 187 {
 188         off_t start;
 189 
 190         if (!filp->f_inode)     /* just in case */
 191                 return 0;
 192         if (!S_ISREG(filp->f_inode->i_mode))
 193                 return 0;
 194         if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
 195          && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
 196                 return 0;
 197         switch (l->l_whence) {
 198         case 0 /*SEEK_SET*/ : start = 0; break;
 199         case 1 /*SEEK_CUR*/ : start = filp->f_pos; break;
 200         case 2 /*SEEK_END*/ : start = filp->f_inode->i_size; break;
 201         default : return 0;
 202         }
 203         if ((start += l->l_start) < 0 || l->l_len < 0)
 204                 return 0;
 205         fl->fl_type = l->l_type;
 206         fl->fl_start = start;   /* we record the absolute position */
 207         fl->fl_whence = 0;      /* FIXME: do we record {l_start} as passed? */
 208         if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
 209                 fl->fl_end = OFFSET_MAX;
 210         fl->fl_owner = current;
 211         fl->fl_wait = NULL;             /* just for cleanliness */
 212         return 1;
 213 }
 214 
 215 /*
 216  * Determine if lock {sys_fl} blocks lock {caller_fl} ...
 217  */
 218 
 219 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
     /* [previous][next][first][last][top][bottom][index][help] */
 220 {
 221         if (caller_fl->fl_owner == sys_fl->fl_owner)
 222                 return 0;
 223         if (!overlap(caller_fl, sys_fl))
 224                 return 0;
 225         switch (caller_fl->fl_type) {
 226         case F_RDLCK :
 227                 return sys_fl->fl_type != F_RDLCK;
 228         case F_WRLCK :
 229                 return 1;       /* overlapping region not owned by caller */
 230         }
 231         return 0;       /* shouldn't get here, but just in case */
 232 }
 233 
 234 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
     /* [previous][next][first][last][top][bottom][index][help] */
 235 {
 236         if (fl1->fl_start <= fl2->fl_start) {
 237                 return fl1->fl_end >= fl2->fl_start;
 238         } else {
 239                 return fl2->fl_end >= fl1->fl_start;
 240         }
 241 }
 242 
 243 /*
 244  * Add a lock to a file ...
 245  * Result is 0 for success or -ENOLCK.
 246  *
 247  * We try to be real clever here and always minimize the number of table
 248  * entries we use. For example we merge adjacent locks whenever possible. This
 249  * consumes a bit of cpu and code space, is it really worth it? Beats me.
 250  *
 251  * I've tried to keep the following as small and simple as possible. If you can
 252  * make it smaller or simpler, please do. /dje 92Aug11
 253  *
 254  * WARNING: We assume the lock doesn't conflict with any other lock.
 255  */
 256 
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl,*new;

	/*
	 * It's easier if we allocate a slot for the lock first, and then
	 * release it later if we have to (IE: if it can be merged with
	 * another). This way the for() loop always knows that {caller} is an
	 * existing entry. This will cause the routine to fail unnecessarily
	 * in rare cases, but perfection can be pushed too far. :-)
	 */

	if ((caller = alloc_lock(filp, caller)) == NULL)
		return -ENOLCK;

	/*
	 * First scan to see if we are changing/augmenting an existing lock ...
	 * Only locks held by the same owner are merge candidates; {caller}
	 * itself is already on the list and must be skipped.
	 */

	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (caller->fl_owner != fl->fl_owner)
			continue;
		if (caller == fl)
			continue;
		if (!overlap(caller, fl)) {
			/*
			 * Detect adjacent regions (if same lock type) ...
			 * Coalescing keeps the table small; after merging,
			 * {caller} becomes the surviving (grown) entry.
			 */
			if (caller->fl_type != fl->fl_type)
				continue;
			if (caller->fl_end + 1 == fl->fl_start) {
				/* caller sits immediately below fl */
				fl->fl_start = caller->fl_start;
				free_lock(filp, caller);
				caller = fl;
				/* must continue, may overlap others now */
			} else if (caller->fl_start - 1 == fl->fl_end) {
				/* caller sits immediately above fl */
				fl->fl_end = caller->fl_end;
				free_lock(filp, caller);
				caller = fl;
				/* must continue, may overlap others now */
			}
			continue;
		}
		/*
		 * We've found an overlapping region. Is it a change of lock
		 * type, or are we changing the size of the locked space?
		 */
		if (caller->fl_type != fl->fl_type) {
			if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
				/*
				 * The new lock splits the old one in two ...
				 * {fl} is the bottom piece, {caller} is the
				 * new lock, and {new} is the top piece.
				 */
				if ((new = alloc_lock(filp, fl)) == NULL) {
					free_lock(filp, caller);
					return -ENOLCK;
				}
				fl->fl_end = caller->fl_start - 1;
				new->fl_start = caller->fl_end + 1;
				return 0;
			}
			if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
				/*
				 * The new lock completely replaces old one ...
				 *
				 * NOTE(review): returning here skips the rest
				 * of the list; if the same owner held another
				 * old-type lock also covered by {caller}, it
				 * would survive and overlap {caller}. Verify
				 * whether that situation can actually arise.
				 */
				free_lock(filp, fl);
				return 0;
			}
			if (caller->fl_end < fl->fl_end) {
				/* trim the bottom of {fl} up past {caller} */
				fl->fl_start = caller->fl_end + 1;
				/* must continue, may be more overlaps */
			} else if (caller->fl_start > fl->fl_start) {
				/* trim the top of {fl} down below {caller} */
				fl->fl_end = caller->fl_start - 1;
				/* must continue, may be more overlaps */
			} else {
				printk("lock_it: program bug: unanticipated overlap\n");
				free_lock(filp, caller);
				return -ENOLCK;
			}
		} else {	/* The new lock augments an existing lock ... */
			int grew = 0;

			/* Extend {fl} to cover {caller}'s range, then drop
			 * {caller} — {fl} becomes the lock being installed. */
			if (caller->fl_start < fl->fl_start) {
				fl->fl_start = caller->fl_start;
				grew = 1;
			}
			if (caller->fl_end > fl->fl_end) {
				fl->fl_end = caller->fl_end;
				grew = 1;
			}
			free_lock(filp, caller);
			caller = fl;
			if (!grew)
				return 0;	/* fully contained: nothing changed */
			/* must continue, may be more overlaps */
		}
	}

	/*
	 * New lock doesn't overlap any regions ...
	 * alloc_lock() has already been called, so we're done!
	 */

	return 0;
}
 363 
 364 /*
 365  * Handle F_UNLCK ...
 366  * Result is 0 for success, or -EINVAL or -ENOLCK.
 367  * ENOLCK can happen when a lock is split into two.
 368  */
 369 
static int unlock_it(struct file *filp, struct file_lock *caller)
{
	int one_unlocked = 0;	/* did we remove/shrink at least one lock? */
	struct file_lock *fl,*next;

	for (fl = filp->f_inode->i_flock; fl != NULL; ) {
		/* only {caller}'s own overlapping locks are affected */
		if (caller->fl_owner != fl->fl_owner || !overlap(caller, fl)) {
			fl = fl->fl_next;
			continue;
		}
		one_unlocked = 1;
		if (caller->fl_start > fl->fl_start && caller->fl_end < fl->fl_end) {
			/*
			 * Lock is split in two ...
			 * {fl} is the bottom piece, {next} is the top piece.
			 * This is the only path that can return -ENOLCK.
			 */
			if ((next = alloc_lock(filp, fl)) == NULL)
				return -ENOLCK;
			fl->fl_end = caller->fl_start - 1;
			next->fl_start = caller->fl_end + 1;
			return 0;
		}
		/*
		 * At this point we know there is an overlap and we know the
		 * lock isn't split into two ...
		 *
		 * Unless the lock table is broken, entries will not overlap.
		 * IE: User X won't have an entry locking bytes 1-3 and another
		 * entry locking bytes 3-5. Therefore, if the area being
		 * unlocked is a subset of the total area, we don't need to
		 * traverse any more of the list. The code is a tad more
		 * complicated by this optimization. Perhaps it's not worth it.
		 *
		 * WARNING: We assume free_lock() does not alter
		 *	{fl_start, fl_end} — the subset test below may read
		 *	them from an entry that is already back on the free
		 *	list.
		 *
		 * {fl_next} gets clobbered when the entry is moved to
		 * the free list, so grab it now ...
		 */
		next = fl->fl_next;
		if (caller->fl_start <= fl->fl_start && caller->fl_end >= fl->fl_end) {
			/* unlocked area swallows {fl} whole */
			free_lock(filp, fl);
		} else if (caller->fl_start > fl->fl_start) {
			/* unlocked area clips the top of {fl} */
			fl->fl_end = caller->fl_start - 1;
		} else {
			/* caller->fl_end < fl->fl_end */
			/* unlocked area clips the bottom of {fl} */
			fl->fl_start = caller->fl_end + 1;
		}
		if (caller->fl_start >= fl->fl_start && caller->fl_end <= fl->fl_end)
			return 0;		/* no more to be found */
		fl = next;
		/* must continue, there may be more to unlock */
	}

	/* no matching lock existed: POSIX treats that as EINVAL here */
	return one_unlocked ? 0 : -EINVAL;
}
 426 
 427 static struct file_lock *alloc_lock(struct file *filp, struct file_lock *template)
     /* [previous][next][first][last][top][bottom][index][help] */
 428 {
 429         struct file_lock *new;
 430 
 431         if (file_lock_free_list == NULL)
 432                 return NULL;                    /* no available entry */
 433         if (file_lock_free_list->fl_owner != NULL)
 434                 panic("alloc_lock: broken free list\n");
 435 
 436         new = file_lock_free_list;              /* remove from free list */
 437         file_lock_free_list = file_lock_free_list->fl_next;
 438 
 439         *new = *template;
 440 
 441         new->fl_next = filp->f_inode->i_flock;  /* insert into file's list */
 442         filp->f_inode->i_flock = new;
 443 
 444         new->fl_owner = current;        /* FIXME: needed? */
 445         new->fl_wait = NULL;
 446         return new;
 447 }
 448 
 449 /*
 450  * Add a lock to the free list ...
 451  *
 452  * WARNING: We must not alter {fl_start, fl_end}. See unlock_it().
 453  */
 454 
 455 static void free_lock(struct file *filp, struct file_lock *fl)
     /* [previous][next][first][last][top][bottom][index][help] */
 456 {
 457         struct file_lock **fl_p;
 458 
 459         if (fl->fl_owner == NULL)       /* sanity check */
 460                 panic("free_lock: broken lock list\n");
 461 
 462         /*
 463          * We only use a singly linked list to save some memory space
 464          * (the only place we'd use a doubly linked list is here).
 465          */
 466 
 467         for (fl_p = &filp->f_inode->i_flock; *fl_p != NULL; fl_p = &(*fl_p)->fl_next) {
 468                 if (*fl_p == fl)
 469                         break;
 470         }
 471         if (*fl_p == NULL) {
 472                 printk("free_lock: lock is not in file's lock list\n");
 473         } else {
 474                 *fl_p = (*fl_p)->fl_next;
 475         }
 476 
 477         fl->fl_next = file_lock_free_list;      /* add to free list */
 478         file_lock_free_list = fl;
 479         fl->fl_owner = NULL;                    /* for sanity checks */
 480 
 481         wake_up(&fl->fl_wait);
 482 }

/* [previous][next][first][last][top][bottom][index][help] */