root/fs/inode.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. hashfn
  2. insert_inode_free
  3. remove_inode_free
  4. insert_inode_hash
  5. remove_inode_hash
  6. put_last_free
  7. grow_inodes
  8. inode_init
  9. wait_on_inode
  10. lock_inode
  11. unlock_inode
  12. clear_inode
  13. fs_may_mount
  14. fs_may_umount
  15. fs_may_remount_ro
  16. write_inode
  17. read_inode
  18. inode_change_ok
  19. inode_setattr
  20. notify_change
  21. bmap
  22. invalidate_inodes
  23. sync_inodes
  24. iput
  25. value
  26. get_empty_inode
  27. get_pipe_inode
  28. __iget
  29. __wait_on_inode

   1 /*
   2  *  linux/fs/inode.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 #include <linux/stat.h>
   8 #include <linux/sched.h>
   9 #include <linux/kernel.h>
  10 #include <linux/mm.h>
  11 #include <linux/string.h>
  12 
  13 #include <asm/system.h>
  14 
/* Number of buckets in the inode hash table. */
   15 #define NR_IHASH 512
   16 
   17 /*
   18  * Be VERY careful when you access the inode hash table. There
   19  * are some rather scary race conditions you need to take care of:
   20  *  - P1 tries to open file "xx", calls "iget()" with the proper
   21  *    inode number, but blocks because it's not on the list.
   22  *  - P2 deletes file "xx", gets the inode (which P1 has just read,
   23  *    but P1 hasn't woken up to the fact yet)
   24  *  - P2 iput()'s the inode, which now has i_nlink = 0
   25  *  - P1 wakes up and has the inode, but now P2 has made that
   26  *    inode invalid (but P1 has no way of knowing that).
   27  *
   28  * The "updating" counter makes sure that when P1 blocks on the
   29  * iget(), P2 can't delete the inode from under it because P2
   30  * will wait until P1 has been able to update the inode usage
   31  * count so that the inode will stay in use until everybody has
   32  * closed it..
   33  */
   34 static struct inode_hash_entry {
   35         struct inode * inode;
   36         int updating;
   37 } hash_table[NR_IHASH];
   38 
/* Head of the circular doubly-linked list of all in-core inodes;
 * insert_inode_free() pushes at the front, put_last_free() at the back. */
   39 static struct inode * first_inode;
/* Tasks sleeping for an inode to come free (see iput/get_empty_inode). */
   40 static struct wait_queue * inode_wait = NULL;
   41 /* Keep these next two contiguous in memory for sysctl.c */
   42 int nr_inodes = 0, nr_free_inodes = 0;
   43 int max_inodes = NR_INODE;
  44 
  45 static inline int const hashfn(kdev_t dev, unsigned int i)
     /* [previous][next][first][last][top][bottom][index][help] */
  46 {
  47         return (HASHDEV(dev) ^ i) % NR_IHASH;
  48 }
  49 
  50 static inline struct inode_hash_entry * const hash(kdev_t dev, int i)
  51 {
  52         return hash_table + hashfn(dev, i);
  53 }
  54 
  55 static inline void insert_inode_free(struct inode *inode)
     /* [previous][next][first][last][top][bottom][index][help] */
  56 {
  57         struct inode * prev, * next = first_inode;
  58 
  59         first_inode = inode;
  60         prev = next->i_prev;
  61         inode->i_next = next;
  62         inode->i_prev = prev;
  63         prev->i_next = inode;
  64         next->i_prev = inode;
  65 }
  66 
  67 static inline void remove_inode_free(struct inode *inode)
     /* [previous][next][first][last][top][bottom][index][help] */
  68 {
  69         if (first_inode == inode)
  70                 first_inode = first_inode->i_next;
  71         if (inode->i_next)
  72                 inode->i_next->i_prev = inode->i_prev;
  73         if (inode->i_prev)
  74                 inode->i_prev->i_next = inode->i_next;
  75         inode->i_next = inode->i_prev = NULL;
  76 }
  77 
  78 void insert_inode_hash(struct inode *inode)
     /* [previous][next][first][last][top][bottom][index][help] */
  79 {
  80         struct inode_hash_entry *h;
  81         h = hash(inode->i_dev, inode->i_ino);
  82 
  83         inode->i_hash_next = h->inode;
  84         inode->i_hash_prev = NULL;
  85         if (inode->i_hash_next)
  86                 inode->i_hash_next->i_hash_prev = inode;
  87         h->inode = inode;
  88 }
  89 
  90 static inline void remove_inode_hash(struct inode *inode)
     /* [previous][next][first][last][top][bottom][index][help] */
  91 {
  92         struct inode_hash_entry *h;
  93         h = hash(inode->i_dev, inode->i_ino);
  94 
  95         if (h->inode == inode)
  96                 h->inode = inode->i_hash_next;
  97         if (inode->i_hash_next)
  98                 inode->i_hash_next->i_hash_prev = inode->i_hash_prev;
  99         if (inode->i_hash_prev)
 100                 inode->i_hash_prev->i_hash_next = inode->i_hash_next;
 101         inode->i_hash_prev = inode->i_hash_next = NULL;
 102 }
 103 
 104 static inline void put_last_free(struct inode *inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 105 {
 106         remove_inode_free(inode);
 107         inode->i_prev = first_inode->i_prev;
 108         inode->i_prev->i_next = inode;
 109         inode->i_next = first_inode;
 110         inode->i_next->i_prev = inode;
 111 }
 112 
 113 int grow_inodes(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 114 {
 115         struct inode * inode;
 116         int i;
 117 
 118         if (!(inode = (struct inode*) get_free_page(GFP_KERNEL)))
 119                 return -ENOMEM;
 120 
 121         i=PAGE_SIZE / sizeof(struct inode);
 122         nr_inodes += i;
 123         nr_free_inodes += i;
 124 
 125         if (!first_inode)
 126                 inode->i_next = inode->i_prev = first_inode = inode++, i--;
 127 
 128         for ( ; i ; i-- )
 129                 insert_inode_free(inode++);
 130         return 0;
 131 }
 132 
 133 unsigned long inode_init(unsigned long start, unsigned long end)
     /* [previous][next][first][last][top][bottom][index][help] */
 134 {
 135         memset(hash_table, 0, sizeof(hash_table));
 136         first_inode = NULL;
 137         return start;
 138 }
 139 
 140 static void __wait_on_inode(struct inode *);
 141 
 142 static inline void wait_on_inode(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144         if (inode->i_lock)
 145                 __wait_on_inode(inode);
 146 }
 147 
 148 static inline void lock_inode(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 149 {
 150         wait_on_inode(inode);
 151         inode->i_lock = 1;
 152 }
 153 
 154 static inline void unlock_inode(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 155 {
 156         inode->i_lock = 0;
 157         wake_up(&inode->i_wait);
 158 }
 159 
  160 /*
  161  * Note that we don't want to disturb any wait-queues when we discard
  162  * an inode.
  163  *
  164  * Argghh. Got bitten by a gcc problem with inlining: no way to tell
  165  * the compiler that the inline asm function 'memset' changes 'inode'.
  166  * I've been searching for the bug for days, and was getting desperate.
  167  * Finally looked at the assembler output... Grrr.
  168  *
  169  * The solution is the weird use of 'volatile'. Ho humm. Have to report
  170  * it to the gcc lists, and hope we can do this more cleanly some day..
  171  */
/*
 * Wipe an in-core inode and return it to the free list: drop its page
 * cache, drop quota references, detach it from hash and list, zero it
 * (preserving only the wait queue), and re-insert it as free.
 */
  172 void clear_inode(struct inode * inode)
      /* [previous][next][first][last][top][bottom][index][help] */
  173 {
  174         struct wait_queue * wait;
  175 
  176         truncate_inode_pages(inode, 0);
  177         wait_on_inode(inode);
  178         if (IS_WRITABLE(inode)) {
  179                 if (inode->i_sb && inode->i_sb->dq_op)
  180                         inode->i_sb->dq_op->drop(inode);
  181         }
        /* Detach from the hash chain and the inode list before wiping. */
  182         remove_inode_hash(inode);
  183         remove_inode_free(inode);
        /* Save the wait queue across the memset; the volatile casts stop
         * gcc from optimizing the save/restore away (see comment above). */
  184         wait = ((volatile struct inode *) inode)->i_wait;
        /* An in-use inode becomes free now: fix the accounting. */
  185         if (inode->i_count)
  186                 nr_free_inodes++;
  187         memset(inode,0,sizeof(*inode));
  188         ((volatile struct inode *) inode)->i_wait = wait;
  189         insert_inode_free(inode);
  190 }
 191 
 192 int fs_may_mount(kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 193 {
 194         struct inode * inode, * next;
 195         int i;
 196 
 197         next = first_inode;
 198         for (i = nr_inodes ; i > 0 ; i--) {
 199                 inode = next;
 200                 next = inode->i_next;   /* clear_inode() changes the queues.. */
 201                 if (inode->i_dev != dev)
 202                         continue;
 203                 if (inode->i_count || inode->i_dirt || inode->i_lock)
 204                         return 0;
 205                 clear_inode(inode);
 206         }
 207         return 1;
 208 }
 209 
/*
 * Check whether "dev" can be unmounted: no inode on it may still be in
 * use, except the mount root itself, whose expected reference count
 * depends on whether it is mounted on itself.  Returns 1 when the
 * unmount may proceed, 0 when something is still busy.
 */
  210 int fs_may_umount(kdev_t dev, struct inode * mount_root)
      /* [previous][next][first][last][top][bottom][index][help] */
  211 {
  212         struct inode * inode;
  213         int i;
  214 
  215         inode = first_inode;
  216         for (i=0 ; i < nr_inodes ; i++, inode = inode->i_next) {
  217                 if (inode->i_dev != dev || !inode->i_count)
  218                         continue;
                /* The root is legitimately held once by the mount, or
                 * twice when it is its own mount point. */
  219                 if (inode == mount_root && inode->i_count ==
  220                     (inode->i_mount != inode ? 1 : 2))
  221                         continue;
  222                 return 0;
  223         }
  224         return 1;
  225 }
 226 
 227 int fs_may_remount_ro(kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 228 {
 229         struct file * file;
 230         int i;
 231 
 232         /* Check that no files are currently opened for writing. */
 233         for (file = first_file, i=0; i<nr_files; i++, file=file->f_next) {
 234                 if (!file->f_count || !file->f_inode ||
 235                     file->f_inode->i_dev != dev)
 236                         continue;
 237                 if (S_ISREG(file->f_inode->i_mode) && (file->f_mode & 2))
 238                         return 0;
 239         }
 240         return 1;
 241 }
 242 
 243 static void write_inode(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 244 {
 245         if (!inode->i_dirt)
 246                 return;
 247         wait_on_inode(inode);
 248         if (!inode->i_dirt)
 249                 return;
 250         if (!inode->i_sb || !inode->i_sb->s_op || !inode->i_sb->s_op->write_inode) {
 251                 inode->i_dirt = 0;
 252                 return;
 253         }
 254         inode->i_lock = 1;      
 255         inode->i_sb->s_op->write_inode(inode);
 256         unlock_inode(inode);
 257 }
 258 
 259 static inline void read_inode(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 260 {
 261         lock_inode(inode);
 262         if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->read_inode)
 263                 inode->i_sb->s_op->read_inode(inode);
 264         unlock_inode(inode);
 265 }
 266 
  267 /* POSIX UID/GID verification for setting inode attributes */
/*
 * Returns 0 if the caller is permitted to apply "attr" to "inode",
 * -EPERM otherwise.  The chmod check may clear S_ISGID in
 * attr->ia_mode as a side effect.
 */
  268 int inode_change_ok(struct inode *inode, struct iattr *attr)
      /* [previous][next][first][last][top][bottom][index][help] */
  269 {
  270         /*
  271          *      If force is set do it anyway.
  272          */
  273          
  274         if (attr->ia_valid & ATTR_FORCE)
  275                 return 0;
  276 
  277         /* Make sure a caller can chown */
        /* Allowed when the caller is fsuser(), or owns the inode AND is
         * not actually changing the uid. */
  278         if ((attr->ia_valid & ATTR_UID) &&
  279             (current->fsuid != inode->i_uid ||
  280              attr->ia_uid != inode->i_uid) && !fsuser())
  281                 return -EPERM;
  282 
  283         /* Make sure caller can chgrp */
        /* Allowed when fsuser(), or when the target gid is either one of
         * the caller's groups or already the inode's gid. */
  284         if ((attr->ia_valid & ATTR_GID) &&
  285             (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid) &&
  286             !fsuser())
  287                 return -EPERM;
  288 
  289         /* Make sure a caller can chmod */
  290         if (attr->ia_valid & ATTR_MODE) {
  291                 if ((current->fsuid != inode->i_uid) && !fsuser())
  292                         return -EPERM;
  293                 /* Also check the setgid bit! */
                /* Unprivileged callers outside the (new) group cannot set
                 * setgid: silently strip it rather than fail. */
  294                 if (!fsuser() && !in_group_p((attr->ia_valid & ATTR_GID) ? attr->ia_gid :
  295                                              inode->i_gid))
  296                         attr->ia_mode &= ~S_ISGID;
  297         }
  298 
  299         /* Check for setting the inode time */
        /* Explicit timestamps (utimes with a time argument) require
         * ownership or privilege. */
  300         if ((attr->ia_valid & ATTR_ATIME_SET) &&
  301             ((current->fsuid != inode->i_uid) && !fsuser()))
  302                 return -EPERM;
  303         if ((attr->ia_valid & ATTR_MTIME_SET) &&
  304             ((current->fsuid != inode->i_uid) && !fsuser()))
  305                 return -EPERM;
  306         return 0;
  307 }
 308 
 309 /*
 310  * Set the appropriate attributes from an attribute structure into
 311  * the inode structure.
 312  */
 313 void inode_setattr(struct inode *inode, struct iattr *attr)
     /* [previous][next][first][last][top][bottom][index][help] */
 314 {
 315         if (attr->ia_valid & ATTR_UID)
 316                 inode->i_uid = attr->ia_uid;
 317         if (attr->ia_valid & ATTR_GID)
 318                 inode->i_gid = attr->ia_gid;
 319         if (attr->ia_valid & ATTR_SIZE)
 320                 inode->i_size = attr->ia_size;
 321         if (attr->ia_valid & ATTR_ATIME)
 322                 inode->i_atime = attr->ia_atime;
 323         if (attr->ia_valid & ATTR_MTIME)
 324                 inode->i_mtime = attr->ia_mtime;
 325         if (attr->ia_valid & ATTR_CTIME)
 326                 inode->i_ctime = attr->ia_ctime;
 327         if (attr->ia_valid & ATTR_MODE) {
 328                 inode->i_mode = attr->ia_mode;
 329                 if (!fsuser() && !in_group_p(inode->i_gid))
 330                         inode->i_mode &= ~S_ISGID;
 331         }
 332         inode->i_dirt = 1;
 333 }
 334 
 335 /*
 336  * notify_change is called for inode-changing operations such as
 337  * chown, chmod, utime, and truncate.  It is guaranteed (unlike
 338  * write_inode) to be called from the context of the user requesting
 339  * the change.
 340  */
 341 
 342 int notify_change(struct inode * inode, struct iattr *attr)
     /* [previous][next][first][last][top][bottom][index][help] */
 343 {
 344         int retval;
 345 
 346         attr->ia_ctime = CURRENT_TIME;
 347         if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
 348                 if (!(attr->ia_valid & ATTR_ATIME_SET))
 349                         attr->ia_atime = attr->ia_ctime;
 350                 if (!(attr->ia_valid & ATTR_MTIME_SET))
 351                         attr->ia_mtime = attr->ia_ctime;
 352         }
 353 
 354         if (inode->i_sb && inode->i_sb->s_op  &&
 355             inode->i_sb->s_op->notify_change) 
 356                 return inode->i_sb->s_op->notify_change(inode, attr);
 357 
 358         if ((retval = inode_change_ok(inode, attr)) != 0)
 359                 return retval;
 360 
 361         inode_setattr(inode, attr);
 362         return 0;
 363 }
 364 
 365 /*
 366  * bmap is needed for demand-loading and paging: if this function
 367  * doesn't exist for a filesystem, then those things are impossible:
 368  * executables cannot be run from the filesystem etc...
 369  *
 370  * This isn't as bad as it sounds: the read-routines might still work,
 371  * so the filesystem would be otherwise ok (for example, you might have
 372  * a DOS filesystem, which doesn't lend itself to bmap very well, but
 373  * you could still transfer files to/from the filesystem)
 374  */
 375 int bmap(struct inode * inode, int block)
     /* [previous][next][first][last][top][bottom][index][help] */
 376 {
 377         if (inode->i_op && inode->i_op->bmap)
 378                 return inode->i_op->bmap(inode,block);
 379         return 0;
 380 }
 381 
 382 void invalidate_inodes(kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 383 {
 384         struct inode * inode, * next;
 385         int i;
 386 
 387         next = first_inode;
 388         for(i = nr_inodes ; i > 0 ; i--) {
 389                 inode = next;
 390                 next = inode->i_next;           /* clear_inode() changes the queues.. */
 391                 if (inode->i_dev != dev)
 392                         continue;
 393                 if (inode->i_count || inode->i_dirt || inode->i_lock) {
 394                         printk("VFS: inode busy on removed device %s\n",
 395                                kdevname(dev));
 396                         continue;
 397                 }
 398                 clear_inode(inode);
 399         }
 400 }
 401 
 402 void sync_inodes(kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 403 {
 404         int i;
 405         struct inode * inode;
 406 
 407         inode = first_inode;
 408         for(i = 0; i < nr_inodes*2; i++, inode = inode->i_next) {
 409                 if (dev && inode->i_dev != dev)
 410                         continue;
 411                 wait_on_inode(inode);
 412                 if (inode->i_dirt)
 413                         write_inode(inode);
 414         }
 415 }
 416 
 417 void iput(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 418 {
 419         if (!inode)
 420                 return;
 421         wait_on_inode(inode);
 422         if (!inode->i_count) {
 423                 printk("VFS: iput: trying to free free inode\n");
 424                 printk("VFS: device %s, inode %lu, mode=0%07o\n",
 425                         kdevname(inode->i_rdev), inode->i_ino, inode->i_mode);
 426                 return;
 427         }
 428         if (inode->i_pipe)
 429                 wake_up_interruptible(&PIPE_WAIT(*inode));
 430 repeat:
 431         if (inode->i_count>1) {
 432                 inode->i_count--;
 433                 return;
 434         }
 435 
 436         wake_up(&inode_wait);
 437         if (inode->i_pipe) {
 438                 unsigned long page = (unsigned long) PIPE_BASE(*inode);
 439                 PIPE_BASE(*inode) = NULL;
 440                 free_page(page);
 441         }
 442 
 443         if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->put_inode) {
 444                 inode->i_sb->s_op->put_inode(inode);
 445                 if (!inode->i_nlink)
 446                         return;
 447         }
 448 
 449         if (inode->i_dirt) {
 450                 write_inode(inode);     /* we can sleep - so do again */
 451                 wait_on_inode(inode);
 452                 goto repeat;
 453         }
 454 
 455         if (IS_WRITABLE(inode)) {
 456                 if (inode->i_sb && inode->i_sb->dq_op) {
 457                         /* Here we can sleep also. Let's do it again
 458                          * Dmitry Gorodchanin 02/11/96 
 459                          */
 460                         inode->i_lock = 1;
 461                         inode->i_sb->dq_op->drop(inode);
 462                         unlock_inode(inode);
 463                         goto repeat;
 464                 }
 465         }
 466         
 467         inode->i_count--;
 468 
 469         if (inode->i_mmap) {
 470                 printk("iput: inode %lu on device %s still has mappings.\n",
 471                         inode->i_ino, kdevname(inode->i_dev));
 472                 inode->i_mmap = NULL;
 473         }
 474 
 475         nr_free_inodes++;
 476         return;
 477 }
 478 
 479 static inline unsigned long value(struct inode * inode)
     /* [previous][next][first][last][top][bottom][index][help] */
 480 {
 481         if (inode->i_lock)  
 482                 return 1000;
 483         if (inode->i_dirt)
 484                 return 1000;
 485         return inode->i_nrpages;
 486 }
 487 
 488 struct inode * get_empty_inode(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 489 {
 490         static int ino = 0;
 491         struct inode * inode, * best;
 492         unsigned long badness = 1000;
 493         int i;
 494 
 495         if (nr_inodes < max_inodes && nr_free_inodes < (nr_inodes >> 1))
 496                 grow_inodes();
 497 repeat:
 498         inode = first_inode;
 499         best = NULL;
 500         for (i = 0; i<nr_inodes; inode = inode->i_next, i++) {
 501                 if (!inode->i_count) {
 502                         unsigned long i = value(inode);
 503                         if (i < badness) {
 504                                 best = inode;
 505                                 if ((badness = i) == 0)
 506                                         break;
 507                         }
 508                 }
 509         }
 510         if (badness)
 511                 if (nr_inodes < max_inodes) {
 512                         if (grow_inodes() == 0)
 513                                 goto repeat;
 514                 }
 515         inode = best;
 516         if (!inode) {
 517                 printk("VFS: No free inodes - contact Linus\n");
 518                 sleep_on(&inode_wait);
 519                 goto repeat;
 520         }
 521         if (inode->i_lock) {
 522                 wait_on_inode(inode);
 523                 goto repeat;
 524         }
 525         if (inode->i_dirt) {
 526                 write_inode(inode);
 527                 goto repeat;
 528         }
 529         if (inode->i_count)
 530                 goto repeat;
 531         clear_inode(inode);
 532         inode->i_count = 1;
 533         inode->i_nlink = 1;
 534         inode->i_version = ++event;
 535         inode->i_sem.count = 1;
 536         inode->i_ino = ++ino;
 537         inode->i_dev = 0;
 538         nr_free_inodes--;
 539         if (nr_free_inodes < 0) {
 540                 printk ("VFS: get_empty_inode: bad free inode count.\n");
 541                 nr_free_inodes = 0;
 542         }
 543         return inode;
 544 }
 545 
/*
 * Allocate an anonymous inode for a pipe: one page of buffer, pipe
 * state reset, and i_count == 2 (one reference each for the read and
 * write ends).  Returns NULL when out of inodes or out of memory.
 */
  546 struct inode * get_pipe_inode(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  547 {
  548         struct inode * inode;
  549         extern struct inode_operations pipe_inode_operations;
  550 
  551         if (!(inode = get_empty_inode()))
  552                 return NULL;
        /* The pipe buffer is a single page; on failure give the inode back. */
  553         if (!(PIPE_BASE(*inode) = (char*) __get_free_page(GFP_USER))) {
  554                 iput(inode);
  555                 return NULL;
  556         }
  557         inode->i_op = &pipe_inode_operations;
  558         inode->i_count = 2;     /* sum of readers/writers */
  559         PIPE_WAIT(*inode) = NULL;
  560         PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
  561         PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
  562         PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
  563         PIPE_LOCK(*inode) = 0;
  564         inode->i_pipe = 1;
        /* FIFO type, owner read/write, owned by the creating task. */
  565         inode->i_mode |= S_IFIFO | S_IRUSR | S_IWUSR;
  566         inode->i_uid = current->fsuid;
  567         inode->i_gid = current->fsgid;
  568         inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
  569         inode->i_blksize = PAGE_SIZE;
  570         return inode;
  571 }
 572 
/*
 * Look up inode "nr" of "sb" in the hash table, or allocate and read
 * it in from disk when it is not cached.  "crossmntp" makes the lookup
 * follow a mount point to the covering filesystem's root.  Returns the
 * inode with its use count raised, or NULL when no inode could be
 * allocated.  May sleep.
 */
  573 struct inode *__iget(struct super_block * sb, int nr, int crossmntp)
      /* [previous][next][first][last][top][bottom][index][help] */
  574 {
  575         static struct wait_queue * update_wait = NULL;
  576         struct inode_hash_entry * h;
  577         struct inode * inode;
  578         struct inode * empty = NULL;
  579 
  580         if (!sb)
  581                 panic("VFS: iget with sb==NULL");
  582         h = hash(sb->s_dev, nr);
repeat:
  584         for (inode = h->inode; inode ; inode = inode->i_hash_next)
  585                 if (inode->i_dev == sb->s_dev && inode->i_ino == nr)
  586                         goto found_it;
  587         if (!empty) {
  588                 /*
  589                  * If we sleep here before we have found an inode
  590                  * we need to make sure nobody does anything bad
  591                  * to the inode while we sleep, because otherwise
  592                  * we may return an inode that is not valid any
  593                  * more when we wake up..
  594                  */
  595                 h->updating++;
  596                 empty = get_empty_inode();
  597                 if (!--h->updating)
  598                         wake_up(&update_wait);
                /* Another process may have read the inode in while we
                 * slept, so search the chain again. */
  599                 if (empty)
  600                         goto repeat;
  601                 return (NULL);
  602         }
        /* Not cached: initialize the fresh inode and read it from disk. */
  603         inode = empty;
  604         inode->i_sb = sb;
  605         inode->i_dev = sb->s_dev;
  606         inode->i_ino = nr;
  607         inode->i_flags = sb->s_flags;
  608         put_last_free(inode);
  609         insert_inode_hash(inode);
  610         read_inode(inode);
  611         goto return_it;
  612 
found_it:
        /* Found in the hash: take a reference BEFORE we can sleep, so the
         * inode cannot be reclaimed under us (see comment at the top of
         * the file about the "updating" protocol). */
  614         if (!inode->i_count)
  615                 nr_free_inodes--;
  616         inode->i_count++;
  617         wait_on_inode(inode);
  618         if (inode->i_dev != sb->s_dev || inode->i_ino != nr) {
  619                 printk("Whee.. inode changed from under us. Tell Linus\n");
  620                 iput(inode);
  621                 goto repeat;
  622         }
        /* Cross a mount point: hand back the mounted root instead. */
  623         if (crossmntp && inode->i_mount) {
  624                 struct inode * tmp = inode->i_mount;
  625                 tmp->i_count++;
  626                 iput(inode);
  627                 inode = tmp;
  628                 wait_on_inode(inode);
  629         }
        /* The spare inode we allocated was not needed after all. */
  630         if (empty)
  631                 iput(empty);
  632 
return_it:
        /* Don't return until concurrent lookups on this bucket have
         * finished raising their use counts (see comment above). */
  634         while (h->updating)
  635                 sleep_on(&update_wait);
  636         return inode;
  637 }
 638 
  639 /*
  640  * The "new" scheduling primitives (new as of 0.97 or so) allow this to
  641  * be done without disabling interrupts (other than in the actual queue
  642  * updating things: only a couple of 386 instructions). This should be
  643  * much better for interrupt latency.
  644  */
/*
 * Slow path of wait_on_inode(): sleep until inode->i_lock is cleared
 * by unlock_inode()'s wake_up().
 */
  645 static void __wait_on_inode(struct inode * inode)
      /* [previous][next][first][last][top][bottom][index][help] */
  646 {
  647         struct wait_queue wait = { current, NULL };
  648 
  649         add_wait_queue(&inode->i_wait, &wait);
repeat:
        /* Mark ourselves sleeping BEFORE testing i_lock: a wakeup that
         * arrives between the test and schedule() then just makes us
         * runnable again instead of being lost. */
  651         current->state = TASK_UNINTERRUPTIBLE;
  652         if (inode->i_lock) {
  653                 schedule();
  654                 goto repeat;
  655         }
  656         remove_wait_queue(&inode->i_wait, &wait);
  657         current->state = TASK_RUNNING;
  658 }

/* [previous][next][first][last][top][bottom][index][help] */