DEFINITIONS

This source file includes the following definitions:
  1. hashfn
  2. insert_inode_free
  3. remove_inode_free
  4. insert_inode_hash
  5. remove_inode_hash
  6. put_last_free
  7. grow_inodes
  8. inode_init
  9. wait_on_inode
  10. lock_inode
  11. unlock_inode
  12. clear_inode
  13. fs_may_mount
  14. fs_may_umount
  15. fs_may_remount_ro
  16. write_inode
  17. read_inode
  18. inode_change_ok
  19. inode_setattr
  20. notify_change
  21. bmap
  22. invalidate_inodes
  23. sync_inodes
  24. iput
  25. value
  26. get_empty_inode
  27. get_pipe_inode
  28. __iget
  29. __wait_on_inode

/*
 *  linux/fs/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/system.h>

#define NR_IHASH 512

/*
 * Be VERY careful when you access the inode hash table. There
 * are some rather scary race conditions you need to take care of:
 *  - P1 tries to open file "xx", calls "iget()" with the proper
 *    inode number, but blocks because it's not on the list.
 *  - P2 deletes file "xx", gets the inode (which P1 has just read,
 *    but P1 hasn't woken up to the fact yet)
 *  - P2 iput()'s the inode, which now has i_nlink = 0
 *  - P1 wakes up and has the inode, but now P2 has made that
 *    inode invalid (but P1 has no way of knowing that).
 *
 * The "updating" counter makes sure that when P1 blocks on the
 * iget(), P2 can't delete the inode from under it because P2
 * will wait until P1 has been able to update the inode usage
 * count so that the inode will stay in use until everybody has
 * closed it..
 */
static struct inode_hash_entry {
	struct inode * inode;
	int updating;
} hash_table[NR_IHASH];

static struct inode * first_inode;
static struct wait_queue * inode_wait = NULL;
/* Keep these next two contiguous in memory for sysctl.c */
int nr_inodes = 0, nr_free_inodes = 0;
int max_inodes = NR_INODE;

static inline int const hashfn(kdev_t dev, unsigned int i)
{
	return (HASHDEV(dev) ^ i) % NR_IHASH;
}

static inline struct inode_hash_entry * const hash(kdev_t dev, int i)
{
	return hash_table + hashfn(dev, i);
}

static inline void insert_inode_free(struct inode *inode)
{
	inode->i_next = first_inode;
	inode->i_prev = first_inode->i_prev;
	inode->i_next->i_prev = inode;
	inode->i_prev->i_next = inode;
	first_inode = inode;
}

static inline void remove_inode_free(struct inode *inode)
{
	if (first_inode == inode)
		first_inode = first_inode->i_next;
	if (inode->i_next)
		inode->i_next->i_prev = inode->i_prev;
	if (inode->i_prev)
		inode->i_prev->i_next = inode->i_next;
	inode->i_next = inode->i_prev = NULL;
}

void insert_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *h;
	h = hash(inode->i_dev, inode->i_ino);

	inode->i_hash_next = h->inode;
	inode->i_hash_prev = NULL;
	if (inode->i_hash_next)
		inode->i_hash_next->i_hash_prev = inode;
	h->inode = inode;
}

static inline void remove_inode_hash(struct inode *inode)
{
	struct inode_hash_entry *h;
	h = hash(inode->i_dev, inode->i_ino);

	if (h->inode == inode)
		h->inode = inode->i_hash_next;
	if (inode->i_hash_next)
		inode->i_hash_next->i_hash_prev = inode->i_hash_prev;
	if (inode->i_hash_prev)
		inode->i_hash_prev->i_hash_next = inode->i_hash_next;
	inode->i_hash_prev = inode->i_hash_next = NULL;
}

static inline void put_last_free(struct inode *inode)
{
	remove_inode_free(inode);
	inode->i_prev = first_inode->i_prev;
	inode->i_prev->i_next = inode;
	inode->i_next = first_inode;
	inode->i_next->i_prev = inode;
}
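
/*
 * Note on the free list: first_inode heads a circular, doubly linked
 * list containing every in-core inode.  insert_inode_free() links an
 * inode in at the front of that list, while put_last_free() re-links
 * it at the tail (just before first_inode), which is where __iget()
 * parks an inode it has just filled in.
 */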

int grow_inodes(void)
{
	struct inode * inode;
	int i;

	if (!(inode = (struct inode*) get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	i=PAGE_SIZE / sizeof(struct inode);
	nr_inodes += i;
	nr_free_inodes += i;

	if (!first_inode)
		inode->i_next = inode->i_prev = first_inode = inode++, i--;

	for ( ; i ; i-- )
		insert_inode_free(inode++);
	return 0;
}

unsigned long inode_init(unsigned long start, unsigned long end)
{
	memset(hash_table, 0, sizeof(hash_table));
	first_inode = NULL;
	return start;
}

static void __wait_on_inode(struct inode *);

static inline void wait_on_inode(struct inode * inode)
{
	if (inode->i_lock)
		__wait_on_inode(inode);
}

static inline void lock_inode(struct inode * inode)
{
	wait_on_inode(inode);
	inode->i_lock = 1;
}

static inline void unlock_inode(struct inode * inode)
{
	inode->i_lock = 0;
	wake_up(&inode->i_wait);
}
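
/*
 * Locking protocol: i_lock is a simple sleep lock.  lock_inode() may
 * block in wait_on_inode() until the current holder calls
 * unlock_inode(), which clears the flag and wakes everybody sleeping
 * on i_wait; each waiter then re-tests i_lock in __wait_on_inode().
 */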

/*
 * Note that we don't want to disturb any wait-queues when we discard
 * an inode.
 *
 * Argghh. Got bitten by a gcc problem with inlining: no way to tell
 * the compiler that the inline asm function 'memset' changes 'inode'.
 * I've been searching for the bug for days, and was getting desperate.
 * Finally looked at the assembler output... Grrr.
 *
 * The solution is the weird use of 'volatile'. Ho humm. Have to report
 * it to the gcc lists, and hope we can do this more cleanly some day..
 */
void clear_inode(struct inode * inode)
{
	struct wait_queue * wait;

	truncate_inode_pages(inode, 0);
	wait_on_inode(inode);
	if (IS_WRITABLE(inode)) {
		if (inode->i_sb && inode->i_sb->dq_op)
			inode->i_sb->dq_op->drop(inode);
	}
	remove_inode_hash(inode);
	remove_inode_free(inode);
	wait = ((volatile struct inode *) inode)->i_wait;
	if (inode->i_count)
		nr_free_inodes++;
	memset(inode,0,sizeof(*inode));
	((volatile struct inode *) inode)->i_wait = wait;
	insert_inode_free(inode);
}

int fs_may_mount(kdev_t dev)
{
	struct inode * inode, * next;
	int i;

	next = first_inode;
	for (i = nr_inodes ; i > 0 ; i--) {
		inode = next;
		next = inode->i_next;	/* clear_inode() changes the queues.. */
		if (inode->i_dev != dev)
			continue;
		if (inode->i_count || inode->i_dirt || inode->i_lock)
			return 0;
		clear_inode(inode);
	}
	return 1;
}

int fs_may_umount(kdev_t dev, struct inode * mount_root)
{
	struct inode * inode;
	int i;

	inode = first_inode;
	for (i=0 ; i < nr_inodes ; i++, inode = inode->i_next) {
		if (inode->i_dev != dev || !inode->i_count)
			continue;
		if (inode == mount_root && inode->i_count ==
		    (inode->i_mount != inode ? 1 : 2))
			continue;
		return 0;
	}
	return 1;
}

int fs_may_remount_ro(kdev_t dev)
{
	struct file * file;
	int i;

	/* Check that no files are currently opened for writing. */
	for (file = first_file, i=0; i<nr_files; i++, file=file->f_next) {
		if (!file->f_count || !file->f_inode ||
		    file->f_inode->i_dev != dev)
			continue;
		if (S_ISREG(file->f_inode->i_mode) && (file->f_mode & 2))
			return 0;
	}
	return 1;
}

static void write_inode(struct inode * inode)
{
	if (!inode->i_dirt)
		return;
	wait_on_inode(inode);
	if (!inode->i_dirt)
		return;
	if (!inode->i_sb || !inode->i_sb->s_op || !inode->i_sb->s_op->write_inode) {
		inode->i_dirt = 0;
		return;
	}
	inode->i_lock = 1;
	inode->i_sb->s_op->write_inode(inode);
	unlock_inode(inode);
}

static inline void read_inode(struct inode * inode)
{
	lock_inode(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->read_inode)
		inode->i_sb->s_op->read_inode(inode);
	unlock_inode(inode);
}

/* POSIX UID/GID verification for setting inode attributes */
int inode_change_ok(struct inode *inode, struct iattr *attr)
{
	/*
	 *	If force is set do it anyway.
	 */

	if (attr->ia_valid & ATTR_FORCE)
		return 0;

	/* Make sure a caller can chown */
	if ((attr->ia_valid & ATTR_UID) &&
	    (current->fsuid != inode->i_uid ||
	     attr->ia_uid != inode->i_uid) && !fsuser())
		return -EPERM;

	/* Make sure caller can chgrp */
	if ((attr->ia_valid & ATTR_GID) &&
	    (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid) &&
	    !fsuser())
		return -EPERM;

	/* Make sure a caller can chmod */
	if (attr->ia_valid & ATTR_MODE) {
		if ((current->fsuid != inode->i_uid) && !fsuser())
			return -EPERM;
		/* Also check the setgid bit! */
		if (!fsuser() && !in_group_p((attr->ia_valid & ATTR_GID) ? attr->ia_gid :
					     inode->i_gid))
			attr->ia_mode &= ~S_ISGID;
	}

	/* Check for setting the inode time */
	if ((attr->ia_valid & ATTR_ATIME_SET) &&
	    ((current->fsuid != inode->i_uid) && !fsuser()))
		return -EPERM;
	if ((attr->ia_valid & ATTR_MTIME_SET) &&
	    ((current->fsuid != inode->i_uid) && !fsuser()))
		return -EPERM;
	return 0;
}
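
/*
 * Example of the chown rule above (illustrative only): a process with
 * fsuid 100 may chown a file it owns to uid 100 (i.e. leave the owner
 * unchanged) and may switch its group to any group it is a member of,
 * or leave the group alone; handing the file to a different uid fails
 * with -EPERM unless fsuser() is true.
 */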

/*
 * Set the appropriate attributes from an attribute structure into
 * the inode structure.
 */
void inode_setattr(struct inode *inode, struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_SIZE)
		inode->i_size = attr->ia_size;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (attr->ia_valid & ATTR_MODE) {
		inode->i_mode = attr->ia_mode;
		if (!fsuser() && !in_group_p(inode->i_gid))
			inode->i_mode &= ~S_ISGID;
	}
	inode->i_dirt = 1;
}

/*
 * notify_change is called for inode-changing operations such as
 * chown, chmod, utime, and truncate.  It is guaranteed (unlike
 * write_inode) to be called from the context of the user requesting
 * the change.
 */

int notify_change(struct inode * inode, struct iattr *attr)
{
	int retval;

	attr->ia_ctime = CURRENT_TIME;
	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
		if (!(attr->ia_valid & ATTR_ATIME_SET))
			attr->ia_atime = attr->ia_ctime;
		if (!(attr->ia_valid & ATTR_MTIME_SET))
			attr->ia_mtime = attr->ia_ctime;
	}

	if (inode->i_sb && inode->i_sb->s_op &&
	    inode->i_sb->s_op->notify_change)
		return inode->i_sb->s_op->notify_change(inode, attr);

	if ((retval = inode_change_ok(inode, attr)) != 0)
		return retval;

	inode_setattr(inode, attr);
	return 0;
}
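
/*
 * Illustration (not part of this file): a chmod-style caller builds a
 * struct iattr and hands it to notify_change(), roughly like
 *
 *	struct iattr newattrs;
 *
 *	newattrs.ia_mode = mode;
 *	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 *	error = notify_change(inode, &newattrs);
 *
 * If the filesystem provides its own notify_change method, that method
 * takes over both the permission check and the attribute update.
 */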

/*
 * bmap is needed for demand-loading and paging: if this function
 * doesn't exist for a filesystem, then those things are impossible:
 * executables cannot be run from the filesystem etc...
 *
 * This isn't as bad as it sounds: the read-routines might still work,
 * so the filesystem would be otherwise ok (for example, you might have
 * a DOS filesystem, which doesn't lend itself to bmap very well, but
 * you could still transfer files to/from the filesystem)
 */
int bmap(struct inode * inode, int block)
{
	if (inode->i_op && inode->i_op->bmap)
		return inode->i_op->bmap(inode,block);
	return 0;
}
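
/*
 * Illustration (not part of this file): paging code typically turns a
 * byte offset within a file into a device block roughly like
 *
 *	int block = offset >> inode->i_sb->s_blocksize_bits;
 *	int phys = bmap(inode, block);
 *
 * and gives up if phys is 0, since a return value of 0 means "no
 * mapping available" (a hole, or a filesystem without bmap).
 */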

void invalidate_inodes(kdev_t dev)
{
	struct inode * inode, * next;
	int i;

	next = first_inode;
	for(i = nr_inodes ; i > 0 ; i--) {
		inode = next;
		next = inode->i_next;		/* clear_inode() changes the queues.. */
		if (inode->i_dev != dev)
			continue;
		if (inode->i_count || inode->i_dirt || inode->i_lock) {
			printk("VFS: inode busy on removed device %s\n",
			       kdevname(dev));
			continue;
		}
		clear_inode(inode);
	}
}

void sync_inodes(kdev_t dev)
{
	int i;
	struct inode * inode;

	inode = first_inode;
	for(i = 0; i < nr_inodes*2; i++, inode = inode->i_next) {
		if (dev && inode->i_dev != dev)
			continue;
		wait_on_inode(inode);
		if (inode->i_dirt)
			write_inode(inode);
	}
}
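
/*
 * Note: a dev argument of 0 means "sync every device".  The loop above
 * makes two passes over the circular inode list (nr_inodes*2
 * iterations), presumably so that inodes dirtied while earlier ones
 * were being written out still get picked up.
 */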

void iput(struct inode * inode)
{
	if (!inode)
		return;
	wait_on_inode(inode);
	if (!inode->i_count) {
		printk("VFS: iput: trying to free free inode\n");
		printk("VFS: device %s, inode %lu, mode=0%07o\n",
			kdevname(inode->i_rdev), inode->i_ino, inode->i_mode);
		return;
	}
	if (inode->i_pipe)
		wake_up_interruptible(&PIPE_WAIT(*inode));
repeat:
	if (inode->i_count>1) {
		inode->i_count--;
		return;
	}

	wake_up(&inode_wait);
	if (inode->i_pipe) {
		unsigned long page = (unsigned long) PIPE_BASE(*inode);
		PIPE_BASE(*inode) = NULL;
		free_page(page);
	}

	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->put_inode) {
		inode->i_sb->s_op->put_inode(inode);
		if (!inode->i_nlink)
			return;
	}

	if (inode->i_dirt) {
		write_inode(inode);	/* we can sleep - so do again */
		wait_on_inode(inode);
		goto repeat;
	}

	if (IS_WRITABLE(inode)) {
		if (inode->i_sb && inode->i_sb->dq_op) {
			/* Here we can sleep also. Let's do it again
			 * Dmitry Gorodchanin 02/11/96
			 */
			inode->i_lock = 1;
			inode->i_sb->dq_op->drop(inode);
			unlock_inode(inode);
			goto repeat;
		}
	}

	inode->i_count--;

	if (inode->i_mmap) {
		printk("iput: inode %lu on device %s still has mappings.\n",
			inode->i_ino, kdevname(inode->i_dev));
		inode->i_mmap = NULL;
	}

	nr_free_inodes++;
	return;
}

static inline unsigned long value(struct inode * inode)
{
	if (inode->i_lock)
		return 1000;
	if (inode->i_dirt)
		return 1000;
	return inode->i_nrpages;
}
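
/*
 * value() is the "badness" score used by get_empty_inode() below:
 * locked or dirty inodes are effectively never chosen (1000), and
 * otherwise an inode costs as many points as it has pages in the page
 * cache, so unused inodes with no cached data are reclaimed first.
 */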

struct inode * get_empty_inode(void)
{
	static int ino = 0;
	struct inode * inode, * best;
	unsigned long badness = 1000;
	int i;

	if (nr_inodes < max_inodes && nr_free_inodes < (nr_inodes >> 1))
		grow_inodes();
repeat:
	inode = first_inode;
	best = NULL;
	for (i = 0; i<nr_inodes; inode = inode->i_next, i++) {
		if (!inode->i_count) {
			unsigned long i = value(inode);
			if (i < badness) {
				best = inode;
				if ((badness = i) == 0)
					break;
			}
		}
	}
	if (badness)
		if (nr_inodes < max_inodes) {
			if (grow_inodes() == 0)
				goto repeat;
		}
	inode = best;
	if (!inode) {
		printk("VFS: No free inodes - contact Linus\n");
		sleep_on(&inode_wait);
		goto repeat;
	}
	if (inode->i_lock) {
		wait_on_inode(inode);
		goto repeat;
	}
	if (inode->i_dirt) {
		write_inode(inode);
		goto repeat;
	}
	if (inode->i_count)
		goto repeat;
	clear_inode(inode);
	inode->i_count = 1;
	inode->i_nlink = 1;
	inode->i_version = ++event;
	inode->i_sem.count = 1;
	inode->i_ino = ++ino;
	inode->i_dev = 0;
	nr_free_inodes--;
	if (nr_free_inodes < 0) {
		printk ("VFS: get_empty_inode: bad free inode count.\n");
		nr_free_inodes = 0;
	}
	return inode;
}

struct inode * get_pipe_inode(void)
{
	struct inode * inode;
	extern struct inode_operations pipe_inode_operations;

	if (!(inode = get_empty_inode()))
		return NULL;
	if (!(PIPE_BASE(*inode) = (char*) __get_free_page(GFP_USER))) {
		iput(inode);
		return NULL;
	}
	inode->i_op = &pipe_inode_operations;
	inode->i_count = 2;	/* sum of readers/writers */
	PIPE_WAIT(*inode) = NULL;
	PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
	PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
	PIPE_LOCK(*inode) = 0;
	inode->i_pipe = 1;
	inode->i_mode |= S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_blksize = PAGE_SIZE;
	return inode;
}
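
/*
 * Illustration (not part of this file): the pipe() system call is the
 * intended user of get_pipe_inode().  Roughly, do_pipe() in fs/pipe.c
 * does
 *
 *	struct inode * inode = get_pipe_inode();
 *
 * and then allocates two struct file entries, one opened for reading
 * and one for writing, both pointing at this inode; that is why
 * i_count is initialised to 2 above.
 */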

struct inode *__iget(struct super_block * sb, int nr, int crossmntp)
{
	static struct wait_queue * update_wait = NULL;
	struct inode_hash_entry * h;
	struct inode * inode;
	struct inode * empty = NULL;

	if (!sb)
		panic("VFS: iget with sb==NULL");
	h = hash(sb->s_dev, nr);
repeat:
	for (inode = h->inode; inode ; inode = inode->i_hash_next)
		if (inode->i_dev == sb->s_dev && inode->i_ino == nr)
			goto found_it;
	if (!empty) {
		/*
		 * If we sleep here before we have found an inode
		 * we need to make sure nobody does anything bad
		 * to the inode while we sleep, because otherwise
		 * we may return an inode that is not valid any
		 * more when we wake up..
		 */
		h->updating++;
		empty = get_empty_inode();
		if (!--h->updating)
			wake_up(&update_wait);
		if (empty)
			goto repeat;
		return (NULL);
	}
	inode = empty;
	inode->i_sb = sb;
	inode->i_dev = sb->s_dev;
	inode->i_ino = nr;
	inode->i_flags = sb->s_flags;
	put_last_free(inode);
	insert_inode_hash(inode);
	read_inode(inode);
	goto return_it;

found_it:
	if (!inode->i_count)
		nr_free_inodes--;
	inode->i_count++;
	wait_on_inode(inode);
	if (inode->i_dev != sb->s_dev || inode->i_ino != nr) {
		printk("Whee.. inode changed from under us. Tell Linus\n");
		iput(inode);
		goto repeat;
	}
	if (crossmntp && inode->i_mount) {
		struct inode * tmp = inode->i_mount;
		tmp->i_count++;
		iput(inode);
		inode = tmp;
		wait_on_inode(inode);
	}
	if (empty)
		iput(empty);

return_it:
	while (h->updating)
		sleep_on(&update_wait);
	return inode;
}
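
/*
 * Callers normally go through iget(sb, nr), an inline wrapper in
 * <linux/fs.h> that calls __iget(sb, nr, 1), i.e. with mount-point
 * crossing enabled; passing crossmntp == 0 returns the covered inode
 * itself rather than the root of whatever is mounted on top of it.
 */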

/*
 * The "new" scheduling primitives (new as of 0.97 or so) allow this to
 * be done without disabling interrupts (other than in the actual queue
 * updating things: only a couple of 386 instructions). This should be
 * much better for interrupt latency.
 */
static void __wait_on_inode(struct inode * inode)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_lock) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}
