root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions.
  1. invalidate_inode_pages
  2. shrink_mmap
  3. page_unuse
  4. update_vm_cache
  5. fill_page
  6. try_to_read_ahead
  7. generic_file_read
  8. filemap_nopage
  9. filemap_write_page
  10. filemap_swapout
  11. filemap_swapin
  12. filemap_sync_pte
  13. filemap_sync_pte_range
  14. filemap_sync_pmd_range
  15. filemap_sync
  16. filemap_unmap
  17. generic_mmap
  18. msync_interval
  19. sys_msync

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994, 1995  Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 #include <linux/fs.h>
  22 #include <linux/locks.h>
  23 #include <linux/pagemap.h>
  24 
  25 #include <asm/segment.h>
  26 #include <asm/system.h>
  27 #include <asm/pgtable.h>
  28 
  29 /*
  30  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  31  * though.
  32  *
  33  * Shared mappings now work. 15.8.1995  Bruno.
  34  */
  35 
  36 unsigned long page_cache_size = 0;
  37 struct page * page_hash_table[PAGE_HASH_SIZE];
  38 
  39 /*
  40  * Simple routines for both non-shared and shared mappings.
  41  */
  42 
  43 void invalidate_inode_pages(struct inode * inode, unsigned long start)
  44 {
  45         struct page ** p = &inode->i_pages;
  46         struct page * page;
  47 
  48         while ((page = *p) != NULL) {
  49                 unsigned long offset = page->offset;
  50 
  51                 /* page wholly truncated - free it */
  52                 if (offset >= start) {
  53                         inode->i_nrpages--;
  54                         if ((*p = page->next) != NULL)
  55                                 (*p)->prev = page->prev;
  56                         page->dirty = 0;
  57                         page->next = NULL;
  58                         page->prev = NULL;
  59                         remove_page_from_hash_queue(page);
  60                         page->inode = NULL;
  61                         free_page(page_address(page));
  62                         continue;
  63                 }
  64                 p = &page->next;
  65                 offset = start - offset;
  66                 /* partial truncate, clear end of page */
  67                 if (offset < PAGE_SIZE)
  68                         memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
  69         }
  70 }
  71 
  72 int shrink_mmap(int priority, unsigned long limit)
  73 {
  74         static int clock = 0;
  75         struct page * page;
  76 
  77         if (limit > high_memory)
  78                 limit = high_memory;
  79         limit = MAP_NR(limit);
  80         if (clock >= limit)
  81                 clock = 0;
  82         priority = (limit<<2) >> priority;
  83         page = mem_map + clock;
  84         while (priority-- > 0) {
  85                 if (page->inode) {
  86                         unsigned age = page->age;
   87                         /* if the page is shared, we rejuvenate it */
  88                         if (page->count != 1)
  89                                 age |= PAGE_AGE_VALUE << 1;
  90                         page->age = age >> 1;
  91                         if (age <= PAGE_AGE_VALUE/2) {
  92                                 remove_page_from_hash_queue(page);
  93                                 remove_page_from_inode_queue(page);
  94                                 free_page(page_address(page));
  95                                 return 1;
  96                         }
  97                 }
  98                 page++;
  99                 clock++;
 100                 if (clock >= limit) {
 101                         clock = 0;
 102                         page = mem_map;
 103                 }
 104         }
 105         return 0;
 106 }
 107 
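The loop above is a clock-style sweep over mem_map: the static "clock" keeps its position across calls, every page-cache page visited has its age halved, shared pages (count != 1) are boosted first, and a page is freed once its (boosted) age has already decayed to PAGE_AGE_VALUE/2 or below. Below is a small stand-alone sketch of just that aging arithmetic; the numeric PAGE_AGE_VALUE used here is an assumption for illustration only, the real constant comes from the kernel headers.

	#include <stdio.h>

	#define PAGE_AGE_VALUE 16		/* assumed value, for illustration only */

	/* One visit by the scan: same order of operations as shrink_mmap() above. */
	static int visit(unsigned *agep, int shared)
	{
		unsigned age = *agep;

		if (shared)
			age |= PAGE_AGE_VALUE << 1;	/* shared pages are boosted first */
		*agep = age >> 1;			/* the stored age decays by half  */
		return age <= PAGE_AGE_VALUE/2;		/* reclaim if it was already low  */
	}

	int main(void)
	{
		unsigned age = PAGE_AGE_VALUE;
		int pass;

		for (pass = 1; pass <= 4; pass++) {
			int reclaim = visit(&age, 0);	/* unshared, never touched */
			printf("pass %d: age=%u%s\n", pass, age,
			       reclaim ? " (reclaimed)" : "");
		}

		age = PAGE_AGE_VALUE;
		visit(&age, 1);				/* shared: boosted, kept */
		printf("shared page after one pass: age=%u\n", age);
		return 0;
	}

With these assumed numbers an untouched, unshared page is reclaimed on its second visit, while a shared page is boosted back up on every pass and never decays to the threshold.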
 108 /*
  109  * This is called from try_to_swap_out() when we try to get rid of some
 110  * pages..  If we're unmapping the last occurrence of this page, we also
 111  * free it from the page hash-queues etc, as we don't want to keep it
 112  * in-core unnecessarily.
 113  */
 114 unsigned long page_unuse(unsigned long page)
 115 {
 116         struct page * p = mem_map + MAP_NR(page);
 117         int count = p->count;
 118 
 119         if (count != 2)
 120                 return count;
 121         if (!p->inode)
 122                 return count;
 123         remove_page_from_hash_queue(p);
 124         remove_page_from_inode_queue(p);
 125         free_page(page);
 126         return 1;
 127 }
 128 
 129 /*
 130  * Update a page cache copy, when we're doing a "write()" system call
 131  * See also "update_vm_cache()".
 132  */
 133 void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
 134 {
 135         struct page * page;
 136 
 137         page = find_page(inode, pos & PAGE_MASK);
 138         if (page) {
 139                 unsigned long addr;
 140 
 141                 page->count++;
 142                 if (!page->uptodate)
 143                         sleep_on(&page->wait);
 144                 addr = page_address(page);
 145                 memcpy((void *) ((pos & ~PAGE_MASK) + addr), buf, count);
 146                 free_page(addr);
 147         }
 148 }
 149 
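update_vm_cache() is intended to be called from a filesystem's write() routine, after the new data has gone into the buffer cache, so that any mmap'ed copy of the same page stays coherent with the write. The fragment below is a schematic sketch of such a caller, not code from any particular filesystem; everything in it except update_vm_cache() itself is hypothetical.

	static int example_file_write(struct inode * inode, struct file * filp,
		const char * buf, int count)
	{
		unsigned long pos = filp->f_pos;

		/*
		 * ... copy 'count' bytes from 'buf' into the buffer cache at
		 * offset 'pos', mark the buffers dirty and grow inode->i_size
		 * if the write extends the file (filesystem specific) ...
		 */

		/* keep any cached (mapped) pages in sync with what was written */
		update_vm_cache(inode, pos, buf, count);

		filp->f_pos = pos + count;
		return count;
	}

Because of the sleep_on() above, the caller may block in update_vm_cache() until a page that is still being read in becomes up to date.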
 150 /*
 151  * Find a cached page and wait for it to become up-to-date, return
 152  * the page address.
 153  *
 154  * If no cached page can be found, create one using the supplied
 155  * new page instead (and return zero to indicate that we used the
 156  * supplied page in doing so).
 157  */
 158 static unsigned long fill_page(struct inode * inode, unsigned long offset, unsigned long newpage)
 159 {
 160         struct page * page;
 161 
 162         page = find_page(inode, offset);
 163         if (page) {
 164                 if (!page->uptodate)
 165                         sleep_on(&page->wait);
 166                 return page_address(page);
 167         }
 168         page = mem_map + MAP_NR(newpage);
 169         page->count++;
 170         page->uptodate = 0;
 171         page->error = 0;
 172         page->offset = offset;
 173         add_page_to_inode_queue(inode, page);
 174         add_page_to_hash_queue(inode, page);
 175         inode->i_op->readpage(inode, page);
 176         page->uptodate = 1;
 177         wake_up(&page->wait);
 178         return 0;
 179 }
 180 
 181 /*
 182  * Try to read ahead in the file. "page_cache" is a potentially free page
 183  * that we could use for the cache (if it is 0 we can try to create one,
 184  * this is all overlapped with the IO on the previous page finishing anyway)
 185  */
 186 static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
 187 {
 188         if (!page_cache)
 189                 page_cache = __get_free_page(GFP_KERNEL);
 190         offset = (offset + PAGE_SIZE) & PAGE_MASK;
 191         /*
 192          * read-ahead is not implemented yet, but this is
 193          * where we should start..
 194          */
 195         return page_cache;
 196 }
 197 
 198 /*
 199  * This is a generic file read routine, and uses the
 200  * inode->i_op->readpage() function for the actual low-level
 201  * stuff.
 202  */
 203 int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
 204 {
 205         int read = 0;
 206         unsigned long pos;
 207         unsigned long page_cache = 0;
 208 
 209         if (count <= 0)
 210                 return 0;
 211 
 212         pos = filp->f_pos;
 213         do {
 214                 struct page *page;
 215                 unsigned long offset, addr, nr;
 216 
 217                 if (pos >= inode->i_size)
 218                         break;
 219                 offset = pos & ~PAGE_MASK;
 220                 nr = PAGE_SIZE - offset;
 221                 /*
 222                  * Try to find the data in the page cache..
 223                  */
 224                 page = find_page(inode, pos & PAGE_MASK);
 225                 if (page)
 226                         goto found_page;
 227 
 228                 /*
 229                  * Ok, it wasn't cached, so we need to create a new
 230                  * page..
 231                  */
 232                 if (!page_cache) {
 233                         page_cache = __get_free_page(GFP_KERNEL);
 234                         if (!page_cache) {
 235                                 if (!read)
 236                                         read = -ENOMEM;
 237                                 break;
 238                         }
 239                 }
 240 
 241                 /*
 242                  * That could have slept, so we need to check again..
 243                  */
 244                 if (pos >= inode->i_size)
 245                         break;
 246                 page = find_page(inode, pos & PAGE_MASK);
 247                 if (page)
 248                         goto found_page;
 249 
 250                 /*
 251                  * Ok, add the new page to the hash-queues...
 252                  */
 253                 page = mem_map + MAP_NR(page_cache);
 254                 page_cache = 0;
 255                 page->count++;
 256                 page->uptodate = 0;
 257                 page->error = 0;
 258                 page->offset = pos & PAGE_MASK;
 259                 add_page_to_inode_queue(inode, page);
 260                 add_page_to_hash_queue(inode, page);
 261 
 262                 /* 
 263                  * And start IO on it..
 264                  * (this should be asynchronous, but currently isn't)
 265                  */
 266                 inode->i_op->readpage(inode, page);
 267 
 268 found_page:
 269                 addr = page_address(page);
 270                 if (nr > count)
 271                         nr = count;
 272                 if (!page->uptodate) {
 273                         page_cache = try_to_read_ahead(inode, offset, page_cache);
 274                         if (!page->uptodate)
 275                                 sleep_on(&page->wait);
 276                 }
 277                 if (nr > inode->i_size - pos)
 278                         nr = inode->i_size - pos;
 279                 memcpy_tofs(buf, (void *) (addr + offset), nr);
 280                 free_page(addr);
 281                 buf += nr;
 282                 pos += nr;
 283                 read += nr;
 284                 count -= nr;
 285         } while (count);
 286 
 287         filp->f_pos = pos;
 288         if (page_cache)
 289                 free_page(page_cache);
 290         if (!IS_RDONLY(inode)) {
 291                 inode->i_atime = CURRENT_TIME;
 292                 inode->i_dirt = 1;
 293         }
 294         return read;
 295 }
 296 
 297 /*
 298  * Semantics for shared and private memory areas are different past the end
 299  * of the file. A shared mapping past the last page of the file is an error
  300  * and results in a SIGBUS, while a private mapping just maps in a zero page.
 301  */
 302 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
 303         unsigned long page, int no_share)
 304 {
 305         unsigned long offset;
 306         struct inode * inode = area->vm_inode;
 307         unsigned long new_page;
 308 
 309         offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
 310         if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
 311                 send_sig(SIGBUS, current, 1);
 312 
 313         new_page = fill_page(inode, offset, page);
 314         if (new_page) {
 315                 if (no_share) {
 316                         memcpy((void *) page, (void *) new_page, PAGE_SIZE);
 317                         free_page(new_page);
 318                         return page;
 319                 }
 320                 free_page(page);
 321                 return new_page;
 322         }
 323 
 324         if (no_share) {
 325                 new_page = __get_free_page(GFP_USER);
 326                 if (!new_page) {
 327                         oom(current);
 328                         new_page = pte_page(BAD_PAGE);
 329                 }
 330                 memcpy((void *) new_page, (void *) page, PAGE_SIZE);
 331                 free_page(page);
 332                 page = new_page;
 333         }
 334         return page;
 335 }
 336 
 337 /*
 338  * Tries to write a shared mapped page to its backing store. May return -EIO
 339  * if the disk is full.
 340  */
 341 static int filemap_write_page(struct vm_area_struct * vma,
 342         unsigned long offset,
 343         unsigned long page)
 344 {
 345         int old_fs;
 346         unsigned long size, result;
 347         struct file file;
 348         struct inode * inode;
 349         struct buffer_head * bh;
 350 
 351         bh = buffer_pages[MAP_NR(page)];
 352         if (bh) {
 353                 /* whee.. just mark the buffer heads dirty */
 354                 struct buffer_head * tmp = bh;
 355                 do {
 356                         mark_buffer_dirty(tmp, 0);
 357                         tmp = tmp->b_this_page;
 358                 } while (tmp != bh);
 359                 return 0;
 360         }
 361 
 362         inode = vma->vm_inode;
 363         file.f_op = inode->i_op->default_file_ops;
 364         if (!file.f_op->write)
 365                 return -EIO;
 366         size = offset + PAGE_SIZE;
 367         /* refuse to extend file size.. */
 368         if (S_ISREG(inode->i_mode)) {
 369                 if (size > inode->i_size)
 370                         size = inode->i_size;
 371                 /* Ho humm.. We should have tested for this earlier */
 372                 if (size < offset)
 373                         return -EIO;
 374         }
 375         size -= offset;
 376         file.f_mode = 3;
 377         file.f_flags = 0;
 378         file.f_count = 1;
 379         file.f_inode = inode;
 380         file.f_pos = offset;
 381         file.f_reada = 0;
 382         old_fs = get_fs();
 383         set_fs(KERNEL_DS);
 384         result = file.f_op->write(inode, &file, (const char *) page, size);
 385         set_fs(old_fs);
 386         if (result != size)
 387                 return -EIO;
 388         return 0;
 389 }
 390 
 391 
 392 /*
 393  * Swapping to a shared file: while we're busy writing out the page
 394  * (and the page still exists in memory), we save the page information
 395  * in the page table, so that "filemap_swapin()" can re-use the page
 396  * immediately if it is called while we're busy swapping it out..
 397  *
 398  * Once we've written it all out, we mark the page entry "empty", which
 399  * will result in a normal page-in (instead of a swap-in) from the now
 400  * up-to-date disk file.
 401  */
 402 int filemap_swapout(struct vm_area_struct * vma,
 403         unsigned long offset,
 404         pte_t *page_table)
 405 {
 406         int error;
 407         unsigned long page = pte_page(*page_table);
 408         unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));
 409 
 410         set_pte(page_table, __pte(entry));
 411         /* Yuck, perhaps a slightly modified swapout parameter set? */
 412         invalidate_page(vma, (offset + vma->vm_start - vma->vm_offset));
 413         error = filemap_write_page(vma, offset, page);
 414         if (pte_val(*page_table) == entry)
 415                 pte_clear(page_table);
 416         return error;
 417 }
 418 
 419 /*
 420  * filemap_swapin() is called only if we have something in the page
 421  * tables that is non-zero (but not present), which we know to be the
 422  * page index of a page that is busy being swapped out (see above).
 423  * So we just use it directly..
 424  */
 425 static pte_t filemap_swapin(struct vm_area_struct * vma,
 426         unsigned long offset,
 427         unsigned long entry)
 428 {
 429         unsigned long page = SWP_OFFSET(entry);
 430 
 431         mem_map[page].count++;
 432         page = (page << PAGE_SHIFT) + PAGE_OFFSET;
 433         return mk_pte(page,vma->vm_page_prot);
 434 }
 435 
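The entry stored by filemap_swapout() is an ordinary swap entry whose offset field is simply MAP_NR(page), which is why filemap_swapin() can turn it straight back into the same physical page. The real SWP_ENTRY()/SWP_OFFSET() macros are architecture specific (they live in <asm/pgtable.h>), so the stand-alone sketch below uses made-up DEMO_* macros and stand-in values purely to show the round trip.

	#include <stdio.h>

	/* Illustrative bit layout only -- not the real <asm/pgtable.h> encoding. */
	#define DEMO_TYPE_BITS			7
	#define DEMO_SWP_ENTRY(type, offset)	(((offset) << (DEMO_TYPE_BITS + 1)) | ((unsigned long)(type) << 1))
	#define DEMO_SWP_TYPE(entry)		(((entry) >> 1) & ((1UL << DEMO_TYPE_BITS) - 1))
	#define DEMO_SWP_OFFSET(entry)		((entry) >> (DEMO_TYPE_BITS + 1))

	int main(void)
	{
		unsigned long map_nr = 12345;	/* stand-in for MAP_NR(page) */
		unsigned long type   = 0x20;	/* stand-in for SHM_SWP_TYPE */

		/* swap-out side: remember which mem_map slot the page occupies */
		unsigned long entry = DEMO_SWP_ENTRY(type, map_nr);

		/* swap-in side: recover that slot and rebuild the page address */
		printf("entry=%#lx type=%#lx offset=%lu\n",
		       entry, DEMO_SWP_TYPE(entry), DEMO_SWP_OFFSET(entry));
		return 0;
	}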
 436 
 437 static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
 438         unsigned long address, unsigned int flags)
 439 {
 440         pte_t pte = *ptep;
 441         unsigned long page;
 442         int error;
 443 
 444         if (!(flags & MS_INVALIDATE)) {
 445                 if (!pte_present(pte))
 446                         return 0;
 447                 if (!pte_dirty(pte))
 448                         return 0;
 449                 set_pte(ptep, pte_mkclean(pte));
 450                 invalidate_page(vma, address);
 451                 page = pte_page(pte);
 452                 mem_map[MAP_NR(page)].count++;
 453         } else {
 454                 if (pte_none(pte))
 455                         return 0;
 456                 pte_clear(ptep);
 457                 invalidate_page(vma, address);
 458                 if (!pte_present(pte)) {
 459                         swap_free(pte_val(pte));
 460                         return 0;
 461                 }
 462                 page = pte_page(pte);
 463                 if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
 464                         free_page(page);
 465                         return 0;
 466                 }
 467         }
 468         error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
 469         free_page(page);
 470         return error;
 471 }
 472 
 473 static inline int filemap_sync_pte_range(pmd_t * pmd,
 474         unsigned long address, unsigned long size, 
 475         struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
 476 {
 477         pte_t * pte;
 478         unsigned long end;
 479         int error;
 480 
 481         if (pmd_none(*pmd))
 482                 return 0;
 483         if (pmd_bad(*pmd)) {
 484                 printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
 485                 pmd_clear(pmd);
 486                 return 0;
 487         }
 488         pte = pte_offset(pmd, address);
 489         offset += address & PMD_MASK;
 490         address &= ~PMD_MASK;
 491         end = address + size;
 492         if (end > PMD_SIZE)
 493                 end = PMD_SIZE;
 494         error = 0;
 495         do {
 496                 error |= filemap_sync_pte(pte, vma, address + offset, flags);
 497                 address += PAGE_SIZE;
 498                 pte++;
 499         } while (address < end);
 500         return error;
 501 }
 502 
 503 static inline int filemap_sync_pmd_range(pgd_t * pgd,
 504         unsigned long address, unsigned long size, 
 505         struct vm_area_struct *vma, unsigned int flags)
 506 {
 507         pmd_t * pmd;
 508         unsigned long offset, end;
 509         int error;
 510 
 511         if (pgd_none(*pgd))
 512                 return 0;
 513         if (pgd_bad(*pgd)) {
 514                 printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
 515                 pgd_clear(pgd);
 516                 return 0;
 517         }
 518         pmd = pmd_offset(pgd, address);
 519         offset = address & PMD_MASK;
 520         address &= ~PMD_MASK;
 521         end = address + size;
 522         if (end > PGDIR_SIZE)
 523                 end = PGDIR_SIZE;
 524         error = 0;
 525         do {
 526                 error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
 527                 address = (address + PMD_SIZE) & PMD_MASK;
 528                 pmd++;
 529         } while (address < end);
 530         return error;
 531 }
 532 
 533 static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
 534         size_t size, unsigned int flags)
 535 {
 536         pgd_t * dir;
 537         unsigned long end = address + size;
 538         int error = 0;
 539 
 540         dir = pgd_offset(current->mm, address);
 541         while (address < end) {
 542                 error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
 543                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 544                 dir++;
 545         }
 546         invalidate_range(vma->vm_mm, end - size, end);
 547         return error;
 548 }
 549 
 550 /*
 551  * This handles (potentially partial) area unmaps..
 552  */
 553 static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
 554 {
 555         filemap_sync(vma, start, len, MS_ASYNC);
 556 }
 557 
 558 /*
 559  * Shared mappings need to be able to do the right thing at
 560  * close/unmap/sync. They will also use the private file as
 561  * backing-store for swapping..
 562  */
 563 static struct vm_operations_struct file_shared_mmap = {
 564         NULL,                   /* no special open */
 565         NULL,                   /* no special close */
 566         filemap_unmap,          /* unmap - we need to sync the pages */
 567         NULL,                   /* no special protect */
 568         filemap_sync,           /* sync */
 569         NULL,                   /* advise */
 570         filemap_nopage,         /* nopage */
 571         NULL,                   /* wppage */
 572         filemap_swapout,        /* swapout */
 573         filemap_swapin,         /* swapin */
 574 };
 575 
 576 /*
 577  * Private mappings just need to be able to load in the map.
 578  *
 579  * (This is actually used for shared mappings as well, if we
 580  * know they can't ever get write permissions..)
 581  */
 582 static struct vm_operations_struct file_private_mmap = {
 583         NULL,                   /* open */
 584         NULL,                   /* close */
 585         NULL,                   /* unmap */
 586         NULL,                   /* protect */
 587         NULL,                   /* sync */
 588         NULL,                   /* advise */
 589         filemap_nopage,         /* nopage */
 590         NULL,                   /* wppage */
 591         NULL,                   /* swapout */
 592         NULL,                   /* swapin */
 593 };
 594 
 595 /* This is used for a general mmap of a disk file */
 596 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
 597 {
 598         struct vm_operations_struct * ops;
 599 
 600         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
 601                 ops = &file_shared_mmap;
 602                 /* share_page() can only guarantee proper page sharing if
 603                  * the offsets are all page aligned. */
 604                 if (vma->vm_offset & (PAGE_SIZE - 1))
 605                         return -EINVAL;
 606         } else {
 607                 ops = &file_private_mmap;
 608                 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 609                         return -EINVAL;
 610         }
 611         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 612                 return -EACCES;
 613         if (!inode->i_op || !inode->i_op->readpage)
 614                 return -ENOEXEC;
 615         if (!IS_RDONLY(inode)) {
 616                 inode->i_atime = CURRENT_TIME;
 617                 inode->i_dirt = 1;
 618         }
 619         vma->vm_inode = inode;
 620         inode->i_count++;
 621         vma->vm_ops = ops;
 622         return 0;
 623 }
 624 
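A filesystem opts into all of this by pointing the mmap entry of its file_operations at generic_mmap() (and usually its read entry at generic_file_read()), while supplying a readpage() in its inode_operations; the -ENOEXEC check above enforces the latter. The table below is a sketch in the same positional style as the vm_operations tables earlier in this file; the field order is my recollection of <linux/fs.h> from kernels of this vintage, so treat it as an illustration rather than something to paste in.

	static struct file_operations example_file_operations = {
		NULL,			/* lseek - default */
		generic_file_read,	/* read - through the page cache */
		NULL,			/* write - filesystem specific */
		NULL,			/* readdir - not for regular files */
		NULL,			/* select - default */
		NULL,			/* ioctl - default */
		generic_mmap,		/* mmap - the routine above */
		NULL,			/* open - default */
		NULL,			/* release */
		NULL,			/* fsync */
	};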
 625 
 626 /*
 627  * The msync() system call.
 628  */
 629 
 630 static int msync_interval(struct vm_area_struct * vma,
 631         unsigned long start, unsigned long end, int flags)
 632 {
 633         if (!vma->vm_inode)
 634                 return 0;
 635         if (vma->vm_ops->sync) {
 636                 int error;
 637                 error = vma->vm_ops->sync(vma, start, end-start, flags);
 638                 if (error)
 639                         return error;
 640                 if (flags & MS_SYNC)
 641                         return file_fsync(vma->vm_inode, NULL);
 642                 return 0;
 643         }
 644         return 0;
 645 }
 646 
 647 asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
 648 {
 649         unsigned long end;
 650         struct vm_area_struct * vma;
 651         int unmapped_error, error;
 652 
 653         if (start & ~PAGE_MASK)
 654                 return -EINVAL;
 655         len = (len + ~PAGE_MASK) & PAGE_MASK;
 656         end = start + len;
 657         if (end < start)
 658                 return -EINVAL;
 659         if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
 660                 return -EINVAL;
 661         if (end == start)
 662                 return 0;
 663         /*
 664          * If the interval [start,end) covers some unmapped address ranges,
 665          * just ignore them, but return -EFAULT at the end.
 666          */
 667         vma = find_vma(current, start);
 668         unmapped_error = 0;
 669         for (;;) {
 670                 /* Still start < end. */
 671                 if (!vma)
 672                         return -EFAULT;
 673                 /* Here start < vma->vm_end. */
 674                 if (start < vma->vm_start) {
 675                         unmapped_error = -EFAULT;
 676                         start = vma->vm_start;
 677                 }
 678                 /* Here vma->vm_start <= start < vma->vm_end. */
 679                 if (end <= vma->vm_end) {
 680                         if (start < end) {
 681                                 error = msync_interval(vma, start, end, flags);
 682                                 if (error)
 683                                         return error;
 684                         }
 685                         return unmapped_error;
 686                 }
 687                 /* Here vma->vm_start <= start < vma->vm_end < end. */
 688                 error = msync_interval(vma, start, vma->vm_end, flags);
 689                 if (error)
 690                         return error;
 691                 start = vma->vm_end;
 692                 vma = vma->vm_next;
 693         }
 694 }
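Seen from user space, the path above is exercised by the ordinary mmap()/msync() pair. A minimal example follows (error handling trimmed; 4096 is assumed as the page size for brevity):

	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		int fd = open("data.bin", O_RDWR);
		char *p;

		if (fd < 0)
			return 1;
		/* shared mapping, so stores are written back to the file */
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		memcpy(p, "hello", 5);		/* dirty the mapped page */
		msync(p, 4096, MS_SYNC);	/* sys_msync -> msync_interval ->
						   filemap_sync, then fsync the inode */
		munmap(p, 4096);
		close(fd);
		return 0;
	}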
