root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions.
  1. invalidate_inode_pages
  2. shrink_mmap
  3. page_unuse
  4. update_vm_cache
  5. try_to_read_ahead
  6. generic_file_read
  7. fill_page
  8. filemap_nopage
  9. filemap_write_page
  10. filemap_swapout
  11. filemap_swapin
  12. filemap_sync_pte
  13. filemap_sync_pte_range
  14. filemap_sync_pmd_range
  15. filemap_sync
  16. filemap_unmap
  17. generic_mmap
  18. msync_interval
  19. sys_msync

/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994, 1995  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/locks.h>
#include <linux/pagemap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 */

unsigned long page_cache_size = 0;
struct page * page_hash_table[PAGE_HASH_SIZE];

/*
 * Simple routines for both non-shared and shared mappings.
 */

void invalidate_inode_pages(struct inode * inode, unsigned long start)
{
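        /*
         * Drop every cached page of this inode that lies wholly beyond
         * "start" (the new end of file when truncating), and clear the
         * tail of the page that straddles it so stale data can't reappear
         * through a later read or mapping.
         */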
        struct page ** p = &inode->i_pages;
        struct page * page;

        while ((page = *p) != NULL) {
                unsigned long offset = page->offset;

                /* page wholly truncated - free it */
                if (offset >= start) {
                        inode->i_nrpages--;
                        if ((*p = page->next) != NULL)
                                (*p)->prev = page->prev;
                        page->dirty = 0;
                        page->next = NULL;
                        page->prev = NULL;
                        remove_page_from_hash_queue(page);
                        page->inode = NULL;
                        free_page(page_address(page));
                        continue;
                }
                p = &page->next;
                offset = start - offset;
                /* partial truncate, clear end of page */
                if (offset < PAGE_SIZE)
                        memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
        }
}

int shrink_mmap(int priority, unsigned long limit)
{
        static int clock = 0;
        struct page * page;

        if (limit > high_memory)
                limit = high_memory;
        limit = MAP_NR(limit);
        if (clock >= limit)
                clock = 0;
        priority = (limit<<2) >> priority;
        page = mem_map + clock;
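        /*
         * Clock-style sweep over mem_map: "priority" sets how many entries
         * we are willing to examine this pass.  Each visit halves a cached
         * page's age (pages still shared, count != 1, get boosted instead),
         * and the first page whose age has decayed below PAGE_AGE_VALUE is
         * evicted from the page cache.
         */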
        while (priority-- > 0) {
                if (page->inode) {
                        unsigned age = page->age;
                        /* if the page is shared, we rejuvenate it */
                        if (page->count != 1)
                                age |= PAGE_AGE_VALUE << 1;
                        page->age = age >> 1;
                        if (age < PAGE_AGE_VALUE) {
                                remove_page_from_hash_queue(page);
                                remove_page_from_inode_queue(page);
                                free_page(page_address(page));
                                return 1;
                        }
                }
                page++;
                clock++;
                if (clock >= limit) {
                        clock = 0;
                        page = mem_map;
                }
        }
        return 0;
}

/*
 * This is called from try_to_swap_out() when we try to get rid of some
 * pages..  If we're unmapping the last occurrence of this page, we also
 * free it from the page hash-queues etc, as we don't want to keep it
 * in-core unnecessarily.
 */
unsigned long page_unuse(unsigned long page)
{
        struct page * p = mem_map + MAP_NR(page);
        int count = p->count;

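        /*
         * A count of exactly 2 means: the mapping being torn down plus the
         * page cache itself - in that case drop the page from the cache as
         * well so it doesn't stay in-core for no reason.
         */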
        if (count != 2)
                return count;
        if (!p->inode)
                return count;
        remove_page_from_hash_queue(p);
        remove_page_from_inode_queue(p);
        free_page(page);
        return 1;
}

/*
 * Update a page cache copy, when we're doing a "write()" system call
 * See also "update_vm_cache()".
 */
void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
{
        struct page * page;

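        /*
         * If the affected page is in the cache, copy the freshly written
         * bytes into it so readers and mappings see the new contents.
         * find_page() takes a reference on the page; the free_page() below
         * drops it again.
         */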
        page = find_page(inode, pos & PAGE_MASK);
        if (page) {
                unsigned long addr;

                if (!page->uptodate)
                        sleep_on(&page->wait);
                addr = page_address(page);
                memcpy((void *) ((pos & ~PAGE_MASK) + addr), buf, count);
                free_page(addr);
        }
}

/*
 * Try to read ahead in the file. "page_cache" is a potentially free page
 * that we could use for the cache (if it is 0 we can try to create one,
 * this is all overlapped with the IO on the previous page finishing anyway)
 */
static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
{
        struct page * page;

        if (!page_cache) {
                page_cache = __get_free_page(GFP_KERNEL);
                if (!page_cache)
                        return 0;
        }
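        /*
         * Note: the symbol below is never defined, so the read-ahead body
         * is compiled out and the function currently just hands the spare
         * page back to the caller for re-use.
         */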
#ifdef readahead_makes_sense_due_to_asynchronous_reads
        offset = (offset + PAGE_SIZE) & PAGE_MASK;
        page = find_page(inode, offset);
        if (page) {
                page->count--;
                return page_cache;
        }
        /*
         * Ok, add the new page to the hash-queues...
         */
        page = mem_map + MAP_NR(page_cache);
        page->count++;
        page->uptodate = 0;
        page->error = 0;
        page->offset = offset;
        add_page_to_inode_queue(inode, page);
        add_page_to_hash_queue(inode, page);

        /*
         * And start IO on it..
         * (this should be asynchronous, but currently isn't)
         */
        inode->i_op->readpage(inode, page);
        free_page(page_cache);
        return 0;
#else
        return page_cache;
#endif
}

/*
 * This is a generic file read routine, and uses the
 * inode->i_op->readpage() function for the actual low-level
 * stuff.
 */
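/*
 * A filesystem that wants this generic behaviour typically just provides
 * an inode->i_op->readpage() routine for the low-level I/O and uses
 * generic_file_read() as its read() file operation (and generic_mmap()
 * below as its mmap() operation); as noted at the top of the file, NFS
 * chooses to do things differently.
 */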
int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
{
        int read = 0;
        unsigned long pos;
        unsigned long page_cache = 0;

        if (count <= 0)
                return 0;

        pos = filp->f_pos;
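        /*
         * Main loop: for each page-sized chunk, find the page in the cache
         * (or allocate one and start readpage() on it), wait for it to
         * become up to date, and copy the relevant bytes out to user space.
         */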
        do {
                struct page *page;
                unsigned long offset, addr, nr;

                if (pos >= inode->i_size)
                        break;
                offset = pos & ~PAGE_MASK;
                nr = PAGE_SIZE - offset;
                /*
                 * Try to find the data in the page cache..
                 */
                page = find_page(inode, pos & PAGE_MASK);
                if (page)
                        goto found_page;

                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                if (!page_cache) {
                        page_cache = __get_free_page(GFP_KERNEL);
                        if (!page_cache) {
                                if (!read)
                                        read = -ENOMEM;
                                break;
                        }
                }

                /*
                 * That could have slept, so we need to check again..
                 */
                if (pos >= inode->i_size)
                        break;
                page = find_page(inode, pos & PAGE_MASK);
                if (page)
                        goto found_page;

                /*
                 * Ok, add the new page to the hash-queues...
                 */
                page = mem_map + MAP_NR(page_cache);
                page_cache = 0;
                page->count++;
                page->uptodate = 0;
                page->error = 0;
                page->offset = pos & PAGE_MASK;
                add_page_to_inode_queue(inode, page);
                add_page_to_hash_queue(inode, page);

                /*
                 * And start IO on it..
                 * (this should be asynchronous, but currently isn't)
                 */
                inode->i_op->readpage(inode, page);

found_page:
                addr = page_address(page);
                if (nr > count)
                        nr = count;
                if (!page->uptodate) {
                        page_cache = try_to_read_ahead(inode, offset, page_cache);
                        if (!page->uptodate)
                                sleep_on(&page->wait);
                }
                if (nr > inode->i_size - pos)
                        nr = inode->i_size - pos;
                memcpy_tofs(buf, (void *) (addr + offset), nr);
                free_page(addr);
                buf += nr;
                pos += nr;
                read += nr;
                count -= nr;
        } while (count);

        filp->f_pos = pos;
        if (page_cache)
                free_page(page_cache);
        if (!IS_RDONLY(inode)) {
                inode->i_atime = CURRENT_TIME;
                inode->i_dirt = 1;
        }
        return read;
}

/*
 * Find a cached page (creating it and starting the read if it isn't
 * there yet), wait for it to become up-to-date, and return the page
 * address.
 */
static inline unsigned long fill_page(struct inode * inode, unsigned long offset)
{
        struct page * page;
        unsigned long new_page;

        page = find_page(inode, offset);
        if (page)
                goto found_page;
        new_page = __get_free_page(GFP_KERNEL);
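        /*
         * __get_free_page() may have blocked, so someone else could have
         * read the page in meanwhile - look it up again before inserting
         * our freshly allocated one.
         */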
        page = find_page(inode, offset);
        if (page) {
                if (new_page)
                        free_page(new_page);
                goto found_page;
        }
        if (!new_page)
                return 0;
        page = mem_map + MAP_NR(new_page);
        new_page = 0;
        page->count++;
        page->uptodate = 0;
        page->error = 0;
        page->offset = offset;
        add_page_to_inode_queue(inode, page);
        add_page_to_hash_queue(inode, page);
        inode->i_op->readpage(inode, page);
found_page:
        if (!page->uptodate)
                sleep_on(&page->wait);
        return page_address(page);
}

/*
 * Semantics for shared and private memory areas are different past the end
 * of the file. A shared mapping past the last page of the file is an error
 * and results in a SIGBUS, while a private mapping just maps in a zero page.
 */
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
{
        unsigned long offset;
        struct inode * inode = area->vm_inode;
        unsigned long page;

        offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
        if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
                return 0;

        page = fill_page(inode, offset);
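        /*
         * For a private mapping we must not hand out the page-cache page
         * itself: copy its contents into a fresh page and drop our
         * reference on the cached one.
         */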
        if (page && no_share) {
                unsigned long new_page = __get_free_page(GFP_KERNEL);
                if (new_page)
                        memcpy((void *) new_page, (void *) page, PAGE_SIZE);
                free_page(page);
                return new_page;
        }
        return page;
}

/*
 * Tries to write a shared mapped page to its backing store. May return -EIO
 * if the disk is full.
 */
static int filemap_write_page(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long page)
{
        int old_fs;
        unsigned long size, result;
        struct file file;
        struct inode * inode;
        struct buffer_head * bh;

        bh = buffer_pages[MAP_NR(page)];
        if (bh) {
                /* whee.. just mark the buffer heads dirty */
                struct buffer_head * tmp = bh;
                do {
                        mark_buffer_dirty(tmp, 0);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
                return 0;
        }

        inode = vma->vm_inode;
        file.f_op = inode->i_op->default_file_ops;
        if (!file.f_op->write)
                return -EIO;
        size = offset + PAGE_SIZE;
        /* refuse to extend file size.. */
        if (S_ISREG(inode->i_mode)) {
                if (size > inode->i_size)
                        size = inode->i_size;
                /* Ho humm.. We should have tested for this earlier */
                if (size < offset)
                        return -EIO;
        }
        size -= offset;
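        /*
         * Build a throw-away struct file (f_mode 3 = readable and writable)
         * and call the filesystem's regular write() routine with the kernel
         * segment selected, so the page goes out through exactly the same
         * path as an ordinary write() system call.
         */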
        file.f_mode = 3;
        file.f_flags = 0;
        file.f_count = 1;
        file.f_inode = inode;
        file.f_pos = offset;
        file.f_reada = 0;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        result = file.f_op->write(inode, &file, (const char *) page, size);
        set_fs(old_fs);
        if (result != size)
                return -EIO;
        return 0;
}


/*
 * Swapping to a shared file: while we're busy writing out the page
 * (and the page still exists in memory), we save the page information
 * in the page table, so that "filemap_swapin()" can re-use the page
 * immediately if it is called while we're busy swapping it out..
 *
 * Once we've written it all out, we mark the page entry "empty", which
 * will result in a normal page-in (instead of a swap-in) from the now
 * up-to-date disk file.
 */
int filemap_swapout(struct vm_area_struct * vma,
        unsigned long offset,
        pte_t *page_table)
{
        int error;
        unsigned long page = pte_page(*page_table);
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

        set_pte(page_table, __pte(entry));
        /* Yuck, perhaps a slightly modified swapout parameter set? */
        invalidate_page(vma, (offset + vma->vm_start - vma->vm_offset));
        error = filemap_write_page(vma, offset, page);
        if (pte_val(*page_table) == entry)
                pte_clear(page_table);
        return error;
}

/*
 * filemap_swapin() is called only if we have something in the page
 * tables that is non-zero (but not present), which we know to be the
 * page index of a page that is busy being swapped out (see above).
 * So we just use it directly..
 */
static pte_t filemap_swapin(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long entry)
{
        unsigned long page = SWP_OFFSET(entry);

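        /*
         * The fake "swap entry" written by filemap_swapout() is really just
         * the mem_map index of the page, so grab another reference and map
         * the page straight back in; no I/O is needed.
         */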
        mem_map[page].count++;
        page = (page << PAGE_SHIFT) + PAGE_OFFSET;
        return mk_pte(page,vma->vm_page_prot);
}


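/*
 * Write back (and optionally invalidate) one pte of a shared mapping.
 * Without MS_INVALIDATE we just clean the pte and write the still-mapped
 * dirty page back to the file; with MS_INVALIDATE the pte is torn down as
 * well: clean pages (or a pure MS_INVALIDATE request) are simply freed,
 * dirty ones are written back first.
 */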
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long page;
        int error;

        if (!(flags & MS_INVALIDATE)) {
                if (!pte_present(pte))
                        return 0;
                if (!pte_dirty(pte))
                        return 0;
                set_pte(ptep, pte_mkclean(pte));
                invalidate_page(vma, address);
                page = pte_page(pte);
                mem_map[MAP_NR(page)].count++;
        } else {
                if (pte_none(pte))
                        return 0;
                pte_clear(ptep);
                invalidate_page(vma, address);
                if (!pte_present(pte)) {
                        swap_free(pte_val(pte));
                        return 0;
                }
                page = pte_page(pte);
                if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
                        free_page(page);
                        return 0;
                }
        }
        error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
        free_page(page);
        return error;
}

static inline int filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
        pte_t * pte;
        unsigned long end;
        int error;

        if (pmd_none(*pmd))
                return 0;
        if (pmd_bad(*pmd)) {
                printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return 0;
        }
        pte = pte_offset(pmd, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte(pte, vma, address + offset, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return error;
}

static inline int filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        unsigned long offset, end;
        int error;

        if (pgd_none(*pgd))
                return 0;
        if (pgd_bad(*pgd)) {
                printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                return 0;
        }
        pmd = pmd_offset(pgd, address);
        offset = address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return error;
}

static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int error = 0;

        dir = pgd_offset(current->mm, address);
        while (address < end) {
                error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate_range(vma->vm_mm, end - size, end);
        return error;
}

/*
 * This handles (potentially partial) area unmaps..
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
        filemap_sync(vma, start, len, MS_ASYNC);
}

/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 */
static struct vm_operations_struct file_shared_mmap = {
        NULL,                   /* no special open */
        NULL,                   /* no special close */
        filemap_unmap,          /* unmap - we need to sync the pages */
        NULL,                   /* no special protect */
        filemap_sync,           /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        filemap_swapout,        /* swapout */
        filemap_swapin,         /* swapin */
};

/*
 * Private mappings just need to be able to load in the map.
 *
 * (This is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 */
static struct vm_operations_struct file_private_mmap = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};

/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        struct vm_operations_struct * ops;

        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                ops = &file_shared_mmap;
                /* share_page() can only guarantee proper page sharing if
                 * the offsets are all page aligned. */
                if (vma->vm_offset & (PAGE_SIZE - 1))
                        return -EINVAL;
        } else {
                ops = &file_private_mmap;
                if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
                        return -EINVAL;
        }
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        if (!inode->i_op || !inode->i_op->readpage)
                return -ENOEXEC;
        if (!IS_RDONLY(inode)) {
                inode->i_atime = CURRENT_TIME;
                inode->i_dirt = 1;
        }
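        /* the vma holds its own reference on the inode from here on */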
        vma->vm_inode = inode;
        inode->i_count++;
        vma->vm_ops = ops;
        return 0;
}


/*
 * The msync() system call.
 */

static int msync_interval(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int flags)
{
        if (!vma->vm_inode)
                return 0;
        if (vma->vm_ops->sync) {
                int error;
                error = vma->vm_ops->sync(vma, start, end-start, flags);
                if (error)
                        return error;
                if (flags & MS_SYNC)
                        return file_fsync(vma->vm_inode, NULL);
                return 0;
        }
        return 0;
}

asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct vm_area_struct * vma;
        int unmapped_error, error;

        if (start & ~PAGE_MASK)
                return -EINVAL;
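        /* round the length up to a whole number of pages */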
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                return -EINVAL;
        if (end == start)
                return 0;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -EFAULT at the end.
         */
        vma = find_vma(current, start);
        unmapped_error = 0;
        for (;;) {
                /* Still start < end. */
                if (!vma)
                        return -EFAULT;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -EFAULT;
                        start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = msync_interval(vma, start, end, flags);
                                if (error)
                                        return error;
                        }
                        return unmapped_error;
                }
                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = msync_interval(vma, start, vma->vm_end, flags);
                if (error)
                        return error;
                start = vma->vm_end;
                vma = vma->vm_next;
        }
}
