root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions; a short user-level usage sketch follows the list.
  1. invalidate_inode_pages
  2. shrink_mmap
  3. page_unuse
  4. update_vm_cache
  5. try_to_read_ahead
  6. generic_file_read
  7. fill_page
  8. filemap_nopage
  9. filemap_write_page
  10. filemap_swapout
  11. filemap_swapin
  12. filemap_sync_pte
  13. filemap_sync_pte_range
  14. filemap_sync_pmd_range
  15. filemap_sync
  16. filemap_unmap
  17. generic_mmap
  18. msync_interval
  19. sys_msync
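
For orientation, these routines form the kernel side of file-backed mappings:
generic_mmap() attaches a file to a process address space, filemap_nopage()
fills pages from the page cache on demand, and filemap_sync()/sys_msync()
write dirty shared pages back to the file.  The user-level sketch below is a
minimal illustration only (the file name "data.bin" and the one-page size are
made up, and the file is assumed to be at least one page long); it is the
kind of program whose mmap() and msync() calls end up in these routines.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		char *map;
		int fd = open("data.bin", O_RDWR);

		if (fd < 0)
			return 1;
		/* MAP_SHARED: stores are written back to the file */
		map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;
		memcpy(map, "hello", 5);	/* dirties the shared page */
		msync(map, 4096, MS_SYNC);	/* sys_msync() -> filemap_sync() */
		munmap(map, 4096);
		close(fd);
		return 0;
	}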

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994, 1995  Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 #include <linux/fs.h>
  22 #include <linux/locks.h>
  23 #include <linux/pagemap.h>
  24 #include <linux/swap.h>
  25 
  26 #include <asm/segment.h>
  27 #include <asm/system.h>
  28 #include <asm/pgtable.h>
  29 
  30 /*
  31  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  32  * though.
  33  *
  34  * Shared mappings now work. 15.8.1995  Bruno.
  35  */
  36 
  37 unsigned long page_cache_size = 0;
  38 struct page * page_hash_table[PAGE_HASH_SIZE];
  39 
  40 /*
  41  * Simple routines for both non-shared and shared mappings.
  42  */
  43 
  44 void invalidate_inode_pages(struct inode * inode, unsigned long start)
  45 {
  46         struct page ** p = &inode->i_pages;
  47         struct page * page;
  48 
  49         while ((page = *p) != NULL) {
  50                 unsigned long offset = page->offset;
  51 
  52                 /* page wholly truncated - free it */
  53                 if (offset >= start) {
  54                         inode->i_nrpages--;
  55                         if ((*p = page->next) != NULL)
  56                                 (*p)->prev = page->prev;
  57                         page->dirty = 0;
  58                         page->next = NULL;
  59                         page->prev = NULL;
  60                         remove_page_from_hash_queue(page);
  61                         page->inode = NULL;
  62                         free_page(page_address(page));
  63                         continue;
  64                 }
  65                 p = &page->next;
  66                 offset = start - offset;
  67                 /* partial truncate, clear end of page */
  68                 if (offset < PAGE_SIZE)
  69                         memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
  70         }
  71 }
  72 
  73 int shrink_mmap(int priority, unsigned long limit)
  74 {
  75         static int clock = 0;
  76         struct page * page;
  77         struct buffer_head *tmp, *bh;
  78 
  79         if (limit > high_memory)
  80                 limit = high_memory;
  81         limit = MAP_NR(limit);
  82         if (clock >= limit)
  83                 clock = 0;
  84         priority = (limit<<2) >> priority;
  85         page = mem_map + clock;
  86         while (priority-- > 0) {
  87                 /* First of all, regenerate the page's referenced bit
  88                    from any buffers in the page */
  89                 bh = buffer_pages[MAP_NR(page_address(page))];
  90                 if (bh) {
  91                         tmp = bh;
  92                         do {
  93                                 if (buffer_touched(tmp)) {
  94                                         clear_bit(BH_Touched, &tmp->b_state);
  95                                         page->referenced = 1;
  96                                 }
  97                                 tmp = tmp->b_this_page;
  98                         } while (tmp != bh);
  99                 }
 100 
 101                 /* We can't throw away shared pages, but we do mark
 102                    them as referenced.  This relies on the fact that
 103                    no page is currently in both the page cache and the
 104                    buffer cache; we'd have to modify the following
 105                    test to allow for that case. */
 106                 if (page->count > 1)
 107                         page->referenced = 1;
 108                 else if (page->referenced)
 109                         page->referenced = 0;
 110                 else if (page->count) {
 111                         /* The page is an old, unshared page --- try
 112                            to discard it. */
 113                         if (page->inode) {
 114                                 remove_page_from_hash_queue(page);
 115                                 remove_page_from_inode_queue(page);
 116                                 free_page(page_address(page));
 117                                 return 1;
 118                         }
 119                         if (bh && try_to_free_buffer(bh, &bh, 6))
 120                                 return 1;
 121                 }
 122                 page++;
 123                 clock++;
 124                 if (clock >= limit) {
 125                         clock = 0;
 126                         page = mem_map;
 127                 }
 128         }
 129         return 0;
 130 }
 131 
 132 /*
  133  * This is called from try_to_swap_out() when we try to get rid of some
 134  * pages..  If we're unmapping the last occurrence of this page, we also
 135  * free it from the page hash-queues etc, as we don't want to keep it
 136  * in-core unnecessarily.
 137  */
 138 unsigned long page_unuse(unsigned long page)
 139 {
 140         struct page * p = mem_map + MAP_NR(page);
 141         int count = p->count;
 142 
 143         if (count != 2)
 144                 return count;
 145         if (!p->inode)
 146                 return count;
 147         remove_page_from_hash_queue(p);
 148         remove_page_from_inode_queue(p);
 149         free_page(page);
 150         return 1;
 151 }
 152 
 153 /*
 154  * Update a page cache copy, when we're doing a "write()" system call
 155  * See also "update_vm_cache()".
 156  */
 157 void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
 158 {
 159         struct page * page;
 160 
 161         page = find_page(inode, pos & PAGE_MASK);
 162         if (page) {
 163                 unsigned long addr;
 164 
 165                 if (!page->uptodate)
 166                         sleep_on(&page->wait);
 167                 addr = page_address(page);
 168                 memcpy((void *) ((pos & ~PAGE_MASK) + addr), buf, count);
 169                 free_page(addr);
 170         }
 171 }
 172 
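/*
 * Editorial illustration (not part of the original source): update_vm_cache()
 * copies into at most one cached page per call, so a write path is expected
 * to invoke it once per page-sized chunk, passing the in-kernel copy of the
 * data rather than the raw user pointer.  The sketch below is disabled with
 * "#if 0" and uses a hypothetical helper, example_write_chunk(), assumed to
 * write one chunk to the backing store, leave an in-kernel copy of it at
 * *kaddr and return the number of bytes written.
 */
#if 0
static int example_file_write(struct inode * inode, struct file * file,
	const char * buf, int count)
{
	unsigned long pos = file->f_pos;
	int written = 0;

	while (count > 0) {
		int chunk = PAGE_SIZE - (pos & ~PAGE_MASK);	/* never cross a page */
		char * kaddr;
		int result;

		if (chunk > count)
			chunk = count;
		result = example_write_chunk(inode, pos, buf, chunk, &kaddr);	/* hypothetical */
		if (result <= 0)
			break;
		update_vm_cache(inode, pos, kaddr, result);	/* keep cached page in sync */
		pos += result;
		buf += result;
		count -= result;
		written += result;
	}
	file->f_pos = pos;
	return written;
}
#endif
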
 173 /*
 174  * Try to read ahead in the file. "page_cache" is a potentially free page
 175  * that we could use for the cache (if it is 0 we can try to create one,
 176  * this is all overlapped with the IO on the previous page finishing anyway)
 177  */
 178 static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
 179 {
 180         struct page * page;
 181 
 182         if (!page_cache) {
 183                 page_cache = __get_free_page(GFP_KERNEL);
 184                 if (!page_cache)
 185                         return 0;
 186         }
 187 #ifdef readahead_makes_sense_due_to_asynchronous_reads
 188         offset = (offset + PAGE_SIZE) & PAGE_MASK;
 189         page = find_page(inode, offset);
 190         if (page) {
 191                 page->count--;
 192                 return page_cache;
 193         }
 194         /*
 195          * Ok, add the new page to the hash-queues...
 196          */
 197         page = mem_map + MAP_NR(page_cache);
 198         page->count++;
 199         page->uptodate = 0;
 200         page->error = 0;
 201         page->offset = offset;
 202         add_page_to_inode_queue(inode, page);
 203         add_page_to_hash_queue(inode, page);
 204 
 205         /* 
 206          * And start IO on it..
 207          * (this should be asynchronous, but currently isn't)
 208          */
 209         inode->i_op->readpage(inode, page);
 210         free_page(page_cache);
 211         return 0;
 212 #else
 213         return page_cache;
 214 #endif
 215 }
 216 
 217 /*
 218  * This is a generic file read routine, and uses the
 219  * inode->i_op->readpage() function for the actual low-level
 220  * stuff.
 221  */
 222 int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
 223 {
 224         int read = 0;
 225         unsigned long pos;
 226         unsigned long page_cache = 0;
 227 
 228         if (count <= 0)
 229                 return 0;
 230 
 231         pos = filp->f_pos;
 232         do {
 233                 struct page *page;
 234                 unsigned long offset, addr, nr;
 235 
 236                 if (pos >= inode->i_size)
 237                         break;
 238                 offset = pos & ~PAGE_MASK;
 239                 nr = PAGE_SIZE - offset;
 240                 /*
 241                  * Try to find the data in the page cache..
 242                  */
 243                 page = find_page(inode, pos & PAGE_MASK);
 244                 if (page)
 245                         goto found_page;
 246 
 247                 /*
 248                  * Ok, it wasn't cached, so we need to create a new
 249                  * page..
 250                  */
 251                 if (!page_cache) {
 252                         page_cache = __get_free_page(GFP_KERNEL);
 253                         if (!page_cache) {
 254                                 if (!read)
 255                                         read = -ENOMEM;
 256                                 break;
 257                         }
 258                 }
 259 
 260                 /*
 261                  * That could have slept, so we need to check again..
 262                  */
 263                 if (pos >= inode->i_size)
 264                         break;
 265                 page = find_page(inode, pos & PAGE_MASK);
 266                 if (page)
 267                         goto found_page;
 268 
 269                 /*
 270                  * Ok, add the new page to the hash-queues...
 271                  */
 272                 page = mem_map + MAP_NR(page_cache);
 273                 page_cache = 0;
 274                 page->count++;
 275                 page->uptodate = 0;
 276                 page->error = 0;
 277                 page->offset = pos & PAGE_MASK;
 278                 add_page_to_inode_queue(inode, page);
 279                 add_page_to_hash_queue(inode, page);
 280 
 281                 /* 
 282                  * And start IO on it..
 283                  * (this should be asynchronous, but currently isn't)
 284                  */
 285                 inode->i_op->readpage(inode, page);
 286 
 287 found_page:
 288                 addr = page_address(page);
 289                 if (nr > count)
 290                         nr = count;
 291                 if (!page->uptodate) {
 292                         page_cache = try_to_read_ahead(inode, offset, page_cache);
 293                         if (!page->uptodate)
 294                                 sleep_on(&page->wait);
 295                 }
 296                 if (nr > inode->i_size - pos)
 297                         nr = inode->i_size - pos;
 298                 memcpy_tofs(buf, (void *) (addr + offset), nr);
 299                 free_page(addr);
 300                 buf += nr;
 301                 pos += nr;
 302                 read += nr;
 303                 count -= nr;
 304         } while (count);
 305 
 306         filp->f_pos = pos;
 307         if (page_cache)
 308                 free_page(page_cache);
 309         if (!IS_RDONLY(inode)) {
 310                 inode->i_atime = CURRENT_TIME;
 311                 inode->i_dirt = 1;
 312         }
 313         return read;
 314 }
 315 
 316 /*
 317  * Find a cached page and wait for it to become up-to-date, return
 318  * the page address.
 319  */
 320 static inline unsigned long fill_page(struct inode * inode, unsigned long offset)
 321 {
 322         struct page * page;
 323         unsigned long new_page;
 324 
 325         page = find_page(inode, offset);
 326         if (page)
 327                 goto found_page;
 328         new_page = __get_free_page(GFP_KERNEL);
 329         page = find_page(inode, offset);
 330         if (page) {
 331                 if (new_page)
 332                         free_page(new_page);
 333                 goto found_page;
 334         }
 335         if (!new_page)
 336                 return 0;
 337         page = mem_map + MAP_NR(new_page);
 338         new_page = 0;
 339         page->count++;
 340         page->uptodate = 0;
 341         page->error = 0;
 342         page->offset = offset;
 343         add_page_to_inode_queue(inode, page);
 344         add_page_to_hash_queue(inode, page);
 345         inode->i_op->readpage(inode, page);
 346 found_page:
 347         if (!page->uptodate)
 348                 sleep_on(&page->wait);
 349         return page_address(page);
 350 }
 351 
 352 /*
 353  * Semantics for shared and private memory areas are different past the end
 354  * of the file. A shared mapping past the last page of the file is an error
  355  * and results in a SIGBUS, while a private mapping just maps in a zero page.
 356  */
 357 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
 358 {
 359         unsigned long offset;
 360         struct inode * inode = area->vm_inode;
 361         unsigned long page;
 362 
 363         offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
 364         if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
 365                 return 0;
 366 
 367         page = fill_page(inode, offset);
 368         if (page && no_share) {
 369                 unsigned long new_page = __get_free_page(GFP_KERNEL);
 370                 if (new_page)
 371                         memcpy((void *) new_page, (void *) page, PAGE_SIZE);
 372                 free_page(page);
 373                 return new_page;
 374         }
 375         return page;
 376 }
 377 
 378 /*
 379  * Tries to write a shared mapped page to its backing store. May return -EIO
 380  * if the disk is full.
 381  */
 382 static int filemap_write_page(struct vm_area_struct * vma,
 383         unsigned long offset,
 384         unsigned long page)
 385 {
 386         int old_fs;
 387         unsigned long size, result;
 388         struct file file;
 389         struct inode * inode;
 390         struct buffer_head * bh;
 391 
 392         bh = buffer_pages[MAP_NR(page)];
 393         if (bh) {
 394                 /* whee.. just mark the buffer heads dirty */
 395                 struct buffer_head * tmp = bh;
 396                 do {
 397                         mark_buffer_dirty(tmp, 0);
 398                         tmp = tmp->b_this_page;
 399                 } while (tmp != bh);
 400                 return 0;
 401         }
 402 
 403         inode = vma->vm_inode;
 404         file.f_op = inode->i_op->default_file_ops;
 405         if (!file.f_op->write)
 406                 return -EIO;
 407         size = offset + PAGE_SIZE;
 408         /* refuse to extend file size.. */
 409         if (S_ISREG(inode->i_mode)) {
 410                 if (size > inode->i_size)
 411                         size = inode->i_size;
 412                 /* Ho humm.. We should have tested for this earlier */
 413                 if (size < offset)
 414                         return -EIO;
 415         }
 416         size -= offset;
 417         file.f_mode = 3;
 418         file.f_flags = 0;
 419         file.f_count = 1;
 420         file.f_inode = inode;
 421         file.f_pos = offset;
 422         file.f_reada = 0;
 423         old_fs = get_fs();
 424         set_fs(KERNEL_DS);
 425         result = file.f_op->write(inode, &file, (const char *) page, size);
 426         set_fs(old_fs);
 427         if (result != size)
 428                 return -EIO;
 429         return 0;
 430 }
 431 
 432 
 433 /*
 434  * Swapping to a shared file: while we're busy writing out the page
 435  * (and the page still exists in memory), we save the page information
 436  * in the page table, so that "filemap_swapin()" can re-use the page
 437  * immediately if it is called while we're busy swapping it out..
 438  *
 439  * Once we've written it all out, we mark the page entry "empty", which
 440  * will result in a normal page-in (instead of a swap-in) from the now
 441  * up-to-date disk file.
 442  */
 443 int filemap_swapout(struct vm_area_struct * vma,
 444         unsigned long offset,
 445         pte_t *page_table)
 446 {
 447         int error;
 448         unsigned long page = pte_page(*page_table);
 449         unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));
 450 
 451         set_pte(page_table, __pte(entry));
 452         /* Yuck, perhaps a slightly modified swapout parameter set? */
 453         invalidate_page(vma, (offset + vma->vm_start - vma->vm_offset));
 454         error = filemap_write_page(vma, offset, page);
 455         if (pte_val(*page_table) == entry)
 456                 pte_clear(page_table);
 457         return error;
 458 }
 459 
 460 /*
 461  * filemap_swapin() is called only if we have something in the page
 462  * tables that is non-zero (but not present), which we know to be the
 463  * page index of a page that is busy being swapped out (see above).
 464  * So we just use it directly..
 465  */
 466 static pte_t filemap_swapin(struct vm_area_struct * vma,
 467         unsigned long offset,
 468         unsigned long entry)
 469 {
 470         unsigned long page = SWP_OFFSET(entry);
 471 
 472         mem_map[page].count++;
 473         page = (page << PAGE_SHIFT) + PAGE_OFFSET;
 474         return mk_pte(page,vma->vm_page_prot);
 475 }
 476 
 477 
 478 static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
 479         unsigned long address, unsigned int flags)
 480 {
 481         pte_t pte = *ptep;
 482         unsigned long page;
 483         int error;
 484 
 485         if (!(flags & MS_INVALIDATE)) {
 486                 if (!pte_present(pte))
 487                         return 0;
 488                 if (!pte_dirty(pte))
 489                         return 0;
 490                 set_pte(ptep, pte_mkclean(pte));
 491                 invalidate_page(vma, address);
 492                 page = pte_page(pte);
 493                 mem_map[MAP_NR(page)].count++;
 494         } else {
 495                 if (pte_none(pte))
 496                         return 0;
 497                 pte_clear(ptep);
 498                 invalidate_page(vma, address);
 499                 if (!pte_present(pte)) {
 500                         swap_free(pte_val(pte));
 501                         return 0;
 502                 }
 503                 page = pte_page(pte);
 504                 if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
 505                         free_page(page);
 506                         return 0;
 507                 }
 508         }
 509         error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
 510         free_page(page);
 511         return error;
 512 }
 513 
 514 static inline int filemap_sync_pte_range(pmd_t * pmd,
 515         unsigned long address, unsigned long size, 
 516         struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
 517 {
 518         pte_t * pte;
 519         unsigned long end;
 520         int error;
 521 
 522         if (pmd_none(*pmd))
 523                 return 0;
 524         if (pmd_bad(*pmd)) {
 525                 printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
 526                 pmd_clear(pmd);
 527                 return 0;
 528         }
 529         pte = pte_offset(pmd, address);
 530         offset += address & PMD_MASK;
 531         address &= ~PMD_MASK;
 532         end = address + size;
 533         if (end > PMD_SIZE)
 534                 end = PMD_SIZE;
 535         error = 0;
 536         do {
 537                 error |= filemap_sync_pte(pte, vma, address + offset, flags);
 538                 address += PAGE_SIZE;
 539                 pte++;
 540         } while (address < end);
 541         return error;
 542 }
 543 
 544 static inline int filemap_sync_pmd_range(pgd_t * pgd,
 545         unsigned long address, unsigned long size, 
 546         struct vm_area_struct *vma, unsigned int flags)
 547 {
 548         pmd_t * pmd;
 549         unsigned long offset, end;
 550         int error;
 551 
 552         if (pgd_none(*pgd))
 553                 return 0;
 554         if (pgd_bad(*pgd)) {
 555                 printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
 556                 pgd_clear(pgd);
 557                 return 0;
 558         }
 559         pmd = pmd_offset(pgd, address);
 560         offset = address & PMD_MASK;
 561         address &= ~PMD_MASK;
 562         end = address + size;
 563         if (end > PGDIR_SIZE)
 564                 end = PGDIR_SIZE;
 565         error = 0;
 566         do {
 567                 error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
 568                 address = (address + PMD_SIZE) & PMD_MASK;
 569                 pmd++;
 570         } while (address < end);
 571         return error;
 572 }
 573 
 574 static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
 575         size_t size, unsigned int flags)
 576 {
 577         pgd_t * dir;
 578         unsigned long end = address + size;
 579         int error = 0;
 580 
 581         dir = pgd_offset(current->mm, address);
 582         while (address < end) {
 583                 error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
 584                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 585                 dir++;
 586         }
 587         invalidate_range(vma->vm_mm, end - size, end);
 588         return error;
 589 }
 590 
 591 /*
 592  * This handles (potentially partial) area unmaps..
 593  */
 594 static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
 595 {
 596         filemap_sync(vma, start, len, MS_ASYNC);
 597 }
 598 
 599 /*
 600  * Shared mappings need to be able to do the right thing at
 601  * close/unmap/sync. They will also use the private file as
 602  * backing-store for swapping..
 603  */
 604 static struct vm_operations_struct file_shared_mmap = {
 605         NULL,                   /* no special open */
 606         NULL,                   /* no special close */
 607         filemap_unmap,          /* unmap - we need to sync the pages */
 608         NULL,                   /* no special protect */
 609         filemap_sync,           /* sync */
 610         NULL,                   /* advise */
 611         filemap_nopage,         /* nopage */
 612         NULL,                   /* wppage */
 613         filemap_swapout,        /* swapout */
 614         filemap_swapin,         /* swapin */
 615 };
 616 
 617 /*
 618  * Private mappings just need to be able to load in the map.
 619  *
 620  * (This is actually used for shared mappings as well, if we
 621  * know they can't ever get write permissions..)
 622  */
 623 static struct vm_operations_struct file_private_mmap = {
 624         NULL,                   /* open */
 625         NULL,                   /* close */
 626         NULL,                   /* unmap */
 627         NULL,                   /* protect */
 628         NULL,                   /* sync */
 629         NULL,                   /* advise */
 630         filemap_nopage,         /* nopage */
 631         NULL,                   /* wppage */
 632         NULL,                   /* swapout */
 633         NULL,                   /* swapin */
 634 };
 635 
 636 /* This is used for a general mmap of a disk file */
 637 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
 638 {
 639         struct vm_operations_struct * ops;
 640 
 641         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
 642                 ops = &file_shared_mmap;
 643                 /* share_page() can only guarantee proper page sharing if
 644                  * the offsets are all page aligned. */
 645                 if (vma->vm_offset & (PAGE_SIZE - 1))
 646                         return -EINVAL;
 647         } else {
 648                 ops = &file_private_mmap;
 649                 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 650                         return -EINVAL;
 651         }
 652         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 653                 return -EACCES;
 654         if (!inode->i_op || !inode->i_op->readpage)
 655                 return -ENOEXEC;
 656         if (!IS_RDONLY(inode)) {
 657                 inode->i_atime = CURRENT_TIME;
 658                 inode->i_dirt = 1;
 659         }
 660         vma->vm_inode = inode;
 661         inode->i_count++;
 662         vma->vm_ops = ops;
 663         return 0;
 664 }
 665 
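/*
 * Editorial illustration (not part of the original source): a filesystem
 * whose inode_operations provide readpage() can normally point its file
 * operations straight at generic_file_read and generic_mmap, since the
 * signatures match.  The table below is hypothetical and assumes this
 * kernel generation's file_operations field order (lseek, read, write,
 * readdir, select, ioctl, mmap, open, release, fsync, ...).  Note that
 * generic_mmap() above rejects writable shared mappings whose vm_offset
 * is not page aligned, and refuses inodes without a readpage() operation.
 */
#if 0
static struct file_operations example_file_operations = {
	NULL,			/* lseek - default */
	generic_file_read,	/* read */
	NULL,			/* write */
	NULL,			/* readdir */
	NULL,			/* select - default */
	NULL,			/* ioctl */
	generic_mmap,		/* mmap */
	NULL,			/* open */
	NULL,			/* release */
	NULL,			/* fsync */
};
#endif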
 666 
 667 /*
 668  * The msync() system call.
 669  */
 670 
 671 static int msync_interval(struct vm_area_struct * vma,
 672         unsigned long start, unsigned long end, int flags)
 673 {
 674         if (!vma->vm_inode)
 675                 return 0;
 676         if (vma->vm_ops->sync) {
 677                 int error;
 678                 error = vma->vm_ops->sync(vma, start, end-start, flags);
 679                 if (error)
 680                         return error;
 681                 if (flags & MS_SYNC)
 682                         return file_fsync(vma->vm_inode, NULL);
 683                 return 0;
 684         }
 685         return 0;
 686 }
 687 
 688 asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
 689 {
 690         unsigned long end;
 691         struct vm_area_struct * vma;
 692         int unmapped_error, error;
 693 
 694         if (start & ~PAGE_MASK)
 695                 return -EINVAL;
 696         len = (len + ~PAGE_MASK) & PAGE_MASK;
 697         end = start + len;
 698         if (end < start)
 699                 return -EINVAL;
 700         if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
 701                 return -EINVAL;
 702         if (end == start)
 703                 return 0;
 704         /*
 705          * If the interval [start,end) covers some unmapped address ranges,
 706          * just ignore them, but return -EFAULT at the end.
 707          */
 708         vma = find_vma(current, start);
 709         unmapped_error = 0;
 710         for (;;) {
 711                 /* Still start < end. */
 712                 if (!vma)
 713                         return -EFAULT;
 714                 /* Here start < vma->vm_end. */
 715                 if (start < vma->vm_start) {
 716                         unmapped_error = -EFAULT;
 717                         start = vma->vm_start;
 718                 }
 719                 /* Here vma->vm_start <= start < vma->vm_end. */
 720                 if (end <= vma->vm_end) {
 721                         if (start < end) {
 722                                 error = msync_interval(vma, start, end, flags);
 723                                 if (error)
 724                                         return error;
 725                         }
 726                         return unmapped_error;
 727                 }
 728                 /* Here vma->vm_start <= start < vma->vm_end < end. */
 729                 error = msync_interval(vma, start, vma->vm_end, flags);
 730                 if (error)
 731                         return error;
 732                 start = vma->vm_end;
 733                 vma = vma->vm_next;
 734         }
 735 }
