root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions.
  1. invalidate_inode_pages
  2. truncate_inode_pages
  3. shrink_mmap
  4. page_unuse
  5. update_vm_cache
  6. try_to_read_ahead
  7. __wait_on_page
  8. generic_file_read
  9. fill_page
  10. filemap_nopage
  11. do_write_page
  12. filemap_write_page
  13. filemap_swapout
  14. filemap_swapin
  15. filemap_sync_pte
  16. filemap_sync_pte_range
  17. filemap_sync_pmd_range
  18. filemap_sync
  19. filemap_unmap
  20. generic_file_mmap
  21. msync_interval
  22. sys_msync

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994, 1995  Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 #include <linux/fs.h>
  22 #include <linux/locks.h>
  23 #include <linux/pagemap.h>
  24 #include <linux/swap.h>
  25 
  26 #include <asm/segment.h>
  27 #include <asm/system.h>
  28 #include <asm/pgtable.h>
  29 
  30 /*
  31  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  32  * though.
  33  *
  34  * Shared mappings now work. 15.8.1995  Bruno.
  35  */
  36 
  37 unsigned long page_cache_size = 0;
  38 struct page * page_hash_table[PAGE_HASH_SIZE];
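
/*
 * Illustrative sketch (not part of the original file): the page cache is
 * indexed by page_hash_table; find_page() and add_page_to_hash_queue()
 * (defined in <linux/pagemap.h>, not here) hash an (inode, offset) pair to
 * a bucket and walk its chain.  The user-space model below only shows that
 * idea -- the hash function and all demo_* names are assumptions, not the
 * kernel's real ones.
 */
#include <stddef.h>

#define DEMO_HASH_SIZE  256
#define DEMO_PAGE_SHIFT 12

struct demo_page {
	unsigned long inode_id;		/* stands in for page->inode */
	unsigned long offset;		/* page-aligned file offset */
	struct demo_page *hash_next;	/* stands in for the hash chain link */
};

static struct demo_page *demo_hash_table[DEMO_HASH_SIZE];

static unsigned long demo_hash(unsigned long inode_id, unsigned long offset)
{
	/* mix the inode identity with the page index of the offset */
	return (inode_id + (offset >> DEMO_PAGE_SHIFT)) & (DEMO_HASH_SIZE - 1);
}

static struct demo_page *demo_find_page(unsigned long inode_id, unsigned long offset)
{
	struct demo_page *p = demo_hash_table[demo_hash(inode_id, offset)];

	while (p != NULL && (p->inode_id != inode_id || p->offset != offset))
		p = p->hash_next;
	return p;
}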
  39 
  40 /*
  41  * Simple routines for both non-shared and shared mappings.
  42  */
  43 
  44 /*
  45  * Invalidate the pages of an inode, removing all pages that aren't
  46  * locked down (those are sure to be up-to-date anyway, so we shouldn't
  47  * invalidate them).
  48  */
  49 void invalidate_inode_pages(struct inode * inode)
  50 {
  51         struct page ** p;
  52         struct page * page;
  53 
  54         p = &inode->i_pages;
  55         while ((page = *p) != NULL) {
  56                 if (page->locked) {
  57                         p = &page->next;
  58                         continue;
  59                 }
  60                 inode->i_nrpages--;
  61                 if ((*p = page->next) != NULL)
  62                         (*p)->prev = page->prev;
  63                 page->dirty = 0;
  64                 page->next = NULL;
  65                 page->prev = NULL;
  66                 remove_page_from_hash_queue(page);
  67                 page->inode = NULL;
  68                 free_page(page_address(page));
  69                 continue;
  70         }
  71 }
  72 
  73 /*
  74  * Truncate the page cache at a set offset, removing the pages
  75  * that are beyond that offset (and zeroing out partial pages).
  76  */
  77 void truncate_inode_pages(struct inode * inode, unsigned long start)
  78 {
  79         struct page ** p;
  80         struct page * page;
  81 
  82 repeat:
  83         p = &inode->i_pages;
  84         while ((page = *p) != NULL) {
  85                 unsigned long offset = page->offset;
  86 
  87                 /* page wholly truncated - free it */
  88                 if (offset >= start) {
  89                         if (page->locked) {
  90                                 wait_on_page(page);
  91                                 goto repeat;
  92                         }
  93                         inode->i_nrpages--;
  94                         if ((*p = page->next) != NULL)
  95                                 (*p)->prev = page->prev;
  96                         page->dirty = 0;
  97                         page->next = NULL;
  98                         page->prev = NULL;
  99                         remove_page_from_hash_queue(page);
 100                         page->inode = NULL;
 101                         free_page(page_address(page));
 102                         continue;
 103                 }
 104                 p = &page->next;
 105                 offset = start - offset;
 106                 /* partial truncate, clear end of page */
 107                 if (offset < PAGE_SIZE)
 108                         memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
 109         }
 110 }
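
/*
 * Illustrative sketch (not part of the original file): the partial-page
 * case above, in isolation.  When the file is cut back to length "start"
 * and a cached page begins at "page_offset" < start, every byte from
 * (start - page_offset) to the end of the page must be cleared -- but only
 * if that position actually falls inside the page.  Plain user-space C;
 * DEMO_PAGE_SIZE and the function name are made up for the example.
 */
#include <string.h>

#define DEMO_PAGE_SIZE 4096UL

static void demo_truncate_partial_page(char *page_data,
	unsigned long page_offset, unsigned long start)
{
	unsigned long first_clear = start - page_offset;  /* caller guarantees start > page_offset */

	if (first_clear < DEMO_PAGE_SIZE)
		memset(page_data + first_clear, 0, DEMO_PAGE_SIZE - first_clear);
}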
 111 
 112 int shrink_mmap(int priority, int dma)
 113 {
 114         static int clock = 0;
 115         struct page * page;
 116         unsigned long limit = MAP_NR(high_memory);
 117         struct buffer_head *tmp, *bh;
 118 
 119         priority = (limit<<2) >> priority;
 120         page = mem_map + clock;
 121         while (priority-- > 0) {
 122                 if (page->locked)
 123                         goto next;
 124                 if (dma && !page->dma)
 125                         goto next;
 126                 /* First of all, regenerate the page's referenced bit
 127                    from any buffers in the page */
 128                 bh = page->buffers;
 129                 if (bh) {
 130                         tmp = bh;
 131                         do {
 132                                 if (buffer_touched(tmp)) {
 133                                         clear_bit(BH_Touched, &tmp->b_state);
 134                                         page->referenced = 1;
 135                                 }
 136                                 tmp = tmp->b_this_page;
 137                         } while (tmp != bh);
 138                 }
 139 
 140                 /* We can't throw away shared pages, but we do mark
 141                    them as referenced.  This relies on the fact that
 142                    no page is currently in both the page cache and the
 143                    buffer cache; we'd have to modify the following
 144                    test to allow for that case. */
 145                 if (page->count > 1)
 146                         page->referenced = 1;
 147                 else if (page->referenced)
 148                         page->referenced = 0;
 149                 else if (page->count) {
 150                         /* The page is an old, unshared page --- try
 151                            to discard it. */
 152                         if (page->inode) {
 153                                 remove_page_from_hash_queue(page);
 154                                 remove_page_from_inode_queue(page);
 155                                 free_page(page_address(page));
 156                                 return 1;
 157                         }
 158                         if (bh && try_to_free_buffer(bh, &bh, 6))
 159                                 return 1;
 160                 }
 161 next:
 162                 page++;
 163                 clock++;
 164                 if (clock >= limit) {
 165                         clock = 0;
 166                         page = mem_map;
 167                 }
 168         }
 169         return 0;
 170 }
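
/*
 * Illustrative sketch (not part of the original file): the "clock" policy
 * used by shrink_mmap() in miniature.  A static hand sweeps a fixed array
 * under a scan budget; a referenced entry merely loses its referenced bit
 * (a second chance), an unreferenced in-use entry is reclaimed.  All
 * demo_* names are hypothetical user-space stand-ins.
 */
#define DEMO_NR_FRAMES 1024

struct demo_frame {
	int in_use;
	int referenced;
};

static struct demo_frame demo_frames[DEMO_NR_FRAMES];

static int demo_clock_reclaim(int budget)
{
	static int hand = 0;

	while (budget-- > 0) {
		struct demo_frame *f = &demo_frames[hand];

		hand = (hand + 1) % DEMO_NR_FRAMES;
		if (!f->in_use)
			continue;
		if (f->referenced) {
			f->referenced = 0;	/* spare it this pass */
			continue;
		}
		f->in_use = 0;			/* reclaim this frame */
		return 1;
	}
	return 0;				/* found nothing within the budget */
}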
 171 
 172 /*
 173  * This is called from try_to_swap_out() when we try to get rid of some
 174  * pages..  If we're unmapping the last occurrence of this page, we also
 175  * free it from the page hash-queues etc, as we don't want to keep it
 176  * in-core unnecessarily.
 177  */
 178 unsigned long page_unuse(unsigned long page)
 179 {
 180         struct page * p = mem_map + MAP_NR(page);
 181         int count = p->count;
 182 
 183         if (count != 2)
 184                 return count;
 185         if (!p->inode)
 186                 return count;
 187         remove_page_from_hash_queue(p);
 188         remove_page_from_inode_queue(p);
 189         free_page(page);
 190         return 1;
 191 }
 192 
 193 /*
 194  * Update a page cache copy, when we're doing a "write()" system call
 195  * See also "update_vm_cache()".
 196  */
 197 void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
 198 {
 199         unsigned long offset, len;
 200 
 201         offset = (pos & ~PAGE_MASK);
 202         pos = pos & PAGE_MASK;
 203         len = PAGE_SIZE - offset;
 204         do {
 205                 struct page * page;
 206 
 207                 if (len > count)
 208                         len = count;
 209                 page = find_page(inode, pos);
 210                 if (page) {
 211                         unsigned long addr;
 212 
 213                         wait_on_page(page);
 214                         addr = page_address(page);
 215                         memcpy((void *) (offset + addr), buf, len);
 216                         free_page(addr);
 217                 }
 218                 count -= len;
 219                 buf += len;
 220                 len = PAGE_SIZE;
 221                 offset = 0;
 222                 pos += PAGE_SIZE;
 223         } while (count);
 224 }
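
/*
 * Illustrative sketch (not part of the original file): the segmentation
 * logic of update_vm_cache() on its own.  A byte range (pos, count) is cut
 * into per-page pieces: the first piece runs from pos to the end of its
 * page, every later piece starts on a page boundary.  The callback stands
 * in for "copy into the cached page, if one exists"; it and the function
 * name are hypothetical.  DEMO_PAGE_SIZE as in the earlier sketch; assumes
 * count > 0, as update_vm_cache() itself does.
 */
static void demo_for_each_page_segment(unsigned long pos, unsigned long count,
	const char *buf,
	void (*update)(unsigned long page_start, unsigned long offset_in_page,
		       const char *src, unsigned long len))
{
	unsigned long offset = pos & (DEMO_PAGE_SIZE - 1);
	unsigned long len = DEMO_PAGE_SIZE - offset;

	pos &= ~(DEMO_PAGE_SIZE - 1);
	do {
		if (len > count)
			len = count;
		update(pos, offset, buf, len);
		count -= len;
		buf += len;
		len = DEMO_PAGE_SIZE;
		offset = 0;
		pos += DEMO_PAGE_SIZE;
	} while (count);
}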
 225 
 226 /*
 227  * Try to read ahead in the file. "page_cache" is a potentially free page
 228  * that we could use for the cache (if it is 0 we can try to create one,
 229  * this is all overlapped with the IO on the previous page finishing anyway)
 230  */
 231 static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
 232 {
 233         struct page * page;
 234 
 235         offset &= PAGE_MASK;
 236         if (!page_cache) {
 237                 page_cache = __get_free_page(GFP_KERNEL);
 238                 if (!page_cache)
 239                         return 0;
 240         }
 241         if (offset >= inode->i_size)
 242                 return page_cache;
 243 #if 1
 244         page = find_page(inode, offset);
 245         if (page) {
 246                 page->count--;
 247                 return page_cache;
 248         }
 249         /*
 250          * Ok, add the new page to the hash-queues...
 251          */
 252         page = mem_map + MAP_NR(page_cache);
 253         page->count++;
 254         page->uptodate = 0;
 255         page->error = 0;
 256         page->offset = offset;
 257         add_page_to_inode_queue(inode, page);
 258         add_page_to_hash_queue(inode, page);
 259 
 260         inode->i_op->readpage(inode, page);
 261 
 262         free_page(page_cache);
 263         return 0;
 264 #else
 265         return page_cache;
 266 #endif
 267 }
 268 
 269 /* 
 270  * Wait for IO to complete on a locked page.
 271  */
 272 void __wait_on_page(struct page *page)
 273 {
 274         struct wait_queue wait = { current, NULL };
 275 
 276         page->count++;
 277         add_wait_queue(&page->wait, &wait);
 278 repeat:
 279         run_task_queue(&tq_disk);
 280         current->state = TASK_UNINTERRUPTIBLE;
 281         if (page->locked) {
 282                 schedule();
 283                 goto repeat;
 284         }
 285         remove_wait_queue(&page->wait, &wait);
 286         page->count--;
 287         current->state = TASK_RUNNING;
 288 }
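
/*
 * Illustrative sketch (not part of the original file): the sleep/wake
 * pattern above -- queue yourself, re-test the condition, sleep if it
 * still holds, and test again after every wakeup -- expressed with a POSIX
 * condition variable instead of kernel wait queues.  This is an analogue,
 * not the kernel mechanism; all demo_* names are hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;
static int demo_page_locked;			/* stands in for page->locked */

static void demo_wait_on_page(void)
{
	pthread_mutex_lock(&demo_lock);
	while (demo_page_locked)		/* re-check after every wakeup */
		pthread_cond_wait(&demo_cond, &demo_lock);
	pthread_mutex_unlock(&demo_lock);
}

static void demo_unlock_page(void)
{
	pthread_mutex_lock(&demo_lock);
	demo_page_locked = 0;
	pthread_cond_broadcast(&demo_cond);	/* wake every waiter, like wake_up() */
	pthread_mutex_unlock(&demo_lock);
}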
 289 
 290 
 291 /*
 292  * This is a generic file read routine, and uses the
 293  * inode->i_op->readpage() function for the actual low-level
 294  * stuff.
 295  *
 296  * This is really ugly. But the goto's actually try to clarify some
 297  * of the logic when it comes to error handling etc.
 298  */
 299 #define MAX_READAHEAD (PAGE_SIZE*8)
 300 int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
 301 {
 302         int error, read;
 303         unsigned long pos, page_cache;
 304         unsigned long ra_pos, ra_end;   /* read-ahead */
 305         
 306         if (count <= 0)
 307                 return 0;
 308         error = 0;
 309         read = 0;
 310         page_cache = 0;
 311 
 312         pos = filp->f_pos;
 313         ra_pos = filp->f_reada;
 314         ra_end = MAX_READAHEAD;
 315         if (!ra_pos) {
 316                 ra_pos = (pos + PAGE_SIZE) & PAGE_MASK;
 317                 ra_end = 0;
 318         }
 319         ra_end += pos + count;
 320 
 321         for (;;) {
 322                 struct page *page;
 323                 unsigned long offset, addr, nr;
 324 
 325                 if (pos >= inode->i_size)
 326                         break;
 327                 offset = pos & ~PAGE_MASK;
 328                 nr = PAGE_SIZE - offset;
 329                 /*
 330                  * Try to find the data in the page cache..
 331                  */
 332                 page = find_page(inode, pos & PAGE_MASK);
 333                 if (page)
 334                         goto found_page;
 335 
 336                 /*
 337                  * Ok, it wasn't cached, so we need to create a new
 338                  * page..
 339                  */
 340                 if (page_cache)
 341                         goto new_page;
 342 
 343                 error = -ENOMEM;
 344                 page_cache = __get_free_page(GFP_KERNEL);
 345                 if (!page_cache)
 346                         break;
 347                 error = 0;
 348 
 349                 /*
 350                  * That could have slept, so we need to check again..
 351                  */
 352                 if (pos >= inode->i_size)
 353                         break;
 354                 page = find_page(inode, pos & PAGE_MASK);
 355                 if (!page)
 356                         goto new_page;
 357 
 358 found_page:
 359                 addr = page_address(page);
 360                 if (nr > count)
 361                         nr = count;
 362 
 363                 /*
 364                  * We may want to do read-ahead.. Do this only
 365                  * if we're waiting for the current page to be
 366                  * filled in, and if
 367                  *  - we're going to read more than this page
 368                  *  - if "f_reada" is set
 369                  */
 370                 if (page->locked) {
 371                         while (ra_pos < ra_end) {
 372                                 page_cache = try_to_read_ahead(inode, ra_pos, page_cache);
 373                                 ra_pos += PAGE_SIZE;
 374                                 if (!page->locked)
 375                                         goto unlocked_page;
 376                         }
 377                         __wait_on_page(page);
 378                 }
 379 unlocked_page:
 380                 if (!page->uptodate)
 381                         goto read_page;
 382                 if (nr > inode->i_size - pos)
 383                         nr = inode->i_size - pos;
 384                 memcpy_tofs(buf, (void *) (addr + offset), nr);
 385                 free_page(addr);
 386                 buf += nr;
 387                 pos += nr;
 388                 read += nr;
 389                 count -= nr;
 390                 if (count)
 391                         continue;
 392                 break;
 393         
 394 
 395 new_page:
 396                 /*
 397                  * Ok, add the new page to the hash-queues...
 398                  */
 399                 addr = page_cache;
 400                 page = mem_map + MAP_NR(page_cache);
 401                 page_cache = 0;
 402                 page->count++;
 403                 page->uptodate = 0;
 404                 page->error = 0;
 405                 page->offset = pos & PAGE_MASK;
 406                 add_page_to_inode_queue(inode, page);
 407                 add_page_to_hash_queue(inode, page);
 408 
 409                 /*
 410                  * Error handling is tricky. If we get a read error,
 411                  * the cached page stays in the cache (but uptodate=0),
 412                  * and the next process that accesses it will try to
 413                  * re-read it. This is needed for NFS etc, where the
 414                  * identity of the reader can decide if we can read the
 415                  * page or not..
 416                  */
 417 read_page:
 418                 error = inode->i_op->readpage(inode, page);
 419                 if (!error)
 420                         goto found_page;
 421                 free_page(addr);
 422                 break;
 423         }
 424 
 425         if (read) {
 426                 error = read;
 427 
 428                 /*
 429                  * Start some extra read-ahead if we haven't already
 430                  * read ahead enough..
 431                  */
 432                 while (ra_pos < ra_end) {
 433                         page_cache = try_to_read_ahead(inode, ra_pos, page_cache);
 434                         ra_pos += PAGE_SIZE;
 435                 }
 436                 run_task_queue(&tq_disk);
 437 
 438                 filp->f_pos = pos;
 439                 filp->f_reada = ra_pos;
 440                 if (!IS_RDONLY(inode)) {
 441                         inode->i_atime = CURRENT_TIME;
 442                         inode->i_dirt = 1;
 443                 }
 444         }
 445         if (page_cache)
 446                 free_page(page_cache);
 447 
 448         return error;
 449 }
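
/*
 * Illustrative sketch (not part of the original file): how the read-ahead
 * window [ra_pos, ra_end) used above is derived.  If the previous read
 * left a read-ahead position in f_reada, the window is widened by
 * MAX_READAHEAD beyond the current request; otherwise read-ahead starts at
 * the next page boundary and covers the request only.  Names are
 * user-space stand-ins; DEMO_PAGE_SIZE as in the earlier sketch.
 */
struct demo_ra_window {
	unsigned long ra_pos;
	unsigned long ra_end;
};

static struct demo_ra_window demo_readahead_window(unsigned long pos,
	unsigned long count, unsigned long f_reada)
{
	struct demo_ra_window w;

	w.ra_pos = f_reada;
	w.ra_end = 8 * DEMO_PAGE_SIZE;		/* MAX_READAHEAD above */
	if (!w.ra_pos) {
		w.ra_pos = (pos + DEMO_PAGE_SIZE) & ~(DEMO_PAGE_SIZE - 1);
		w.ra_end = 0;
	}
	w.ra_end += pos + count;
	return w;
}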
 450 
 451 /*
 452  * Find a cached page and wait for it to become up-to-date, return
 453  * the page address.  Increments the page count.
 454  */
 455 static inline unsigned long fill_page(struct inode * inode, unsigned long offset)
 456 {
 457         struct page * page;
 458         unsigned long new_page;
 459 
 460         page = find_page(inode, offset);
 461         if (page)
 462                 goto found_page_dont_free;
 463         new_page = __get_free_page(GFP_KERNEL);
 464         page = find_page(inode, offset);
 465         if (page)
 466                 goto found_page;
 467         if (!new_page)
 468                 return 0;
 469         page = mem_map + MAP_NR(new_page);
 470         new_page = 0;
 471         page->count++;
 472         page->uptodate = 0;
 473         page->error = 0;
 474         page->offset = offset;
 475         add_page_to_inode_queue(inode, page);
 476         add_page_to_hash_queue(inode, page);
 477         inode->i_op->readpage(inode, page);
 478         if (page->locked)
 479                 new_page = try_to_read_ahead(inode, offset + PAGE_SIZE, 0);
 480 found_page:
 481         if (new_page)
 482                 free_page(new_page);
 483 found_page_dont_free:
 484         wait_on_page(page);
 485         return page_address(page);
 486 }
 487 
 488 /*
 489  * Semantics for shared and private memory areas are different past the end
 490  * of the file. A shared mapping past the last page of the file is an error
  491  * and results in a SIGBUS, while a private mapping just maps in a zero page.
 492  */
 493 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
 494 {
 495         unsigned long offset;
 496         struct inode * inode = area->vm_inode;
 497         unsigned long page;
 498 
 499         offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
 500         if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
 501                 return 0;
 502 
 503         page = fill_page(inode, offset);
 504         if (page && no_share) {
 505                 unsigned long new_page = __get_free_page(GFP_KERNEL);
 506                 if (new_page)
 507                         memcpy((void *) new_page, (void *) page, PAGE_SIZE);
 508                 free_page(page);
 509                 return new_page;
 510         }
 511         return page;
 512 }
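
/*
 * Illustrative sketch (not part of the original file): the address
 * arithmetic at the top of filemap_nopage().  The faulting address is
 * rounded down to its page, the start of the mapping in the address space
 * (vm_start) is subtracted, and the mapping's starting file offset
 * (vm_offset) is added back, giving the page-aligned file offset to read.
 * DEMO_PAGE_SIZE as in the earlier sketch; the function name is made up.
 */
static unsigned long demo_fault_to_file_offset(unsigned long address,
	unsigned long vm_start, unsigned long vm_offset)
{
	return (address & ~(DEMO_PAGE_SIZE - 1)) - vm_start + vm_offset;
}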
 513 
 514 /*
 515  * Tries to write a shared mapped page to its backing store. May return -EIO
 516  * if the disk is full.
 517  */
 518 static inline int do_write_page(struct inode * inode, struct file * file,
 519         const char * page, unsigned long offset)
 520 {
 521         int old_fs, retval;
 522         unsigned long size;
 523 
 524         size = offset + PAGE_SIZE;
 525         /* refuse to extend file size.. */
 526         if (S_ISREG(inode->i_mode)) {
 527                 if (size > inode->i_size)
 528                         size = inode->i_size;
 529                 /* Ho humm.. We should have tested for this earlier */
 530                 if (size < offset)
 531                         return -EIO;
 532         }
 533         size -= offset;
 534         old_fs = get_fs();
 535         set_fs(KERNEL_DS);
 536         retval = -EIO;
 537         if (size == file->f_op->write(inode, file, (const char *) page, size))
 538                 retval = 0;
 539         set_fs(old_fs);
 540         return retval;
 541 }
 542 
 543 static int filemap_write_page(struct vm_area_struct * vma,
 544         unsigned long offset,
 545         unsigned long page)
 546 {
 547         int result;
 548         struct file file;
 549         struct inode * inode;
 550         struct buffer_head * bh;
 551 
 552         bh = mem_map[MAP_NR(page)].buffers;
 553         if (bh) {
 554                 /* whee.. just mark the buffer heads dirty */
 555                 struct buffer_head * tmp = bh;
 556                 do {
 557                         mark_buffer_dirty(tmp, 0);
 558                         tmp = tmp->b_this_page;
 559                 } while (tmp != bh);
 560                 return 0;
 561         }
 562 
 563         inode = vma->vm_inode;
 564         file.f_op = inode->i_op->default_file_ops;
 565         if (!file.f_op->write)
 566                 return -EIO;
 567         file.f_mode = 3;
 568         file.f_flags = 0;
 569         file.f_count = 1;
 570         file.f_inode = inode;
 571         file.f_pos = offset;
 572         file.f_reada = 0;
 573 
 574         down(&inode->i_sem);
 575         result = do_write_page(inode, &file, (const char *) page, offset);
 576         up(&inode->i_sem);
 577         return result;
 578 }
 579 
 580 
 581 /*
 582  * Swapping to a shared file: while we're busy writing out the page
 583  * (and the page still exists in memory), we save the page information
 584  * in the page table, so that "filemap_swapin()" can re-use the page
 585  * immediately if it is called while we're busy swapping it out..
 586  *
 587  * Once we've written it all out, we mark the page entry "empty", which
 588  * will result in a normal page-in (instead of a swap-in) from the now
 589  * up-to-date disk file.
 590  */
 591 int filemap_swapout(struct vm_area_struct * vma,
 592         unsigned long offset,
 593         pte_t *page_table)
 594 {
 595         int error;
 596         unsigned long page = pte_page(*page_table);
 597         unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));
 598 
 599         flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset));
 600         set_pte(page_table, __pte(entry));
 601         flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset));
 602         error = filemap_write_page(vma, offset, page);
 603         if (pte_val(*page_table) == entry)
 604                 pte_clear(page_table);
 605         return error;
 606 }
 607 
 608 /*
 609  * filemap_swapin() is called only if we have something in the page
 610  * tables that is non-zero (but not present), which we know to be the
 611  * page index of a page that is busy being swapped out (see above).
 612  * So we just use it directly..
 613  */
 614 static pte_t filemap_swapin(struct vm_area_struct * vma,
 615         unsigned long offset,
 616         unsigned long entry)
 617 {
 618         unsigned long page = SWP_OFFSET(entry);
 619 
 620         mem_map[page].count++;
 621         page = (page << PAGE_SHIFT) + PAGE_OFFSET;
 622         return mk_pte(page,vma->vm_page_prot);
 623 }
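
/*
 * Illustrative sketch (not part of the original file): SWP_ENTRY() and
 * SWP_OFFSET() pack a (type, offset) pair into a single word stored in a
 * non-present pte and unpack it again, which is what the swapout/swapin
 * pair above relies on.  The real layout is architecture specific
 * (asm/pgtable.h); the bit assignment below is an assumption chosen only
 * to show the round trip, keeping bit 0 clear so the entry can never look
 * like a present pte.
 */
#define DEMO_SWP_TYPE_BITS	7
#define DEMO_SWP_ENTRY(type, offset) \
	((((unsigned long) (type)) << 1) | (((unsigned long) (offset)) << (DEMO_SWP_TYPE_BITS + 1)))
#define DEMO_SWP_TYPE(entry)	(((entry) >> 1) & ((1UL << DEMO_SWP_TYPE_BITS) - 1))
#define DEMO_SWP_OFFSET(entry)	((entry) >> (DEMO_SWP_TYPE_BITS + 1))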
 624 
 625 
 626 static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
 627         unsigned long address, unsigned int flags)
 628 {
 629         pte_t pte = *ptep;
 630         unsigned long page;
 631         int error;
 632 
 633         if (!(flags & MS_INVALIDATE)) {
 634                 if (!pte_present(pte))
 635                         return 0;
 636                 if (!pte_dirty(pte))
 637                         return 0;
 638                 flush_cache_page(vma, address);
 639                 set_pte(ptep, pte_mkclean(pte));
 640                 flush_tlb_page(vma, address);
 641                 page = pte_page(pte);
 642                 mem_map[MAP_NR(page)].count++;
 643         } else {
 644                 if (pte_none(pte))
 645                         return 0;
 646                 flush_cache_page(vma, address);
 647                 pte_clear(ptep);
 648                 flush_tlb_page(vma, address);
 649                 if (!pte_present(pte)) {
 650                         swap_free(pte_val(pte));
 651                         return 0;
 652                 }
 653                 page = pte_page(pte);
 654                 if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
 655                         free_page(page);
 656                         return 0;
 657                 }
 658         }
 659         error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
 660         free_page(page);
 661         return error;
 662 }
 663 
 664 static inline int filemap_sync_pte_range(pmd_t * pmd,
 665         unsigned long address, unsigned long size, 
 666         struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
 667 {
 668         pte_t * pte;
 669         unsigned long end;
 670         int error;
 671 
 672         if (pmd_none(*pmd))
 673                 return 0;
 674         if (pmd_bad(*pmd)) {
 675                 printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
 676                 pmd_clear(pmd);
 677                 return 0;
 678         }
 679         pte = pte_offset(pmd, address);
 680         offset += address & PMD_MASK;
 681         address &= ~PMD_MASK;
 682         end = address + size;
 683         if (end > PMD_SIZE)
 684                 end = PMD_SIZE;
 685         error = 0;
 686         do {
 687                 error |= filemap_sync_pte(pte, vma, address + offset, flags);
 688                 address += PAGE_SIZE;
 689                 pte++;
 690         } while (address < end);
 691         return error;
 692 }
 693 
 694 static inline int filemap_sync_pmd_range(pgd_t * pgd,
 695         unsigned long address, unsigned long size, 
 696         struct vm_area_struct *vma, unsigned int flags)
 697 {
 698         pmd_t * pmd;
 699         unsigned long offset, end;
 700         int error;
 701 
 702         if (pgd_none(*pgd))
 703                 return 0;
 704         if (pgd_bad(*pgd)) {
 705                 printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
 706                 pgd_clear(pgd);
 707                 return 0;
 708         }
 709         pmd = pmd_offset(pgd, address);
 710         offset = address & PMD_MASK;
 711         address &= ~PMD_MASK;
 712         end = address + size;
 713         if (end > PGDIR_SIZE)
 714                 end = PGDIR_SIZE;
 715         error = 0;
 716         do {
 717                 error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
 718                 address = (address + PMD_SIZE) & PMD_MASK;
 719                 pmd++;
 720         } while (address < end);
 721         return error;
 722 }
 723 
 724 static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
 725         size_t size, unsigned int flags)
 726 {
 727         pgd_t * dir;
 728         unsigned long end = address + size;
 729         int error = 0;
 730 
 731         dir = pgd_offset(current->mm, address);
 732         flush_cache_range(vma->vm_mm, end - size, end);
 733         while (address < end) {
 734                 error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
 735                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 736                 dir++;
 737         }
 738         flush_tlb_range(vma->vm_mm, end - size, end);
 739         return error;
 740 }
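
/*
 * Illustrative sketch (not part of the original file): the range-splitting
 * pattern the three sync routines above share.  A [address, end) range is
 * walked one table entry at a time; within each entry the sub-range is
 * clamped to that entry's span, then the address is rounded up to the next
 * span boundary.  DEMO_SPAN stands in for PMD_SIZE or PGDIR_SIZE and the
 * callback is hypothetical.
 */
#define DEMO_SPAN (1UL << 22)		/* e.g. 4 MB, a classic i386 page-directory entry */

static void demo_walk_range(unsigned long address, unsigned long end,
	void (*visit)(unsigned long start, unsigned long len))
{
	while (address < end) {
		unsigned long next = (address + DEMO_SPAN) & ~(DEMO_SPAN - 1);

		if (next > end)
			next = end;
		visit(address, next - address);
		address = next;
	}
}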
 741 
 742 /*
 743  * This handles (potentially partial) area unmaps..
 744  */
 745 static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
 746 {
 747         filemap_sync(vma, start, len, MS_ASYNC);
 748 }
 749 
 750 /*
 751  * Shared mappings need to be able to do the right thing at
 752  * close/unmap/sync. They will also use the private file as
 753  * backing-store for swapping..
 754  */
 755 static struct vm_operations_struct file_shared_mmap = {
 756         NULL,                   /* no special open */
 757         NULL,                   /* no special close */
 758         filemap_unmap,          /* unmap - we need to sync the pages */
 759         NULL,                   /* no special protect */
 760         filemap_sync,           /* sync */
 761         NULL,                   /* advise */
 762         filemap_nopage,         /* nopage */
 763         NULL,                   /* wppage */
 764         filemap_swapout,        /* swapout */
 765         filemap_swapin,         /* swapin */
 766 };
 767 
 768 /*
 769  * Private mappings just need to be able to load in the map.
 770  *
 771  * (This is actually used for shared mappings as well, if we
 772  * know they can't ever get write permissions..)
 773  */
 774 static struct vm_operations_struct file_private_mmap = {
 775         NULL,                   /* open */
 776         NULL,                   /* close */
 777         NULL,                   /* unmap */
 778         NULL,                   /* protect */
 779         NULL,                   /* sync */
 780         NULL,                   /* advise */
 781         filemap_nopage,         /* nopage */
 782         NULL,                   /* wppage */
 783         NULL,                   /* swapout */
 784         NULL,                   /* swapin */
 785 };
 786 
 787 /* This is used for a general mmap of a disk file */
 788 int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
 789 {
 790         struct vm_operations_struct * ops;
 791 
 792         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
 793                 ops = &file_shared_mmap;
 794                 /* share_page() can only guarantee proper page sharing if
 795                  * the offsets are all page aligned. */
 796                 if (vma->vm_offset & (PAGE_SIZE - 1))
 797                         return -EINVAL;
 798         } else {
 799                 ops = &file_private_mmap;
 800                 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 801                         return -EINVAL;
 802         }
 803         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 804                 return -EACCES;
 805         if (!inode->i_op || !inode->i_op->readpage)
 806                 return -ENOEXEC;
 807         if (!IS_RDONLY(inode)) {
 808                 inode->i_atime = CURRENT_TIME;
 809                 inode->i_dirt = 1;
 810         }
 811         vma->vm_inode = inode;
 812         inode->i_count++;
 813         vma->vm_ops = ops;
 814         return 0;
 815 }
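
/*
 * Illustrative sketch (not part of the original file): what this routine
 * serves from user space.  A regular file is mapped shared at a page-
 * aligned offset -- the same alignment generic_file_mmap() insists on for
 * writable shared mappings -- written through the mapping, and flushed
 * with msync().  Plain POSIX C; the path name is made up.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/mmap-demo", O_RDWR | O_CREAT, 0600);
	char *map;

	if (fd < 0 || ftruncate(fd, pagesize) < 0)
		return 1;
	/* offset 0 is trivially page aligned; any non-zero offset must be too */
	map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;
	strcpy(map, "written through a shared file mapping\n");
	msync(map, pagesize, MS_SYNC);	/* write the dirty page back to the file */
	munmap(map, pagesize);
	close(fd);
	return 0;
}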
 816 
 817 
 818 /*
 819  * The msync() system call.
 820  */
 821 
 822 static int msync_interval(struct vm_area_struct * vma,
 823         unsigned long start, unsigned long end, int flags)
 824 {
 825         if (!vma->vm_inode)
 826                 return 0;
 827         if (vma->vm_ops->sync) {
 828                 int error;
 829                 error = vma->vm_ops->sync(vma, start, end-start, flags);
 830                 if (error)
 831                         return error;
 832                 if (flags & MS_SYNC)
 833                         return file_fsync(vma->vm_inode, NULL);
 834                 return 0;
 835         }
 836         return 0;
 837 }
 838 
 839 asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
 840 {
 841         unsigned long end;
 842         struct vm_area_struct * vma;
 843         int unmapped_error, error;
 844 
 845         if (start & ~PAGE_MASK)
 846                 return -EINVAL;
 847         len = (len + ~PAGE_MASK) & PAGE_MASK;
 848         end = start + len;
 849         if (end < start)
 850                 return -EINVAL;
 851         if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
 852                 return -EINVAL;
 853         if (end == start)
 854                 return 0;
 855         /*
 856          * If the interval [start,end) covers some unmapped address ranges,
 857          * just ignore them, but return -EFAULT at the end.
 858          */
 859         vma = find_vma(current, start);
 860         unmapped_error = 0;
 861         for (;;) {
 862                 /* Still start < end. */
 863                 if (!vma)
 864                         return -EFAULT;
 865                 /* Here start < vma->vm_end. */
 866                 if (start < vma->vm_start) {
 867                         unmapped_error = -EFAULT;
 868                         start = vma->vm_start;
 869                 }
 870                 /* Here vma->vm_start <= start < vma->vm_end. */
 871                 if (end <= vma->vm_end) {
 872                         if (start < end) {
 873                                 error = msync_interval(vma, start, end, flags);
 874                                 if (error)
 875                                         return error;
 876                         }
 877                         return unmapped_error;
 878                 }
 879                 /* Here vma->vm_start <= start < vma->vm_end < end. */
 880                 error = msync_interval(vma, start, vma->vm_end, flags);
 881                 if (error)
 882                         return error;
 883                 start = vma->vm_end;
 884                 vma = vma->vm_next;
 885         }
 886 }
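
/*
 * Illustrative sketch (not part of the original file): the argument
 * checking at the top of sys_msync() in isolation.  The start address must
 * be page aligned, the length is rounded up to whole pages, wrap-around of
 * start+len is rejected, and only the three MS_* bits are accepted.  The
 * flag values and names below are user-space stand-ins; DEMO_PAGE_SIZE as
 * in the earlier sketch.
 */
#include <errno.h>

#define DEMO_MS_ALL (0x1 | 0x2 | 0x4)	/* MS_ASYNC | MS_INVALIDATE | MS_SYNC */

static int demo_msync_check(unsigned long start, unsigned long len, int flags,
	unsigned long *end)
{
	if (start & (DEMO_PAGE_SIZE - 1))
		return -EINVAL;
	len = (len + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1);
	*end = start + len;
	if (*end < start)
		return -EINVAL;
	if (flags & ~DEMO_MS_ALL)
		return -EINVAL;
	return 0;
}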
