root/mm/memory.c


DEFINITIONS

This source file includes the following definitions.
  1. oom
  2. free_one_pte
  3. free_one_pmd
  4. free_one_pgd
  5. clear_page_tables
  6. free_page_tables
  7. clone_page_tables
  8. copy_one_pte
  9. copy_one_pmd
  10. copy_one_pgd
  11. copy_page_tables
  12. forget_pte
  13. unmap_pte_range
  14. unmap_pmd_range
  15. unmap_page_range
  16. zeromap_pte_range
  17. zeromap_pmd_range
  18. zeromap_page_range
  19. remap_pte_range
  20. remap_pmd_range
  21. remap_page_range
  22. put_page
  23. put_dirty_page
  24. do_wp_page
  25. verify_area
  26. get_empty_page
  27. try_to_share
  28. share_page
  29. get_empty_pgtable
  30. do_swap_page
  31. do_no_page
  32. handle_pte_fault
  33. handle_mm_fault

/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

unsigned long high_memory = 0;

/*
 * The free_area_list arrays point to the queue heads of the free areas
 * of different sizes
 */
int nr_swap_pages = 0;
int nr_free_pages = 0;
struct mem_list free_area_list[NR_MEM_LISTS];
unsigned char * free_area_map[NR_MEM_LISTS];

#define copy_page(from,to) memcpy((void *) to, (void *) from, PAGE_SIZE)

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

mem_map_t * mem_map = NULL;

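/*
 * Illustrative sketch (not part of the kernel proper): how the
 * three-level page-table primitives used throughout this file compose
 * to translate a user virtual address into a physical address.  The
 * pgd_bad()/pmd_bad() checks a real walker would make are omitted, and
 * "follow_page" is just an illustrative name.
 */
#if 0
static unsigned long follow_page(struct task_struct * tsk, unsigned long address)
{
        pgd_t * pgd = pgd_offset(tsk, address);
        pmd_t * pmd;
        pte_t * pte;

        if (pgd_none(*pgd))
                return 0;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                return 0;
        pte = pte_offset(pmd, address);
        if (!pte_present(*pte))
                return 0;
        return pte_page(*pte) + (address & ~PAGE_MASK);
}
#endif
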
/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGKILL.
 */
void oom(struct task_struct * task)
{
        printk("\nOut of memory for %s.\n", task->comm);
        task->sigaction[SIGKILL-1].sa_handler = NULL;
        task->blocked &= ~(1<<(SIGKILL-1));
        send_sig(SIGKILL,task,1);
}

static inline void free_one_pte(pte_t * page_table)
{
        pte_t page = *page_table;

        if (pte_none(page))
                return;
        pte_clear(page_table);
        if (!pte_present(page)) {
                swap_free(pte_val(page));
                return;
        }
        free_page(pte_page(page));
        return;
}

static inline void free_one_pmd(pmd_t * dir)
{
        int j;
        pte_t * pte;

        if (pmd_none(*dir))
                return;
        if (pmd_bad(*dir)) {
                printk("free_one_pmd: bad directory entry %08lx\n", pmd_val(*dir));
                pmd_clear(dir);
                return;
        }
        pte = pte_offset(dir, 0);
        pmd_clear(dir);
        if (pte_inuse(pte)) {
                pte_free(pte);
                return;
        }
        for (j = 0; j < PTRS_PER_PTE ; j++)
                free_one_pte(pte+j);
        pte_free(pte);
}

static inline void free_one_pgd(pgd_t * dir)
{
        pmd_t * pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("free_one_pgd: bad directory entry %08lx\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, 0);
        pgd_clear(dir);
        if (!pmd_inuse(pmd)) {
                int j;
                for (j = 0; j < PTRS_PER_PMD ; j++)
                        free_one_pmd(pmd+j);
        }
        pmd_free(pmd);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way. Note that
 * unlike 'free_page_tables()', this function still leaves a valid
 * page-table-tree in memory: it just removes the user pages. The two
 * functions are similar, but there is a fundamental difference.
 */
void clear_page_tables(struct task_struct * tsk)
{
        int i;
        pgd_t * page_dir;

        if (!tsk)
                return;
        if (tsk == task[0])
                panic("task[0] (swapper) doesn't support exec()\n");
        page_dir = pgd_offset(tsk, 0);
        if (!page_dir || page_dir == swapper_pg_dir) {
                printk("%s trying to clear kernel page-directory: not good\n", tsk->comm);
                return;
        }
        if (pgd_inuse(page_dir)) {
                pgd_t * new_pg;

                if (!(new_pg = pgd_alloc())) {
                        oom(tsk);
                        return;
                }
                for (i = USER_PTRS_PER_PGD ; i < PTRS_PER_PGD ; i++)
                        new_pg[i] = page_dir[i];
                SET_PAGE_DIR(tsk, new_pg);
                pgd_free(page_dir);
                return;
        }
        for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
                free_one_pgd(page_dir + i);
        invalidate();
        return;
}

/*
 * This function frees up all page tables of a process when it exits.
 */
void free_page_tables(struct task_struct * tsk)
{
        int i;
        pgd_t * page_dir;

        if (!tsk)
                return;
        if (tsk == task[0]) {
                printk("task[0] (swapper) killed: unable to recover\n");
                panic("Trying to free up swapper memory space");
        }
        page_dir = pgd_offset(tsk, 0);
        if (!page_dir || page_dir == swapper_pg_dir) {
                printk("%s trying to free kernel page-directory: not good\n", tsk->comm);
                return;
        }
        SET_PAGE_DIR(tsk, swapper_pg_dir);
        if (pgd_inuse(page_dir)) {
                pgd_free(page_dir);
                return;
        }
        for (i = 0 ; i < PTRS_PER_PGD ; i++)
                free_one_pgd(page_dir + i);
        pgd_free(page_dir);
        invalidate();
}

/*
 * clone_page_tables() clones the page table for a process - both
 * processes will have the exact same pages in memory. There are
 * probably races in the memory management with cloning, but we'll
 * see..
 */
int clone_page_tables(struct task_struct * tsk)
{
        pgd_t * pg_dir;

        pg_dir = pgd_offset(current, 0);
        pgd_reuse(pg_dir);
        SET_PAGE_DIR(tsk, pg_dir);
        return 0;
}

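/*
 * Copy one page-table entry from parent to child at fork time.  Swap
 * entries just get their swap count duplicated; pages above high_memory
 * and reserved (kernel) pages are shared as-is.  A normal copy-on-write
 * page is write-protected in *both* processes, taken out of the swap
 * cache (and marked dirty, since the cached copy can no longer be
 * trusted), aged in the child, and has its reference count bumped.
 */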
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte)
{
        pte_t pte = *old_pte;

        if (pte_none(pte))
                return;
        if (!pte_present(pte)) {
                swap_duplicate(pte_val(pte));
                *new_pte = pte;
                return;
        }
        if (pte_page(pte) > high_memory || (mem_map[MAP_NR(pte_page(pte))] & MAP_PAGE_RESERVED)) {
                *new_pte = pte;
                return;
        }
        if (pte_cow(pte))
                pte = pte_wrprotect(pte);
        if (delete_from_swap_cache(pte_page(pte)))
                pte = pte_mkdirty(pte);
        *new_pte = pte_mkold(pte);
        *old_pte = pte;
        mem_map[MAP_NR(pte_page(pte))]++;
}

static inline int copy_one_pmd(pmd_t * old_pmd, pmd_t * new_pmd)
{
        int j;
        pte_t *old_pte, *new_pte;

        if (pmd_none(*old_pmd))
                return 0;
        if (pmd_bad(*old_pmd)) {
                printk("copy_one_pmd: bad page table (%08lx): probable memory corruption\n", pmd_val(*old_pmd));
                pmd_clear(old_pmd);
                return 0;
        }
        old_pte = pte_offset(old_pmd, 0);
        if (pte_inuse(old_pte)) {
                pte_reuse(old_pte);
                *new_pmd = *old_pmd;
                return 0;
        }
        new_pte = pte_alloc(new_pmd, 0);
        if (!new_pte)
                return -ENOMEM;
        for (j = 0 ; j < PTRS_PER_PTE ; j++) {
                copy_one_pte(old_pte, new_pte);
                old_pte++;
                new_pte++;
        }
        return 0;
}

static inline int copy_one_pgd(pgd_t * old_pgd, pgd_t * new_pgd)
{
        int j;
        pmd_t *old_pmd, *new_pmd;

        if (pgd_none(*old_pgd))
                return 0;
        if (pgd_bad(*old_pgd)) {
                printk("copy_one_pgd: bad page table (%p: %08lx): probable memory corruption\n", old_pgd, pgd_val(*old_pgd));
                pgd_clear(old_pgd);
                return 0;
        }
        old_pmd = pmd_offset(old_pgd, 0);
        if (pmd_inuse(old_pmd)) {
                pmd_reuse(old_pmd);
                *new_pgd = *old_pgd;
                return 0;
        }
        new_pmd = pmd_alloc(new_pgd, 0);
        if (!new_pmd)
                return -ENOMEM;
        for (j = 0 ; j < PTRS_PER_PMD ; j++) {
                int error = copy_one_pmd(old_pmd, new_pmd);
                if (error)
                        return error;
                old_pmd++;
                new_pmd++;
        }
        return 0;
}

/*
 * copy_page_tables() just copies the whole process memory range:
 * note the special handling of RESERVED (ie kernel) pages, which
 * means that they are always shared by all processes.
 */
int copy_page_tables(struct task_struct * tsk)
{
        int i;
        pgd_t *old_pgd;
        pgd_t *new_pgd;

        new_pgd = pgd_alloc();
        if (!new_pgd)
                return -ENOMEM;
        SET_PAGE_DIR(tsk, new_pgd);
        old_pgd = pgd_offset(current, 0);
        for (i = 0 ; i < PTRS_PER_PGD ; i++) {
                int errno = copy_one_pgd(old_pgd, new_pgd);
                if (errno) {
                        free_page_tables(tsk);
                        invalidate();
                        return errno;
                }
                old_pgd++;
                new_pgd++;
        }
        invalidate();
        return 0;
}

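/*
 * Sketch of the fork-time choice between the two routines above: a
 * clone that shares the address space reuses the parent's page
 * directory via clone_page_tables(), while a normal fork() duplicates
 * everything (COW-protected) with copy_page_tables().  The flag name
 * and variables below are illustrative only; the real decision is made
 * in kernel/fork.c.
 */
#if 0
        if (clone_flags & COPYVM)
                error = copy_page_tables(tsk);  /* private, copy-on-write */
        else
                error = clone_page_tables(tsk); /* share the page directory */
#endif
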
static inline void forget_pte(pte_t page)
{
        if (pte_none(page))
                return;
        if (pte_present(page)) {
                free_page(pte_page(page));
                if (mem_map[MAP_NR(pte_page(page))] & MAP_PAGE_RESERVED)
                        return;
                if (current->mm->rss <= 0)
                        return;
                current->mm->rss--;
                return;
        }
        swap_free(pte_val(page));
}

static inline void unmap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("unmap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page = *pte;
                pte_clear(pte);
                forget_pte(page);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline void unmap_pmd_range(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("unmap_pmd_range: bad pgd (%08lx)\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                unmap_pte_range(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

/*
 * a more complete version of free_page_tables() which works at
 * page granularity.
 */
int unmap_page_range(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(current, address);
        while (address < end) {
                unmap_pmd_range(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return 0;
}

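/*
 * Typical use (sketch): munmap() tears down the hardware mappings of a
 * dying vma before unlinking and freeing the vm_area_struct itself.
 * The vma splitting and list surgery are elided; "do_munmap" is only an
 * illustrative name here.
 */
#if 0
        /* inside a hypothetical do_munmap(addr, len): */
        unmap_page_range(addr, len);    /* drop ptes, release pages/swap */
#endif
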
static inline void zeromap_pte_range(pte_t * pte, unsigned long address, unsigned long size, pte_t zero_pte)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                *pte = zero_pte;
                forget_pte(oldpage);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size, pte_t zero_pte)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc(pmd, address);
                if (!pte)
                        return -ENOMEM;
                zeromap_pte_range(pte, address, end - address, zero_pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long end = address + size;
        pte_t zero_pte;

        zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE, prot));
        dir = pgd_offset(current, address);
        while (address < end) {
                pmd_t *pmd = pmd_alloc(dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = zeromap_pmd_range(pmd, address, end - address, zero_pte);
                if (error)
                        break;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return error;
}

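/*
 * Typical use (sketch): a /dev/zero style driver can implement mmap()
 * by pointing every pte in the region at the shared, write-protected
 * ZERO_PAGE.  The f_ops->mmap shape below follows this kernel
 * generation; the function name is illustrative.
 */
#if 0
static int example_mmap_zero(struct inode * inode, struct file * file,
        struct vm_area_struct * vma)
{
        if (zeromap_page_range(vma->vm_start,
            vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#endif
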
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                pte_clear(pte);
                if (offset >= high_memory || (mem_map[MAP_NR(offset)] & MAP_PAGE_RESERVED))
                        *pte = mk_pte(offset, prot);
                forget_pte(oldpage);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        offset -= address;
        do {
                pte_t * pte = pte_alloc(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_pte_range(pte, address, end - address, address + offset, prot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long end = from + size;

        offset -= from;
        dir = pgd_offset(current, from);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = remap_pmd_range(pmd, from, end - from, offset + from, prot);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return error;
}

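/*
 * Typical use (sketch): a character-device driver maps device memory
 * into user space from its mmap() entry point.  The offset sanity
 * checks a real driver would make are omitted, and the function name
 * is illustrative.
 */
#if 0
static int example_mmap(struct inode * inode, struct file * file,
        struct vm_area_struct * vma)
{
        if (remap_page_range(vma->vm_start, vma->vm_offset,
            vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#endif
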
/*
 * sanity-check function..
 */
static void put_page(pte_t * page_table, pte_t pte)
{
        if (!pte_none(*page_table)) {
                printk("put_page: page already exists %08lx\n", pte_val(*page_table));
                free_page(pte_page(pte));
                return;
        }
/* no need for invalidate */
        *page_table = pte;
}

/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte;

        if (page >= high_memory)
                printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
        if (mem_map[MAP_NR(page)] != 1)
                printk("mem_map disagrees with %08lx at %08lx\n",page,address);
        pgd = pgd_offset(tsk,address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd) {
                free_page(page);
                oom(tsk);
                return 0;
        }
        pte = pte_alloc(pmd, address);
        if (!pte) {
                free_page(page);
                oom(tsk);
                return 0;
        }
        if (!pte_none(*pte)) {
                printk("put_dirty_page: page already exists\n");
                pte_clear(pte);
                invalidate();
        }
        *pte = pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY)));
/* no need for invalidate */
        return page;
}

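/*
 * Typical use (sketch): execve() pushes the argument and environment
 * pages it gathered into the top of the new image's stack, one page at
 * a time.  The loop below only suggests the shape of the fs/exec.c
 * code; "stack_top" and the page[] array are illustrative names.
 */
#if 0
        for (i = 0 ; i < MAX_ARG_PAGES ; i++)
                if (page[i])
                        put_dirty_page(current, page[i],
                                       stack_top + i*PAGE_SIZE);
#endif
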
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 */
void do_wp_page(struct vm_area_struct * vma, unsigned long address,
        int write_access)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table, pte;
        unsigned long old_page, new_page;

        new_page = __get_free_page(GFP_KERNEL);
        page_dir = pgd_offset(vma->vm_task,address);
        if (pgd_none(*page_dir))
                goto end_wp_page;
        if (pgd_bad(*page_dir))
                goto bad_wp_pagedir;
        page_middle = pmd_offset(page_dir, address);
        if (pmd_none(*page_middle))
                goto end_wp_page;
        if (pmd_bad(*page_middle))
                goto bad_wp_pagemiddle;
        page_table = pte_offset(page_middle, address);
        pte = *page_table;
        if (!pte_present(pte))
                goto end_wp_page;
        if (pte_write(pte))
                goto end_wp_page;
        old_page = pte_page(pte);
        if (old_page >= high_memory)
                goto bad_wp_page;
        vma->vm_task->mm->min_flt++;
        /*
         * Do we need to copy?
         */
        if (mem_map[MAP_NR(old_page)] != 1) {
                if (new_page) {
                        if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
                                ++vma->vm_task->mm->rss;
                        copy_page(old_page,new_page);
                        *page_table = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
                        free_page(old_page);
                        invalidate();
                        return;
                }
                *page_table = BAD_PAGE;
                free_page(old_page);
                oom(vma->vm_task);
                invalidate();
                return;
        }
        *page_table = pte_mkdirty(pte_mkwrite(pte));
        invalidate();
        if (new_page)
                free_page(new_page);
        return;
bad_wp_page:
        printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
        send_sig(SIGKILL, vma->vm_task, 1);
        goto end_wp_page;
bad_wp_pagemiddle:
        printk("do_wp_page: bogus page-middle at address %08lx (%08lx)\n", address, pmd_val(*page_middle));
        send_sig(SIGKILL, vma->vm_task, 1);
        goto end_wp_page;
bad_wp_pagedir:
        printk("do_wp_page: bogus page-dir entry at address %08lx (%08lx)\n", address, pgd_val(*page_dir));
        send_sig(SIGKILL, vma->vm_task, 1);
end_wp_page:
        if (new_page)
                free_page(new_page);
        return;
}

/*
 * Ugly, ugly, but the goto's result in better assembly..
 */
int verify_area(int type, const void * addr, unsigned long size)
{
        struct vm_area_struct * vma;
        unsigned long start = (unsigned long) addr;

        /* If the current user space is mapped to kernel space (for the
         * case where we use a fake user buffer with get_fs/set_fs()) we
         * don't expect to find the address in the user vm map.
         */
        if (get_fs() == get_ds())
                return 0;

        vma = find_vma(current, start);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= start)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (vma->vm_end - start > current->rlim[RLIMIT_STACK].rlim_cur)
                goto bad_area;

good_area:
        if (type == VERIFY_WRITE)
                goto check_write;
        for (;;) {
                struct vm_area_struct * next;
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                if (vma->vm_end - start >= size)
                        return 0;
                next = vma->vm_next;
                if (!next || vma->vm_end != next->vm_start)
                        goto bad_area;
                vma = next;
        }

check_write:
        if (!(vma->vm_flags & VM_WRITE))
                goto bad_area;
        if (!wp_works_ok)
                goto check_wp_fault_by_hand;
        for (;;) {
                if (vma->vm_end - start >= size)
                        break;
                if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start)
                        goto bad_area;
                vma = vma->vm_next;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 0;

check_wp_fault_by_hand:
        size--;
        size += start & ~PAGE_MASK;
        size >>= PAGE_SHIFT;
        start &= PAGE_MASK;

        for (;;) {
                do_wp_page(vma, start, 1);
                if (!size)
                        break;
                size--;
                start += PAGE_SIZE;
                if (start < vma->vm_end)
                        continue;
                vma = vma->vm_next;
                if (!vma || vma->vm_start != start)
                        goto bad_area;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 0;

bad_area:
        return -EFAULT;
}

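/*
 * Typical use (sketch): a system call validates a user buffer with
 * verify_area() before writing to it with the fs-segment helpers from
 * <asm/segment.h>.  "sys_example" and "kernel_data" are illustrative
 * names, not real kernel symbols.
 */
#if 0
int sys_example(char * buf, unsigned long count)
{
        int error = verify_area(VERIFY_WRITE, buf, count);
        if (error)
                return error;
        memcpy_tofs(buf, kernel_data, count);   /* now known to be safe */
        return count;
}
#endif
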
static inline void get_empty_page(struct vm_area_struct * vma, pte_t * page_table)
{
        unsigned long tmp;

        if (!(tmp = get_free_page(GFP_KERNEL))) {
                oom(vma->vm_task);
                put_page(page_table, BAD_PAGE);
                return;
        }
        put_page(page_table, pte_mkwrite(mk_pte(tmp, vma->vm_page_prot)));
}

/*
 * try_to_share() checks the page at "from_address" in "from_area", to
 * see if it exists, and if it is clean. If so, share it with the task
 * that faulted at "to_address" in "to_area".
 *
 * NOTE! This assumes we have checked that the two mappings differ, and
 * that they share the same inode and can generally otherwise be shared.
 */
static int try_to_share(unsigned long to_address, struct vm_area_struct * to_area,
        unsigned long from_address, struct vm_area_struct * from_area,
        unsigned long newpage)
{
        pgd_t * from_dir, * to_dir;
        pmd_t * from_middle, * to_middle;
        pte_t * from_table, * to_table;
        pte_t from, to;

        from_dir = pgd_offset(from_area->vm_task,from_address);
/* is there a page-directory at from? */
        if (pgd_none(*from_dir))
                return 0;
        if (pgd_bad(*from_dir)) {
                printk("try_to_share: bad page directory %08lx\n", pgd_val(*from_dir));
                pgd_clear(from_dir);
                return 0;
        }
        from_middle = pmd_offset(from_dir, from_address);
/* is there a mid-directory at from? */
        if (pmd_none(*from_middle))
                return 0;
        if (pmd_bad(*from_middle)) {
                printk("try_to_share: bad mid directory %08lx\n", pmd_val(*from_middle));
                pmd_clear(from_middle);
                return 0;
        }
        from_table = pte_offset(from_middle, from_address);
        from = *from_table;
/* is the page present? */
        if (!pte_present(from))
                return 0;
/* if it is dirty it must be from a shared mapping to be shared */
        if (pte_dirty(from)) {
                if (!(from_area->vm_flags & VM_SHARED))
                        return 0;
        }
/* is the page reasonable at all? */
        if (pte_page(from) >= high_memory)
                return 0;
        if (mem_map[MAP_NR(pte_page(from))] & MAP_PAGE_RESERVED)
                return 0;
/* is the destination ok? */
        to_dir = pgd_offset(to_area->vm_task,to_address);
/* is there a page-directory at to? */
        if (pgd_none(*to_dir))
                return 0;
        if (pgd_bad(*to_dir)) {
                printk("try_to_share: bad page directory %08lx\n", pgd_val(*to_dir));
                return 0;
        }
        to_middle = pmd_offset(to_dir, to_address);
/* is there a mid-directory at to? */
        if (pmd_none(*to_middle))
                return 0;
        if (pmd_bad(*to_middle)) {
                printk("try_to_share: bad mid directory %08lx\n", pmd_val(*to_middle));
                return 0;
        }
        to_table = pte_offset(to_middle, to_address);
        to = *to_table;
        if (!pte_none(to))
                return 0;
/* do we copy? */
        if (newpage) {
                /* if it's in the swap cache, it's dirty by implication */
                /* so we can't use it if it's not from a shared mapping */
                if (in_swap_cache(pte_page(from))) {
                        if (!(from_area->vm_flags & VM_SHARED))
                                return 0;
                }
                copy_page(pte_page(from), newpage);
                *to_table = mk_pte(newpage, to_area->vm_page_prot);
                return 1;
        }
/*
 * do a final swap-cache test before sharing them: if it's in the swap
 * cache, we have to remove it now, as we get two pointers to the same
 * physical page and the cache can't handle it. Mark the original dirty.
 *
 * NOTE! Even if "from" is dirty, "to" will be clean: if we get here
 * with a dirty "from", the from-mapping is a shared map, so we can trust
 * the page contents to be up-to-date
 */
        if (in_swap_cache(pte_page(from))) {
                if (!(from_area->vm_flags & VM_SHARED))
                        return 0;
                *from_table = pte_mkdirty(from);
                delete_from_swap_cache(pte_page(from));
        }
        mem_map[MAP_NR(pte_page(from))]++;
        *to_table = mk_pte(pte_page(from), to_area->vm_page_prot);
/* Check if we need to do anything at all to the 'from' field */
        if (!pte_write(from))
                return 1;
        if (from_area->vm_flags & VM_SHARED)
                return 1;
/* ok, need to mark it read-only, so invalidate any possible old TB entry */
        *from_table = pte_wrprotect(from);
        invalidate();
        return 1;
}

/*
 * share_page() tries to find a process that could share a page with
 * the current one.
 *
 * We first check if it is at all feasible by checking inode->i_count.
 * It should be >1 if there are other tasks sharing this inode.
 */
static int share_page(struct vm_area_struct * area, unsigned long address,
        int write_access, unsigned long newpage)
{
        struct inode * inode;
        unsigned long offset;
        unsigned long from_address;
        unsigned long give_page;
        struct vm_area_struct * mpnt;

        if (!area || !(inode = area->vm_inode) || inode->i_count < 2)
                return 0;
        /* do we need to copy or can we just share? */
        give_page = 0;
        if (write_access && !(area->vm_flags & VM_SHARED)) {
                if (!newpage)
                        return 0;
                give_page = newpage;
        }
        offset = address - area->vm_start + area->vm_offset;
        /* See if there is something in the VM we can share pages with. */
        /* Traverse the entire circular i_mmap list, except `area' itself. */
        for (mpnt = area->vm_next_share; mpnt != area; mpnt = mpnt->vm_next_share) {
                /* must be same inode */
                if (mpnt->vm_inode != inode) {
                        printk("Aiee! Corrupt vm_area_struct i_mmap ring\n");
                        break;
                }
                /* offsets must be mutually page-aligned */
                if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK)
                        continue;
                /* the other area must actually cover the wanted page.. */
                from_address = offset + mpnt->vm_start - mpnt->vm_offset;
                if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end)
                        continue;
                /* .. NOW we can actually try to use the same physical page */
                if (!try_to_share(address, area, from_address, mpnt, give_page))
                        continue;
                /* free newpage if we never used it.. */
                if (give_page || !newpage)
                        return 1;
                free_page(newpage);
                return 1;
        }
        return 0;
}

/*
 * fill in an empty page-table if none exists.
 */
static inline pte_t * get_empty_pgtable(struct task_struct * tsk,unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(tsk, address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd) {
                oom(tsk);
                return NULL;
        }
        pte = pte_alloc(pmd, address);
        if (!pte) {
                oom(tsk);
                return NULL;
        }
        return pte;
}

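/*
 * Swap in a not-present page.  If the vma has its own swapin operation
 * (e.g. a shared memory mapping) use that, re-checking the page table
 * entry afterwards in case it changed while we slept; otherwise fall
 * back to the generic swap_in().  A page that is still shared and not
 * part of a VM_SHARED mapping comes back write-protected, so that a
 * later write fault can do the copy-on-write.
 */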
static inline void do_swap_page(struct vm_area_struct * vma, unsigned long address,
        pte_t * page_table, pte_t entry, int write_access)
{
        pte_t page;

        if (!vma->vm_ops || !vma->vm_ops->swapin) {
                swap_in(vma, page_table, pte_val(entry), write_access);
                return;
        }
        page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
        if (pte_val(*page_table) != pte_val(entry)) {
                free_page(pte_page(page));
                return;
        }
        if (mem_map[MAP_NR(pte_page(page))] > 1 && !(vma->vm_flags & VM_SHARED))
                page = pte_wrprotect(page);
        ++vma->vm_task->mm->rss;
        ++vma->vm_task->mm->maj_flt;
        *page_table = page;
        return;
}

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 */
void do_no_page(struct vm_area_struct * vma, unsigned long address,
        int write_access)
{
        pte_t * page_table;
        pte_t entry;
        unsigned long page;

        page_table = get_empty_pgtable(vma->vm_task,address);
        if (!page_table)
                return;
        entry = *page_table;
        if (pte_present(entry))
                return;
        if (!pte_none(entry)) {
                do_swap_page(vma, address, page_table, entry, write_access);
                return;
        }
        address &= PAGE_MASK;
        if (!vma->vm_ops || !vma->vm_ops->nopage) {
                ++vma->vm_task->mm->rss;
                ++vma->vm_task->mm->min_flt;
                get_empty_page(vma, page_table);
                return;
        }
        page = __get_free_page(GFP_KERNEL);
        if (share_page(vma, address, write_access, page)) {
                ++vma->vm_task->mm->min_flt;
                ++vma->vm_task->mm->rss;
                return;
        }
        if (!page) {
                oom(current);
                put_page(page_table, BAD_PAGE);
                return;
        }
        ++vma->vm_task->mm->maj_flt;
        ++vma->vm_task->mm->rss;
        /*
         * The fourth argument is "no_share", which tells the low-level code
         * to copy, not share the page even if sharing is possible.  It's
         * essentially an early COW detection.
         */
        page = vma->vm_ops->nopage(vma, address, page,
                write_access && !(vma->vm_flags & VM_SHARED));
        if (share_page(vma, address, write_access, 0)) {
                free_page(page);
                return;
        }
        /*
         * This silly early PAGE_DIRTY setting removes a race
         * due to the bad i386 page protection. But it's valid
         * for other architectures too.
         *
         * Note that if write_access is true, we either now have
         * an exclusive copy of the page, or this is a shared mapping,
         * so we can make it writable and dirty to avoid having to
         * handle that later.
         */
        entry = mk_pte(page, vma->vm_page_prot);
        if (write_access) {
                entry = pte_mkwrite(pte_mkdirty(entry));
        } else if (mem_map[MAP_NR(page)] > 1 && !(vma->vm_flags & VM_SHARED))
                entry = pte_wrprotect(entry);
        put_page(page_table, entry);
}

/*
 * The above separate functions for the no-page and wp-page
 * cases will go away (they mostly do the same thing anyway),
 * and we'll instead use only a general "handle_mm_fault()".
 *
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 */
static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address,
        int write_access, pte_t * pte)
{
        if (!pte_present(*pte)) {
                do_no_page(vma, address, write_access);
                return;
        }
        *pte = pte_mkyoung(*pte);
        if (!write_access)
                return;
        if (pte_write(*pte)) {
                *pte = pte_mkdirty(*pte);
                return;
        }
        do_wp_page(vma, address, write_access);
}

void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
        int write_access)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(vma->vm_task, address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd)
                goto no_memory;
        pte = pte_alloc(pmd, address);
        if (!pte)
                goto no_memory;
        handle_pte_fault(vma, address, write_access, pte);
        update_mmu_cache(vma, address, *pte);
        return;
no_memory:
        oom(vma->vm_task);
}

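/*
 * Sketch of how an architecture's low-level fault handler is expected
 * to feed faults into handle_mm_fault() once the separate do_no_page /
 * do_wp_page entry points go away (see the comment above).  The name
 * and the missing error-code decoding are i386-flavoured and purely
 * illustrative; stack growth is also simplified.
 */
#if 0
void do_page_fault(unsigned long address, int write_access)
{
        struct vm_area_struct * vma;

        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start > address && !(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        /* ..check vm_flags against the access type here.. */
        handle_mm_fault(vma, address, write_access);
        return;
bad_area:
        send_sig(SIGSEGV, current, 1);
}
#endif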