root/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. __do_wp_page
  13. do_wp_page
  14. __verify_write
  15. get_empty_page
  16. try_to_share
  17. share_page
  18. get_empty_pgtable
  19. do_no_page
  20. do_page_fault
  21. __bad_pagetable
  22. __bad_page
  23. __zero_page
  24. show_mem
  25. paging_init
  26. mem_init
  27. si_meminfo
  28. file_mmap_nopage
  29. file_mmap_free
  30. file_mmap_share

/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

#include <asm/system.h>
#include <linux/config.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>

unsigned long high_memory = 0;

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */

extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);

int nr_swap_pages = 0;
int nr_free_pages = 0;
unsigned long free_page_list = 0;
/*
 * The secondary free_page_list is used for malloc() and other things
 * that may need pages during interrupts etc. Normal get_free_page()
 * operations don't touch it, so it stays as a kind of "panic-list",
 * that can be accessed when all other mm tricks have failed.
 */
int nr_secondary_pages = 0;
unsigned long secondary_page_list = 0;

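/*
 * copy_page() copies one whole page (1024 longwords) from "from"
 * to "to" with a single "rep ; movsl".
 */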
#define copy_page(from,to) \
__asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")

unsigned short * mem_map = NULL;

#define CODE_SPACE(addr,p) ((addr) < (p)->end_code)

/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGKILL.
 */
void oom(struct task_struct * task)
{
        printk("\nout of memory\n");
        task->sigaction[SIGKILL-1].sa_handler = NULL;
        task->blocked &= ~(1<<(SIGKILL-1));
        send_sig(SIGKILL,task,1);
}

static void free_one_table(unsigned long * page_dir)
{
        int j;
        unsigned long pg_table = *page_dir;
        unsigned long * page_table;

        if (!pg_table)
                return;
        *page_dir = 0;
        if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
                printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
                return;
        }
        if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
                return;
        page_table = (unsigned long *) (pg_table & PAGE_MASK);
        for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
                unsigned long pg = *page_table;

                if (!pg)
                        continue;
                *page_table = 0;
                if (pg & PAGE_PRESENT)
                        free_page(PAGE_MASK & pg);
                else
                        swap_free(pg);
        }
        free_page(PAGE_MASK & pg_table);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way. Note that
 * unlike 'free_page_tables()', this function still leaves a valid
 * page-table-tree in memory: it just removes the user pages. The two
 * functions are similar, but there is a fundamental difference.
 */
void clear_page_tables(struct task_struct * tsk)
{
        int i;
        unsigned long pg_dir;
        unsigned long * page_dir;

        if (!tsk)
                return;
        if (tsk == task[0])
                panic("task[0] (swapper) doesn't support exec()\n");
        pg_dir = tsk->tss.cr3;
        page_dir = (unsigned long *) pg_dir;
        if (!page_dir || page_dir == swapper_pg_dir) {
                printk("Trying to clear kernel page-directory: not good\n");
                return;
        }
        if (mem_map[MAP_NR(pg_dir)] > 1) {
                unsigned long * new_pg;

                if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
                        oom(tsk);
                        return;
                }
                for (i = 768 ; i < 1024 ; i++)
                        new_pg[i] = page_dir[i];
                free_page(pg_dir);
                tsk->tss.cr3 = (unsigned long) new_pg;
                return;
        }
        for (i = 0 ; i < 768 ; i++,page_dir++)
                free_one_table(page_dir);
        invalidate();
        return;
}

 153 
 154 /*
 155  * This function frees up all page tables of a process when it exits.
 156  */
 157 void free_page_tables(struct task_struct * tsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 158 {
 159         int i;
 160         unsigned long pg_dir;
 161         unsigned long * page_dir;
 162 
 163         if (!tsk)
 164                 return;
 165         if (tsk == task[0]) {
 166                 printk("task[0] (swapper) killed: unable to recover\n");
 167                 panic("Trying to free up swapper memory space");
 168         }
 169         pg_dir = tsk->tss.cr3;
 170         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 171                 printk("Trying to free kernel page-directory: not good\n");
 172                 return;
 173         }
 174         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 175         if (tsk == current)
 176                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 177         if (mem_map[MAP_NR(pg_dir)] > 1) {
 178                 free_page(pg_dir);
 179                 return;
 180         }
 181         page_dir = (unsigned long *) pg_dir;
 182         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 183                 free_one_table(page_dir);
 184         free_page(pg_dir);
 185         invalidate();
 186 }
 187 
 188 /*
 189  * clone_page_tables() clones the page table for a process - both
 190  * processes will have the exact same pages in memory. There are
 191  * probably races in the memory management with cloning, but we'll
 192  * see..
 193  */
 194 int clone_page_tables(struct task_struct * tsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 195 {
 196         unsigned long pg_dir;
 197 
 198         pg_dir = current->tss.cr3;
 199         mem_map[MAP_NR(pg_dir)]++;
 200         tsk->tss.cr3 = pg_dir;
 201         return 0;
 202 }
 203 
 204 /*
 205  * copy_page_tables() just copies the whole process memory range:
 206  * note the special handling of RESERVED (ie kernel) pages, which
 207  * means that they are always shared by all processes.
 208  */
 209 int copy_page_tables(struct task_struct * tsk)
     /* [previous][next][first][last][top][bottom][index][help] */
 210 {
 211         int i;
 212         unsigned long old_pg_dir, *old_page_dir;
 213         unsigned long new_pg_dir, *new_page_dir;
 214 
 215         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 216                 return -ENOMEM;
 217         old_pg_dir = current->tss.cr3;
 218         tsk->tss.cr3 = new_pg_dir;
 219         old_page_dir = (unsigned long *) old_pg_dir;
 220         new_page_dir = (unsigned long *) new_pg_dir;
 221         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 222                 int j;
 223                 unsigned long old_pg_table, *old_page_table;
 224                 unsigned long new_pg_table, *new_page_table;
 225 
 226                 old_pg_table = *old_page_dir;
 227                 if (!old_pg_table)
 228                         continue;
 229                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
 230                         printk("copy_page_tables: bad page table: "
 231                                 "probable memory corruption");
 232                         *old_page_dir = 0;
 233                         continue;
 234                 }
 235                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 236                         *new_page_dir = old_pg_table;
 237                         continue;
 238                 }
 239                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 240                         free_page_tables(tsk);
 241                         return -ENOMEM;
 242                 }
 243                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 244                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 245                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 246                         unsigned long pg;
 247                         pg = *old_page_table;
 248                         if (!pg)
 249                                 continue;
 250                         if (!(pg & PAGE_PRESENT)) {
 251                                 *new_page_table = swap_duplicate(pg);
 252                                 continue;
 253                         }
 254                         if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
 255                                 pg &= ~PAGE_RW;
 256                         *new_page_table = pg;
 257                         if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
 258                                 continue;
 259                         *old_page_table = pg;
 260                         mem_map[MAP_NR(pg)]++;
 261                 }
 262                 *new_page_dir = new_pg_table | PAGE_TABLE;
 263         }
 264         invalidate();
 265         return 0;
 266 }
 267 
 268 /*
 269  * a more complete version of free_page_tables which performs with page
 270  * granularity.
 271  */
 272 int unmap_page_range(unsigned long from, unsigned long size)
     /* [previous][next][first][last][top][bottom][index][help] */
 273 {
 274         unsigned long page, page_dir;
 275         unsigned long *page_table, *dir;
 276         unsigned long poff, pcnt, pc;
 277 
 278         if (from & ~PAGE_MASK) {
 279                 printk("unmap_page_range called with wrong alignment\n");
 280                 return -EINVAL;
 281         }
 282         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 283         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 284         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 285         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 286                 pcnt = size;
 287 
 288         for ( ; size > 0; ++dir, size -= pcnt,
 289              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 290                 if (!(page_dir = *dir)) {
 291                         poff = 0;
 292                         continue;
 293                 }
 294                 if (!(page_dir & PAGE_PRESENT)) {
 295                         printk("unmap_page_range: bad page directory.");
 296                         continue;
 297                 }
 298                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 299                 if (poff) {
 300                         page_table += poff;
 301                         poff = 0;
 302                 }
 303                 for (pc = pcnt; pc--; page_table++) {
 304                         if ((page = *page_table) != 0) {
 305                                 *page_table = 0;
 306                                 if (1 & page) {
 307                                         if (!(mem_map[MAP_NR(page)]
 308                                               & MAP_PAGE_RESERVED))
 309                                                 --current->rss;
 310                                         free_page(PAGE_MASK & page);
 311                                 } else
 312                                         swap_free(page);
 313                         }
 314                 }
 315                 if (pcnt == PTRS_PER_PAGE) {
 316                         *dir = 0;
 317                         free_page(PAGE_MASK & page_dir);
 318                 }
 319         }
 320         invalidate();
 321         return 0;
 322 }
 323 
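/*
 * zeromap_page_range() points every page in the given range at the
 * global ZERO_PAGE (or at nothing, if "mask" is zero), allocating
 * page tables as needed and freeing whatever was mapped there before.
 */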
int zeromap_page_range(unsigned long from, unsigned long size, int mask)
{
        unsigned long *page_table, *dir;
        unsigned long poff, pcnt;
        unsigned long page;

        if (mask) {
                if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
                        printk("zeromap_page_range: mask = %08x\n",mask);
                        return -EINVAL;
                }
                mask |= ZERO_PAGE;
        }
        if (from & ~PAGE_MASK) {
                printk("zeromap_page_range: from = %08lx\n",from);
                return -EINVAL;
        }
        dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
        size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
        poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
        if ((pcnt = PTRS_PER_PAGE - poff) > size)
                pcnt = size;

        while (size > 0) {
                if (!(PAGE_PRESENT & *dir)) {
                        /* clear page needed here?  SRB. */
                        if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
                                invalidate();
                                return -ENOMEM;
                        }
                        if (PAGE_PRESENT & *dir) {
                                free_page((unsigned long) page_table);
                                page_table = (unsigned long *)(PAGE_MASK & *dir++);
                        } else
                                *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
                } else
                        page_table = (unsigned long *)(PAGE_MASK & *dir++);
                page_table += poff;
                poff = 0;
                for (size -= pcnt; pcnt-- ;) {
                        if ((page = *page_table) != 0) {
                                *page_table = 0;
                                if (page & PAGE_PRESENT) {
                                        if (!(mem_map[MAP_NR(page)]
                                              & MAP_PAGE_RESERVED))
                                                --current->rss;
                                        free_page(PAGE_MASK & page);
                                } else
                                        swap_free(page);
                        }
                        *page_table++ = mask;
                }
                pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
        }
        invalidate();
        return 0;
}

 381 
 382 /*
 383  * maps a range of physical memory into the requested pages. the old
 384  * mappings are removed. any references to nonexistent pages results
 385  * in null mappings (currently treated as "copy-on-access")
 386  */
 387 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
     /* [previous][next][first][last][top][bottom][index][help] */
 388 {
 389         unsigned long *page_table, *dir;
 390         unsigned long poff, pcnt;
 391         unsigned long page;
 392 
 393         if (mask) {
 394                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 395                         printk("remap_page_range: mask = %08x\n",mask);
 396                         return -EINVAL;
 397                 }
 398         }
 399         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 400                 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
 401                 return -EINVAL;
 402         }
 403         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 404         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 405         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 406         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 407                 pcnt = size;
 408 
 409         while (size > 0) {
 410                 if (!(PAGE_PRESENT & *dir)) {
 411                         /* clearing page here, needed?  SRB. */
 412                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 413                                 invalidate();
 414                                 return -1;
 415                         }
 416                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 417                 }
 418                 else
 419                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 420                 if (poff) {
 421                         page_table += poff;
 422                         poff = 0;
 423                 }
 424 
 425                 for (size -= pcnt; pcnt-- ;) {
 426                         if ((page = *page_table) != 0) {
 427                                 *page_table = 0;
 428                                 if (PAGE_PRESENT & page) {
 429                                         if (!(mem_map[MAP_NR(page)]
 430                                               & MAP_PAGE_RESERVED))
 431                                                 --current->rss;
 432                                         free_page(PAGE_MASK & page);
 433                                 } else
 434                                         swap_free(page);
 435                         }
 436 
 437                         /*
 438                          * the first condition should return an invalid access
 439                          * when the page is referenced. current assumptions
 440                          * cause it to be treated as demand allocation in some
 441                          * cases.
 442                          */
 443                         if (!mask)
 444                                 *page_table++ = 0;      /* not present */
 445                         else if (to >= high_memory)
 446                                 *page_table++ = (to | mask);
 447                         else if (!mem_map[MAP_NR(to)])
 448                                 *page_table++ = 0;      /* not present */
 449                         else {
 450                                 *page_table++ = (to | mask);
 451                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 452                                         ++current->rss;
 453                                         mem_map[MAP_NR(to)]++;
 454                                 }
 455                         }
 456                         to += PAGE_SIZE;
 457                 }
 458                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 459         }
 460         invalidate();
 461         return 0;
 462 }
 463 
 464 /*
 465  * This function puts a page in memory at the wanted address.
 466  * It returns the physical address of the page gotten, 0 if
 467  * out of memory (either when trying to access page-table or
 468  * page.)
 469  */
 470 unsigned long put_page(struct task_struct * tsk,unsigned long page,
     /* [previous][next][first][last][top][bottom][index][help] */
 471         unsigned long address,int prot)
 472 {
 473         unsigned long *page_table;
 474 
 475         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 476                 printk("put_page: prot = %08x\n",prot);
 477         if (page >= high_memory) {
 478                 printk("put_page: trying to put page %08lx at %08lx\n",page,address);
 479                 return 0;
 480         }
 481         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 482         if ((*page_table) & PAGE_PRESENT)
 483                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 484         else {
 485                 printk("put_page: bad page directory entry\n");
 486                 oom(tsk);
 487                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 488                 return 0;
 489         }
 490         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 491         if (*page_table) {
 492                 printk("put_page: page already exists\n");
 493                 *page_table = 0;
 494                 invalidate();
 495         }
 496         *page_table = page | prot;
 497 /* no need for invalidate */
 498         return page;
 499 }
 500 
 501 /*
 502  * The previous function doesn't work very well if you also want to mark
 503  * the page dirty: exec.c wants this, as it has earlier changed the page,
 504  * and we want the dirty-status to be correct (for VM). Thus the same
 505  * routine, but this time we mark it dirty too.
 506  */
 507 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
     /* [previous][next][first][last][top][bottom][index][help] */
 508 {
 509         unsigned long tmp, *page_table;
 510 
 511         if (page >= high_memory)
 512                 printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
 513         if (mem_map[MAP_NR(page)] != 1)
 514                 printk("mem_map disagrees with %08lx at %08lx\n",page,address);
 515         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 516         if (PAGE_PRESENT & *page_table)
 517                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 518         else {
 519                 if (!(tmp = get_free_page(GFP_KERNEL)))
 520                         return 0;
 521                 if (PAGE_PRESENT & *page_table) {
 522                         free_page(tmp);
 523                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 524                 } else {
 525                         *page_table = tmp | PAGE_TABLE;
 526                         page_table = (unsigned long *) tmp;
 527                 }
 528         }
 529         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 530         if (*page_table) {
 531                 printk("put_dirty_page: page already exists\n");
 532                 *page_table = 0;
 533                 invalidate();
 534         }
 535         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 536 /* no need for invalidate */
 537         return page;
 538 }
 539 
 540 /*
 541  * This routine handles present pages, when users try to write
 542  * to a shared page. It is done by copying the page to a new address
 543  * and decrementing the shared-page counter for the old page.
 544  *
 545  * Note that we do many checks twice (look at do_wp_page()), as
 546  * we have to be careful about race-conditions.
 547  *
 548  * Goto-purists beware: the only reason for goto's here is that it results
 549  * in better assembly code.. The "default" path will see no jumps at all.
 550  */
 551 static void __do_wp_page(unsigned long error_code, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
 552         struct task_struct * tsk, unsigned long user_esp)
 553 {
 554         unsigned long *pde, pte, old_page, prot;
 555         unsigned long new_page;
 556 
 557         new_page = __get_free_page(GFP_KERNEL);
 558         pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 559         pte = *pde;
 560         if (!(pte & PAGE_PRESENT))
 561                 goto end_wp_page;
 562         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 563                 goto bad_wp_pagetable;
 564         pte &= PAGE_MASK;
 565         pte += PAGE_PTR(address);
 566         old_page = *(unsigned long *) pte;
 567         if (!(old_page & PAGE_PRESENT))
 568                 goto end_wp_page;
 569         if (old_page >= high_memory)
 570                 goto bad_wp_page;
 571         if (old_page & PAGE_RW)
 572                 goto end_wp_page;
 573         tsk->min_flt++;
 574         prot = (old_page & ~PAGE_MASK) | PAGE_RW;
 575         old_page &= PAGE_MASK;
 576         if (mem_map[MAP_NR(old_page)] != 1) {
 577                 if (new_page) {
 578                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 579                                 ++tsk->rss;
 580                         copy_page(old_page,new_page);
 581                         *(unsigned long *) pte = new_page | prot;
 582                         free_page(old_page);
 583                         invalidate();
 584                         return;
 585                 }
 586                 free_page(old_page);
 587                 oom(tsk);
 588                 *(unsigned long *) pte = BAD_PAGE | prot;
 589                 invalidate();
 590                 return;
 591         }
 592         *(unsigned long *) pte |= PAGE_RW;
 593         invalidate();
 594         if (new_page)
 595                 free_page(new_page);
 596         return;
 597 bad_wp_page:
 598         printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
 599         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 600         send_sig(SIGKILL, tsk, 1);
 601         goto end_wp_page;
 602 bad_wp_pagetable:
 603         printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
 604         *pde = BAD_PAGETABLE | PAGE_TABLE;
 605         send_sig(SIGKILL, tsk, 1);
 606 end_wp_page:
 607         if (new_page)
 608                 free_page(new_page);
 609         return;
 610 }
 611 
 612 /*
 613  * check that a page table change is actually needed, and call
 614  * the low-level function only in that case..
 615  */
 616 void do_wp_page(unsigned long error_code, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
 617         struct task_struct * tsk, unsigned long user_esp)
 618 {
 619         unsigned long page;
 620         unsigned long * pg_table;
 621 
 622         pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 623         page = *pg_table;
 624         if (!page)
 625                 return;
 626         if ((page & PAGE_PRESENT) && page < high_memory) {
 627                 pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
 628                 page = *pg_table;
 629                 if (!(page & PAGE_PRESENT))
 630                         return;
 631                 if (page & PAGE_RW)
 632                         return;
 633                 if (!(page & PAGE_COW)) {
 634                         if (user_esp && tsk == current) {
 635                                 current->tss.cr2 = address;
 636                                 current->tss.error_code = error_code;
 637                                 current->tss.trap_no = 14;
 638                                 send_sig(SIGSEGV, tsk, 1);
 639                                 return;
 640                         }
 641                 }
 642                 if (mem_map[MAP_NR(page)] == 1) {
 643                         *pg_table |= PAGE_RW | PAGE_DIRTY;
 644                         invalidate();
 645                         return;
 646                 }
 647                 __do_wp_page(error_code, address, tsk, user_esp);
 648                 return;
 649         }
 650         printk("bad page directory entry %08lx\n",page);
 651         *pg_table = 0;
 652 }
 653 
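/*
 * __verify_write() makes sure a page range is writable by simulating
 * a write fault on every page in the range. This is needed on CPUs
 * that don't honour the WP bit in supervisor mode (see the
 * wp_works_ok test in mem_init()), as the hardware won't fault on
 * kernel writes to read-only user pages there.
 */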
int __verify_write(unsigned long start, unsigned long size)
{
        size--;
        size += start & ~PAGE_MASK;
        size >>= PAGE_SHIFT;
        start &= PAGE_MASK;
        do {
                do_wp_page(1,start,current,0);
                start += PAGE_SIZE;
        } while (size--);
        return 0;
}

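/*
 * get_empty_page() maps a newly allocated page at the given address,
 * falling back to BAD_PAGE (after an oom() complaint) if no free
 * page is available.
 */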
static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
{
        unsigned long tmp;

        if (!(tmp = get_free_page(GFP_KERNEL))) {
                oom(tsk);
                tmp = BAD_PAGE;
        }
        if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
                free_page(tmp);
}

/*
 * try_to_share() checks the page at address "address" in the task "p",
 * to see if it exists, and if it is clean. If so, share it with the current
 * task.
 *
 * NOTE! This assumes we have checked that p != current, and that they
 * share the same executable or library.
 *
 * We may want to fix this to allow page sharing for PIC pages at different
 * addresses so that ELF will really perform properly. As long as the vast
 * majority of sharable libraries load at fixed addresses this is not a
 * big concern. Any sharing of pages between the buffer cache and the
 * code space reduces the need for this as well.  - ERY
 */
static int try_to_share(unsigned long address, struct task_struct * tsk,
        struct task_struct * p, unsigned long error_code, unsigned long newpage)
{
        unsigned long from;
        unsigned long to;
        unsigned long from_page;
        unsigned long to_page;

        from_page = (unsigned long)PAGE_DIR_OFFSET(p->tss.cr3,address);
        to_page = (unsigned long)PAGE_DIR_OFFSET(tsk->tss.cr3,address);
/* is there a page-directory at from? */
        from = *(unsigned long *) from_page;
        if (!(from & PAGE_PRESENT))
                return 0;
        from &= PAGE_MASK;
        from_page = from + PAGE_PTR(address);
        from = *(unsigned long *) from_page;
/* is the page clean and present? */
        if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
                return 0;
        if (from >= high_memory)
                return 0;
        if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
                return 0;
/* is the destination ok? */
        to = *(unsigned long *) to_page;
        if (!(to & PAGE_PRESENT))
                return 0;
        to &= PAGE_MASK;
        to_page = to + PAGE_PTR(address);
        if (*(unsigned long *) to_page)
                return 0;
/* share them if read - do COW immediately otherwise */
        if (error_code & PAGE_RW) {
                if (!newpage)   /* did the page exist?  SRB. */
                        return 0;
                copy_page((from & PAGE_MASK),newpage);
                to = newpage | PAGE_PRIVATE;
        } else {
                mem_map[MAP_NR(from)]++;
                from &= ~PAGE_RW;
                to = from;
                if (newpage)    /* only if it existed. SRB. */
                        free_page(newpage);
        }
        *(unsigned long *) from_page = from;
        *(unsigned long *) to_page = to;
        invalidate();
        return 1;
}

 743 
 744 /*
 745  * share_page() tries to find a process that could share a page with
 746  * the current one. Address is the address of the wanted page relative
 747  * to the current data space.
 748  *
 749  * We first check if it is at all feasible by checking executable->i_count.
 750  * It should be >1 if there are other tasks sharing this inode.
 751  */
 752 int share_page(struct vm_area_struct * area, struct task_struct * tsk,
     /* [previous][next][first][last][top][bottom][index][help] */
 753         struct inode * inode,
 754         unsigned long address, unsigned long error_code, unsigned long newpage)
 755 {
 756         struct task_struct ** p;
 757 
 758         if (!inode || inode->i_count < 2 || !area->vm_ops)
 759                 return 0;
 760         for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
 761                 if (!*p)
 762                         continue;
 763                 if (tsk == *p)
 764                         continue;
 765                 if (inode != (*p)->executable) {
 766                           if(!area) continue;
 767                         /* Now see if there is something in the VMM that
 768                            we can share pages with */
 769                         if(area){
 770                           struct vm_area_struct * mpnt;
 771                           for (mpnt = (*p)->mmap; mpnt; mpnt = mpnt->vm_next) {
 772                             if (mpnt->vm_ops == area->vm_ops &&
 773                                mpnt->vm_inode->i_ino == area->vm_inode->i_ino&&
 774                                mpnt->vm_inode->i_dev == area->vm_inode->i_dev){
 775                               if (mpnt->vm_ops->share(mpnt, area, address))
 776                                 break;
 777                             };
 778                           };
 779                           if (!mpnt) continue;  /* Nope.  Nuthin here */
 780                         };
 781                 }
 782                 if (try_to_share(address,tsk,*p,error_code,newpage))
 783                         return 1;
 784         }
 785         return 0;
 786 }
 787 
 788 /*
 789  * fill in an empty page-table if none exists.
 790  */
 791 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
     /* [previous][next][first][last][top][bottom][index][help] */
 792 {
 793         unsigned long page;
 794         unsigned long *p;
 795 
 796         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 797         if (PAGE_PRESENT & *p)
 798                 return *p;
 799         if (*p) {
 800                 printk("get_empty_pgtable: bad page-directory entry \n");
 801                 *p = 0;
 802         }
 803         page = get_free_page(GFP_KERNEL);
 804         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 805         if (PAGE_PRESENT & *p) {
 806                 free_page(page);
 807                 return *p;
 808         }
 809         if (*p) {
 810                 printk("get_empty_pgtable: bad page-directory entry \n");
 811                 *p = 0;
 812         }
 813         if (page) {
 814                 *p = page | PAGE_TABLE;
 815                 return *p;
 816         }
 817         oom(current);
 818         *p = BAD_PAGETABLE | PAGE_TABLE;
 819         return 0;
 820 }
 821 
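/*
 * do_no_page() handles faults on not-present pages: a non-zero but
 * not-present page-table entry means the page is on the swap device,
 * otherwise the fault is resolved by the vm_area nopage() operation,
 * by handing out an empty page (demand allocation, brk and stack
 * growth), or by a SIGSEGV.
 */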
void do_no_page(unsigned long error_code, unsigned long address,
        struct task_struct *tsk, unsigned long user_esp)
{
        unsigned long tmp;
        unsigned long page;
        struct vm_area_struct * mpnt;

        page = get_empty_pgtable(tsk,address);
        if (!page)
                return;
        page &= PAGE_MASK;
        page += PAGE_PTR(address);
        tmp = *(unsigned long *) page;
        if (tmp & PAGE_PRESENT)
                return;
        ++tsk->rss;
        if (tmp) {
                ++tsk->maj_flt;
                swap_in((unsigned long *) page);
                return;
        }
        address &= PAGE_MASK;
        tmp = 0;
        for (mpnt = tsk->mmap; mpnt != NULL; mpnt = mpnt->vm_next) {
                if (address < mpnt->vm_start)
                        break;
                if (address >= mpnt->vm_end) {
                        tmp = mpnt->vm_end;
                        continue;
                }
                if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) {
                        ++tsk->min_flt;
                        get_empty_page(tsk,address);
                        return;
                }
                mpnt->vm_ops->nopage(error_code, mpnt, address);
                return;
        }
        if (tsk != current)
                goto ok_no_page;
        if (address >= tsk->end_data && address < tsk->brk)
                goto ok_no_page;
        if (mpnt && mpnt == tsk->stk_vma &&
            address - tmp > mpnt->vm_start - address &&
            tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) {
                mpnt->vm_start = address;
                goto ok_no_page;
        }
        tsk->tss.cr2 = address;
        tsk->tss.error_code = error_code;
        tsk->tss.trap_no = 14;
        send_sig(SIGSEGV,tsk,1);
        if (error_code & 4)     /* user level access? */
                return;
ok_no_page:
        ++tsk->min_flt;
        get_empty_page(tsk,address);
}

 880 
 881 /*
 882  * This routine handles page faults.  It determines the address,
 883  * and the problem, and then passes it off to one of the appropriate
 884  * routines.
 885  */
 886 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
     /* [previous][next][first][last][top][bottom][index][help] */
 887 {
 888         unsigned long address;
 889         unsigned long user_esp = 0;
 890         unsigned int bit;
 891 
 892         /* get the address */
 893         __asm__("movl %%cr2,%0":"=r" (address));
 894         if (address < TASK_SIZE) {
 895                 if (error_code & 4) {   /* user mode access? */
 896                         if (regs->eflags & VM_MASK) {
 897                                 bit = (address - 0xA0000) >> PAGE_SHIFT;
 898                                 if (bit < 32)
 899                                         current->screen_bitmap |= 1 << bit;
 900                         } else 
 901                                 user_esp = regs->esp;
 902                 }
 903                 if (error_code & 1)
 904                         do_wp_page(error_code, address, current, user_esp);
 905                 else
 906                         do_no_page(error_code, address, current, user_esp);
 907                 return;
 908         }
 909         address -= TASK_SIZE;
 910         if (wp_works_ok < 0 && address == 0 && (error_code & PAGE_PRESENT)) {
 911                 wp_works_ok = 1;
 912                 pg0[0] = PAGE_SHARED;
 913                 printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
 914                 return;
 915         }
 916         if (address < PAGE_SIZE) {
 917                 printk("Unable to handle kernel NULL pointer dereference");
 918                 pg0[0] = PAGE_SHARED;
 919         } else
 920                 printk("Unable to handle kernel paging request");
 921         printk(" at address %08lx\n",address);
 922         die_if_kernel("Oops", regs, error_code);
 923         do_exit(SIGKILL);
 924 }
 925 
 926 /*
 927  * BAD_PAGE is the page that is used for page faults when linux
 928  * is out-of-memory. Older versions of linux just did a
 929  * do_exit(), but using this instead means there is less risk
 930  * for a process dying in kernel mode, possibly leaving a inode
 931  * unused etc..
 932  *
 933  * BAD_PAGETABLE is the accompanying page-table: it is initialized
 934  * to point to BAD_PAGE entries.
 935  *
 936  * ZERO_PAGE is a special page that is used for zero-initialized
 937  * data and COW.
 938  */
 939 unsigned long __bad_pagetable(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 940 {
 941         extern char empty_bad_page_table[PAGE_SIZE];
 942 
 943         __asm__ __volatile__("cld ; rep ; stosl":
 944                 :"a" (BAD_PAGE + PAGE_TABLE),
 945                  "D" ((long) empty_bad_page_table),
 946                  "c" (PTRS_PER_PAGE)
 947                 :"di","cx");
 948         return (unsigned long) empty_bad_page_table;
 949 }
 950 
 951 unsigned long __bad_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 952 {
 953         extern char empty_bad_page[PAGE_SIZE];
 954 
 955         __asm__ __volatile__("cld ; rep ; stosl":
 956                 :"a" (0),
 957                  "D" ((long) empty_bad_page),
 958                  "c" (PTRS_PER_PAGE)
 959                 :"di","cx");
 960         return (unsigned long) empty_bad_page;
 961 }
 962 
 963 unsigned long __zero_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 964 {
 965         extern char empty_zero_page[PAGE_SIZE];
 966 
 967         __asm__ __volatile__("cld ; rep ; stosl":
 968                 :"a" (0),
 969                  "D" ((long) empty_zero_page),
 970                  "c" (PTRS_PER_PAGE)
 971                 :"di","cx");
 972         return (unsigned long) empty_zero_page;
 973 }
 974 
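/*
 * show_mem() prints the current memory statistics: free, secondary
 * and swap pages, buffer usage, and a summary of the mem_map[] page
 * counts.
 */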
void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        printk("Free pages:      %6dkB\n",nr_free_pages<<(PAGE_SHIFT-10));
        printk("Secondary pages: %6dkB\n",nr_secondary_pages<<(PAGE_SHIFT-10));
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        printk("Buffer memory:   %6dkB\n",buffermem>>10);
        printk("Buffer heads:    %6d\n",nr_buffer_heads);
        printk("Buffer blocks:   %6d\n",nr_buffers);
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
}

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long * pg_dir;
        unsigned long * pg_table;
        unsigned long tmp;
        unsigned long address;

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 */
#if 0
        memset((void *) 0, 0, PAGE_SIZE);
#endif
        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
                tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
                if (!tmp) {
                        tmp = start_mem | PAGE_TABLE;
                        *(pg_dir + 768) = tmp;
                        start_mem += PAGE_SIZE;
                }
                *pg_dir = tmp;                  /* also map it in at 0x00000000 for init */
                pg_dir++;
                pg_table = (unsigned long *) (tmp & PAGE_MASK);
                for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                *pg_table = address | PAGE_SHARED;
                        else
                                *pg_table = 0;
                        address += PAGE_SIZE;
                }
        }
        invalidate();
        return start_mem;
}

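/*
 * mem_init() sets up the mem_map[] array, marks the usable pages
 * free, builds the free-page list and tests whether the WP bit is
 * honoured in supervisor mode.
 */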
void mem_init(unsigned long start_low_mem,
              unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        unsigned short * p;
        extern int etext;

        cli();
        end_mem &= PAGE_MASK;
        high_memory = end_mem;
        start_mem += 0x0000000f;
        start_mem &= ~0x0000000f;
        tmp = MAP_NR(end_mem);
        mem_map = (unsigned short *) start_mem;
        p = mem_map + tmp;
        start_mem = (unsigned long) p;
        while (p > mem_map)
                *--p = MAP_PAGE_RESERVED;
        start_low_mem = PAGE_ALIGN(start_low_mem);
        start_mem = PAGE_ALIGN(start_mem);
        while (start_low_mem < 0xA0000) {
                mem_map[MAP_NR(start_low_mem)] = 0;
                start_low_mem += PAGE_SIZE;
        }
        while (start_mem < end_mem) {
                mem_map[MAP_NR(start_mem)] = 0;
                start_mem += PAGE_SIZE;
        }
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        free_page_list = 0;
        nr_free_pages = 0;
        for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
                if (mem_map[MAP_NR(tmp)]) {
                        if (tmp >= 0xA0000 && tmp < 0x100000)
                                reservedpages++;
                        else if (tmp < (unsigned long) &etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                *(unsigned long *) tmp = free_page_list;
                free_page_list = tmp;
                nr_free_pages++;
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                end_mem >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
/* test if the WP bit is honoured in supervisor mode */
        wp_works_ok = -1;
        pg0[0] = PAGE_READONLY;
        invalidate();
        __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
        pg0[0] = 0;
        invalidate();
        if (wp_works_ok < 0)
                wp_works_ok = 0;
        return;
}

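/*
 * si_meminfo() fills in the sysinfo structure with the current
 * total, free, shared and buffer memory figures (in bytes).
 */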
void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->freeram = 0;
        val->sharedram = 0;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i]) {
                        val->freeram++;
                        continue;
                }
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->freeram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}


/* This handles a generic mmap of a disk file */
void file_mmap_nopage(int error_code, struct vm_area_struct * area, unsigned long address)
{
        struct inode * inode = area->vm_inode;
        unsigned int block;
        unsigned long page;
        int nr[8];
        int i, j;
        int prot = area->vm_page_prot;

        address &= PAGE_MASK;
        block = address - area->vm_start + area->vm_offset;
        block >>= inode->i_sb->s_blocksize_bits;

        page = get_free_page(GFP_KERNEL);
        if (share_page(area, area->vm_task, inode, address, error_code, page)) {
                ++area->vm_task->min_flt;
                return;
        }

        ++area->vm_task->maj_flt;
        if (!page) {
                oom(current);
                put_page(area->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
                return;
        }
        for (i=0, j=0; i < PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
                nr[j] = bmap(inode,block);
        if (error_code & PAGE_RW)
                prot |= PAGE_RW | PAGE_DIRTY;
        page = bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, prot);

        if (!(prot & PAGE_RW)) {
                if (share_page(area, area->vm_task, inode, address, error_code, page))
                        return;
        }
        if (put_page(area->vm_task,page,address,prot))
                return;
        free_page(page);
        oom(current);
}

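/*
 * file_mmap_free() drops the inode reference held by a file mapping
 * when the area is closed.
 */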
void file_mmap_free(struct vm_area_struct * area)
{
        if (area->vm_inode)
                iput(area->vm_inode);
#if 0
        if (area->vm_inode)
                printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev,
                                 area->vm_inode->i_ino, area->vm_inode->i_count);
#endif
}

/*
 * Compare the contents of the mmap entries, and decide if we are allowed to
 * share the pages
 */
int file_mmap_share(struct vm_area_struct * area1,
                    struct vm_area_struct * area2,
                    unsigned long address)
{
        if (area1->vm_inode != area2->vm_inode)
                return 0;
        if (area1->vm_start != area2->vm_start)
                return 0;
        if (area1->vm_end != area2->vm_end)
                return 0;
        if (area1->vm_offset != area2->vm_offset)
                return 0;
        if (area1->vm_page_prot != area2->vm_page_prot)
                return 0;
        return 1;
}

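/*
 * The vm_operations used for a generic mmap of a disk file.
 */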
struct vm_operations_struct file_mmap = {
        NULL,                   /* open */
        file_mmap_free,         /* close */
        file_mmap_nopage,       /* nopage */
        NULL,                   /* wppage */
        file_mmap_share,        /* share */
        NULL,                   /* unmap */
};
