root/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. __do_wp_page
  13. do_wp_page
  14. verify_area
  15. get_empty_page
  16. try_to_share
  17. share_page
  18. get_empty_pgtable
  19. do_no_page
  20. do_page_fault
  21. __bad_pagetable
  22. __bad_page
  23. __zero_page
  24. show_mem
  25. paging_init
  26. mem_init
  27. si_meminfo
  28. file_mmap_nopage
  29. file_mmap_free
  30. file_mmap_share

   1 /*
   2  *  linux/mm/memory.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * demand-loading started 01.12.91 - seems it is high on the list of
   9  * things wanted, and it should be easy to implement. - Linus
  10  */
  11 
  12 /*
  13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  14  * pages started 02.12.91, seems to work. - Linus.
  15  *
  16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17  * would have taken more than the 6M I have free, but it worked well as
  18  * far as I could see.
  19  *
  20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21  */
  22 
  23 /*
  24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25  * thought has to go into this. Oh, well..
  26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27  *              Found it. Everything seems to work now.
  28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29  */
  30 
  31 #include <asm/system.h>
  32 
  33 #include <linux/signal.h>
  34 #include <linux/sched.h>
  35 #include <linux/head.h>
  36 #include <linux/kernel.h>
  37 #include <linux/errno.h>
  38 #include <linux/string.h>
  39 #include <linux/types.h>
  40 #include <linux/ptrace.h>
  41 #include <linux/mman.h>
  42 
  43 unsigned long high_memory = 0;
  44 
  45 extern void sound_mem_init(void);
  46 extern void die_if_kernel(char *,struct pt_regs *,long);
  47 
  48 int nr_swap_pages = 0;
  49 int nr_free_pages = 0;
  50 unsigned long free_page_list = 0;
  51 /*
  52  * The secondary free_page_list is used for malloc() and other things that
  53  * may need pages during interrupts etc. Normal get_free_page() operations
  54  * don't touch it, so it stays as a kind of "panic-list", that can be
  55  * accessed when all other mm tricks have failed.
  56  */
  57 int nr_secondary_pages = 0;
  58 unsigned long secondary_page_list = 0;
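
/*
 * Editor's sketch, not part of the original file: a standalone user-space
 * analogue of the "panic list" described above -- a small reserve that
 * ordinary allocation never touches and that is drawn on only when the
 * primary pool is exhausted. All names below are illustrative, not kernel API.
 */
#include <stdlib.h>

#define RESERVE_PAGES 4
#define PAGE_BYTES    4096

static void *reserve[RESERVE_PAGES];    /* the "secondary" list */
static int reserve_top;

static void reserve_fill(void)
{
        while (reserve_top < RESERVE_PAGES)
                reserve[reserve_top++] = malloc(PAGE_BYTES);
}

static void *alloc_page(int emergency)
{
        void *p = malloc(PAGE_BYTES);   /* stands in for the normal free list */

        if (p)
                return p;
        if (emergency && reserve_top > 0)
                return reserve[--reserve_top];  /* last-ditch reserve */
        return NULL;                    /* all other mm tricks have failed */
}

int main(void)
{
        reserve_fill();
        return alloc_page(1) ? 0 : 1;
}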
  59 
  60 #define copy_page(from,to) \
  61 __asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")
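
/*
 * Editor's note, not part of the original file: the macro above is x86
 * string-move assembly that copies 1024 32-bit words. A portable C
 * equivalent of what it does is simply:
 */
#include <string.h>

static void copy_page_c(unsigned long from, unsigned long to)
{
        memcpy((void *) to, (void *) from, 4096);       /* one full page */
}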
  62 
  63 unsigned short * mem_map = NULL;
  64 
  65 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
  66 
  67 /*
  68  * oom() prints a message (so that the user knows why the process died),
  69  * and gives the process an untrappable SIGKILL.
  70  */
  71 void oom(struct task_struct * task)
  72 {
  73         printk("\nout of memory\n");
  74         task->sigaction[SIGKILL-1].sa_handler = NULL;
  75         task->blocked &= ~(1<<(SIGKILL-1));
  76         send_sig(SIGKILL,task,1);
  77 }
  78 
  79 static void free_one_table(unsigned long * page_dir)
  80 {
  81         int j;
  82         unsigned long pg_table = *page_dir;
  83         unsigned long * page_table;
  84 
  85         if (!pg_table)
  86                 return;
  87         *page_dir = 0;
  88         if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
  89                 printk("Bad page table: [%08x]=%08x\n",page_dir,pg_table);
  90                 return;
  91         }
  92         if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
  93                 return;
  94         page_table = (unsigned long *) (pg_table & PAGE_MASK);
  95         for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
  96                 unsigned long pg = *page_table;
  97                 
  98                 if (!pg)
  99                         continue;
 100                 *page_table = 0;
 101                 if (pg & PAGE_PRESENT)
 102                         free_page(PAGE_MASK & pg);
 103                 else
 104                         swap_free(pg);
 105         }
 106         free_page(PAGE_MASK & pg_table);
 107 }
 108 
 109 /*
 110  * This function clears all user-level page tables of a process - this
 111  * is needed by execve(), so that old pages aren't in the way. Note that
 112  * unlike 'free_page_tables()', this function still leaves a valid
 113  * page-table-tree in memory: it just removes the user pages. The two
 114  * functions are similar, but there is a fundamental difference.
 115  */
 116 void clear_page_tables(struct task_struct * tsk)
 117 {
 118         int i;
 119         unsigned long pg_dir;
 120         unsigned long * page_dir;
 121 
 122         if (!tsk)
 123                 return;
 124         if (tsk == task[0])
 125                 panic("task[0] (swapper) doesn't support exec()\n");
 126         pg_dir = tsk->tss.cr3;
 127         page_dir = (unsigned long *) pg_dir;
 128         if (!page_dir || page_dir == swapper_pg_dir) {
 129                 printk("Trying to clear kernel page-directory: not good\n");
 130                 return;
 131         }
 132         if (mem_map[MAP_NR(pg_dir)] > 1) {
 133                 unsigned long * new_pg;
 134 
 135                 if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
 136                         oom(tsk);
 137                         return;
 138                 }
 139                 for (i = 768 ; i < 1024 ; i++)
 140                         new_pg[i] = page_dir[i];
 141                 free_page(pg_dir);
 142                 tsk->tss.cr3 = (unsigned long) new_pg;
 143                 return;
 144         }
 145         for (i = 0 ; i < 768 ; i++,page_dir++)
 146                 free_one_table(page_dir);
 147         invalidate();
 148         return;
 149 }
 150 
 151 /*
 152  * This function frees up all page tables of a process when it exits.
 153  */
 154 void free_page_tables(struct task_struct * tsk)
 155 {
 156         int i;
 157         unsigned long pg_dir;
 158         unsigned long * page_dir;
 159 
 160         if (!tsk)
 161                 return;
 162         if (tsk == task[0]) {
 163                 printk("task[0] (swapper) killed: unable to recover\n");
 164                 panic("Trying to free up swapper memory space");
 165         }
 166         pg_dir = tsk->tss.cr3;
 167         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 168                 printk("Trying to free kernel page-directory: not good\n");
 169                 return;
 170         }
 171         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 172         if (tsk == current)
 173                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 174         if (mem_map[MAP_NR(pg_dir)] > 1) {
 175                 free_page(pg_dir);
 176                 return;
 177         }
 178         page_dir = (unsigned long *) pg_dir;
 179         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 180                 free_one_table(page_dir);
 181         free_page(pg_dir);
 182         invalidate();
 183 }
 184 
 185 /*
 186  * clone_page_tables() clones the page table for a process - both
 187  * processes will have the exact same pages in memory. There are
 188  * probably races in the memory management with cloning, but we'll
 189  * see..
 190  */
 191 int clone_page_tables(struct task_struct * tsk)
 192 {
 193         unsigned long pg_dir;
 194 
 195         pg_dir = current->tss.cr3;
 196         mem_map[MAP_NR(pg_dir)]++;
 197         tsk->tss.cr3 = pg_dir;
 198         return 0;
 199 }
 200 
 201 /*
 202  * copy_page_tables() just copies the whole process memory range:
 203  * note the special handling of RESERVED (ie kernel) pages, which
 204  * means that they are always shared by all processes.
 205  */
 206 int copy_page_tables(struct task_struct * tsk)
 207 {
 208         int i;
 209         unsigned long old_pg_dir, *old_page_dir;
 210         unsigned long new_pg_dir, *new_page_dir;
 211 
 212         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 213                 return -ENOMEM;
 214         old_pg_dir = current->tss.cr3;
 215         tsk->tss.cr3 = new_pg_dir;
 216         old_page_dir = (unsigned long *) old_pg_dir;
 217         new_page_dir = (unsigned long *) new_pg_dir;
 218         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 219                 int j;
 220                 unsigned long old_pg_table, *old_page_table;
 221                 unsigned long new_pg_table, *new_page_table;
 222 
 223                 old_pg_table = *old_page_dir;
 224                 if (!old_pg_table)
 225                         continue;
 226                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
  227                         printk("copy_page_tables: bad page table: "
  228                                 "probable memory corruption\n");
 229                         *old_page_dir = 0;
 230                         continue;
 231                 }
 232                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 233                         *new_page_dir = old_pg_table;
 234                         continue;
 235                 }
 236                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 237                         free_page_tables(tsk);
 238                         return -ENOMEM;
 239                 }
 240                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 241                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 242                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 243                         unsigned long pg;
 244                         pg = *old_page_table;
 245                         if (!pg)
 246                                 continue;
 247                         if (!(pg & PAGE_PRESENT)) {
 248                                 *new_page_table = swap_duplicate(pg);
 249                                 continue;
 250                         }
 251                         if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
 252                                 pg &= ~PAGE_RW;
 253                         *new_page_table = pg;
 254                         if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
 255                                 continue;
 256                         *old_page_table = pg;
 257                         mem_map[MAP_NR(pg)]++;
 258                 }
 259                 *new_page_dir = new_pg_table | PAGE_TABLE;
 260         }
 261         invalidate();
 262         return 0;
 263 }
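
/*
 * Editor's sketch, not part of the original file: the write-protect step
 * from the inner loop above, applied to a sample pte value in a standalone
 * program. The PG_* bit values are illustrative stand-ins for the kernel's
 * PAGE_* constants (PAGE_COW in particular is architecture-defined).
 */
#include <stdio.h>

#define PG_PRESENT 0x001UL
#define PG_RW      0x002UL
#define PG_COW     0x200UL              /* illustrative bit position */

int main(void)
{
        unsigned long pg = 0x9000UL | PG_PRESENT | PG_RW | PG_COW;

        /* a writable copy-on-write page becomes read-only in both tasks,
           so the first write after fork() faults into do_wp_page() */
        if ((pg & (PG_RW | PG_COW)) == (PG_RW | PG_COW))
                pg &= ~PG_RW;
        printf("pte after fork: %#lx\n", pg);   /* write bit now clear */
        return 0;
}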
 264 
 265 /*
 266  * a more complete version of free_page_tables which performs with page
 267  * granularity.
 268  */
 269 int unmap_page_range(unsigned long from, unsigned long size)
 270 {
 271         unsigned long page, page_dir;
 272         unsigned long *page_table, *dir;
 273         unsigned long poff, pcnt, pc;
 274 
 275         if (from & ~PAGE_MASK) {
 276                 printk("unmap_page_range called with wrong alignment\n");
 277                 return -EINVAL;
 278         }
 279         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 280         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
  281         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 282         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 283                 pcnt = size;
 284 
 285         for ( ; size > 0; ++dir, size -= pcnt,
 286              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 287                 if (!(page_dir = *dir)) {
 288                         poff = 0;
 289                         continue;
 290                 }
 291                 if (!(page_dir & PAGE_PRESENT)) {
  292                         printk("unmap_page_range: bad page directory.\n");
 293                         continue;
 294                 }
 295                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 296                 if (poff) {
 297                         page_table += poff;
 298                         poff = 0;
 299                 }
 300                 for (pc = pcnt; pc--; page_table++) {
 301                         if ((page = *page_table) != 0) {
 302                                 *page_table = 0;
  303                                 if (page & PAGE_PRESENT) {
 304                                         if (!(mem_map[MAP_NR(page)]
 305                                               & MAP_PAGE_RESERVED))
 306                                                 --current->rss;
 307                                         free_page(PAGE_MASK & page);
 308                                 } else
 309                                         swap_free(page);
 310                         }
 311                 }
 312                 if (pcnt == PTRS_PER_PAGE) {
 313                         *dir = 0;
 314                         free_page(PAGE_MASK & page_dir);
 315                 }
 316         }
 317         invalidate();
 318         return 0;
 319 }
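
/*
 * Editor's note, not part of the original file: the expression
 * "(size + ~PAGE_MASK) >> PAGE_SHIFT" used above (and in the two routines
 * below) rounds a byte count up to whole pages, since ~PAGE_MASK is
 * PAGE_SIZE-1. A standalone check of the arithmetic:
 */
#include <stdio.h>

#define PG_SHIFT 12
#define PG_SIZE  (1UL << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))

int main(void)
{
        unsigned long sizes[] = { 1, 4096, 4097, 12288 };
        int i;

        for (i = 0; i < 4; i++)         /* prints 1, 1, 2 and 3 page(s) */
                printf("%5lu bytes -> %lu page(s)\n", sizes[i],
                       (sizes[i] + ~PG_MASK) >> PG_SHIFT);
        return 0;
}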
 320 
 321 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
 322 {
 323         unsigned long *page_table, *dir;
 324         unsigned long poff, pcnt;
 325         unsigned long page;
 326 
 327         if (mask) {
 328                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 329                         printk("zeromap_page_range: mask = %08x\n",mask);
 330                         return -EINVAL;
 331                 }
 332                 mask |= ZERO_PAGE;
 333         }
 334         if (from & ~PAGE_MASK) {
 335                 printk("zeromap_page_range: from = %08x\n",from);
 336                 return -EINVAL;
 337         }
 338         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 339         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
  340         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 341         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 342                 pcnt = size;
 343 
 344         while (size > 0) {
 345                 if (!(PAGE_PRESENT & *dir)) {
 346                                 /* clear page needed here?  SRB. */
 347                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 348                                 invalidate();
 349                                 return -ENOMEM;
 350                         }
 351                         if (PAGE_PRESENT & *dir) {
 352                                 free_page((unsigned long) page_table);
 353                                 page_table = (unsigned long *)(PAGE_MASK & *dir++);
 354                         } else
 355                                 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 356                 } else
 357                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 358                 page_table += poff;
 359                 poff = 0;
 360                 for (size -= pcnt; pcnt-- ;) {
 361                         if ((page = *page_table) != 0) {
 362                                 *page_table = 0;
 363                                 if (page & PAGE_PRESENT) {
 364                                         if (!(mem_map[MAP_NR(page)]
 365                                               & MAP_PAGE_RESERVED))
 366                                                 --current->rss;
 367                                         free_page(PAGE_MASK & page);
 368                                 } else
 369                                         swap_free(page);
 370                         }
 371                         *page_table++ = mask;
 372                 }
 373                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 374         }
 375         invalidate();
 376         return 0;
 377 }
 378 
 379 /*
 380  * maps a range of physical memory into the requested pages. the old
  381  * mappings are removed. any references to nonexistent pages result
 382  * in null mappings (currently treated as "copy-on-access")
 383  */
 384 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
 385 {
 386         unsigned long *page_table, *dir;
 387         unsigned long poff, pcnt;
 388         unsigned long page;
 389 
 390         if (mask) {
 391                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 392                         printk("remap_page_range: mask = %08x\n",mask);
 393                         return -EINVAL;
 394                 }
 395         }
 396         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 397                 printk("remap_page_range: from = %08x, to=%08x\n",from,to);
 398                 return -EINVAL;
 399         }
 400         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 401         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
  402         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 403         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 404                 pcnt = size;
 405 
 406         while (size > 0) {
 407                 if (!(PAGE_PRESENT & *dir)) {
 408                         /* clearing page here, needed?  SRB. */
 409                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 410                                 invalidate();
  411                                 return -ENOMEM;
 412                         }
 413                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 414                 }
 415                 else
 416                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 417                 if (poff) {
 418                         page_table += poff;
 419                         poff = 0;
 420                 }
 421 
 422                 for (size -= pcnt; pcnt-- ;) {
 423                         if ((page = *page_table) != 0) {
 424                                 *page_table = 0;
 425                                 if (PAGE_PRESENT & page) {
 426                                         if (!(mem_map[MAP_NR(page)]
 427                                               & MAP_PAGE_RESERVED))
 428                                                 --current->rss;
 429                                         free_page(PAGE_MASK & page);
 430                                 } else
 431                                         swap_free(page);
 432                         }
 433 
 434                         /*
 435                          * the first condition should return an invalid access
 436                          * when the page is referenced. current assumptions
 437                          * cause it to be treated as demand allocation in some
 438                          * cases.
 439                          */
 440                         if (!mask)
 441                                 *page_table++ = 0;      /* not present */
 442                         else if (to >= high_memory)
 443                                 *page_table++ = (to | mask);
 444                         else if (!mem_map[MAP_NR(to)])
 445                                 *page_table++ = 0;      /* not present */
 446                         else {
 447                                 *page_table++ = (to | mask);
 448                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 449                                         ++current->rss;
 450                                         mem_map[MAP_NR(to)]++;
 451                                 }
 452                         }
 453                         to += PAGE_SIZE;
 454                 }
 455                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 456         }
 457         invalidate();
 458         return 0;
 459 }
 460 
 461 /*
 462  * This function puts a page in memory at the wanted address.
 463  * It returns the physical address of the page gotten, 0 if
 464  * out of memory (either when trying to access page-table or
 465  * page.)
 466  */
 467 unsigned long put_page(struct task_struct * tsk,unsigned long page,
 468         unsigned long address,int prot)
 469 {
 470         unsigned long *page_table;
 471 
 472         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 473                 printk("put_page: prot = %08x\n",prot);
 474         if (page >= high_memory) {
 475                 printk("put_page: trying to put page %p at %p\n",page,address);
 476                 return 0;
 477         }
 478         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 479         if ((*page_table) & PAGE_PRESENT)
 480                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 481         else {
 482                 printk("put_page: bad page directory entry\n");
 483                 oom(tsk);
 484                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 485                 return 0;
 486         }
  487         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 488         if (*page_table) {
 489                 printk("put_page: page already exists\n");
 490                 *page_table = 0;
 491                 invalidate();
 492         }
 493         *page_table = page | prot;
 494 /* no need for invalidate */
 495         return page;
 496 }
 497 
 498 /*
 499  * The previous function doesn't work very well if you also want to mark
 500  * the page dirty: exec.c wants this, as it has earlier changed the page,
 501  * and we want the dirty-status to be correct (for VM). Thus the same
 502  * routine, but this time we mark it dirty too.
 503  */
 504 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
 505 {
 506         unsigned long tmp, *page_table;
 507 
 508         if (page >= high_memory)
 509                 printk("put_dirty_page: trying to put page %p at %p\n",page,address);
 510         if (mem_map[MAP_NR(page)] != 1)
 511                 printk("mem_map disagrees with %p at %p\n",page,address);
 512         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 513         if (PAGE_PRESENT & *page_table)
 514                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 515         else {
 516                 if (!(tmp = get_free_page(GFP_KERNEL)))
 517                         return 0;
 518                 if (PAGE_PRESENT & *page_table) {
 519                         free_page(tmp);
 520                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 521                 } else {
 522                         *page_table = tmp | PAGE_TABLE;
 523                         page_table = (unsigned long *) tmp;
 524                 }
 525         }
  526         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 527         if (*page_table) {
 528                 printk("put_dirty_page: page already exists\n");
 529                 *page_table = 0;
 530                 invalidate();
 531         }
 532         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 533 /* no need for invalidate */
 534         return page;
 535 }
 536 
 537 /*
 538  * This routine handles present pages, when users try to write
 539  * to a shared page. It is done by copying the page to a new address
 540  * and decrementing the shared-page counter for the old page.
 541  *
 542  * Note that we do many checks twice (look at do_wp_page()), as
 543  * we have to be careful about race-conditions.
 544  *
 545  * Goto-purists beware: the only reason for goto's here is that it results
 546  * in better assembly code.. The "default" path will see no jumps at all.
 547  */
 548 static void __do_wp_page(unsigned long error_code, unsigned long address,
 549         struct task_struct * tsk, unsigned long user_esp)
 550 {
 551         unsigned long *pde, pte, old_page, prot;
 552         unsigned long new_page;
 553 
 554         new_page = __get_free_page(GFP_KERNEL);
 555         pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 556         pte = *pde;
 557         if (!(pte & PAGE_PRESENT))
 558                 goto end_wp_page;
 559         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 560                 goto bad_wp_pagetable;
 561         pte &= PAGE_MASK;
 562         pte += PAGE_PTR(address);
 563         old_page = *(unsigned long *) pte;
 564         if (!(old_page & PAGE_PRESENT))
 565                 goto end_wp_page;
 566         if (old_page >= high_memory)
 567                 goto bad_wp_page;
 568         if (old_page & PAGE_RW)
 569                 goto end_wp_page;
 570         tsk->min_flt++;
 571         prot = (old_page & ~PAGE_MASK) | PAGE_RW;
 572         old_page &= PAGE_MASK;
 573         if (mem_map[MAP_NR(old_page)] != 1) {
 574                 if (new_page) {
 575                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 576                                 ++tsk->rss;
 577                         copy_page(old_page,new_page);
 578                         *(unsigned long *) pte = new_page | prot;
 579                         free_page(old_page);
 580                         invalidate();
 581                         return;
 582                 }
 583                 free_page(old_page);
 584                 oom(tsk);
 585                 *(unsigned long *) pte = BAD_PAGE | prot;
 586                 invalidate();
 587                 return;
 588         }
 589         *(unsigned long *) pte |= PAGE_RW;
 590         invalidate();
 591         if (new_page)
 592                 free_page(new_page);
 593         return;
 594 bad_wp_page:
 595         printk("do_wp_page: bogus page at address %08x (%08x)\n",address,old_page);
 596         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 597         send_sig(SIGKILL, tsk, 1);
 598         goto end_wp_page;
 599 bad_wp_pagetable:
 600         printk("do_wp_page: bogus page-table at address %08x (%08x)\n",address,pte);
 601         *pde = BAD_PAGETABLE | PAGE_TABLE;
 602         send_sig(SIGKILL, tsk, 1);
 603 end_wp_page:
 604         if (new_page)
 605                 free_page(new_page);
 606         return;
 607 }
 608 
 609 /*
 610  * check that a page table change is actually needed, and call
 611  * the low-level function only in that case..
 612  */
 613 void do_wp_page(unsigned long error_code, unsigned long address,
 614         struct task_struct * tsk, unsigned long user_esp)
 615 {
 616         unsigned long page;
 617         unsigned long * pg_table;
 618 
 619         pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 620         page = *pg_table;
 621         if (!page)
 622                 return;
 623         if ((page & PAGE_PRESENT) && page < high_memory) {
 624                 pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
 625                 page = *pg_table;
 626                 if (!(page & PAGE_PRESENT))
 627                         return;
 628                 if (page & PAGE_RW)
 629                         return;
 630                 if (!(page & PAGE_COW)) {
 631                         if (user_esp && tsk == current) {
 632                                 send_sig(SIGSEGV, tsk, 1);
 633                                 return;
 634                         }
 635                 }
 636                 if (mem_map[MAP_NR(page)] == 1) {
 637                         *pg_table |= PAGE_RW | PAGE_DIRTY;
 638                         invalidate();
 639                         return;
 640                 }
 641                 __do_wp_page(error_code, address, tsk, user_esp);
 642                 return;
 643         }
 644         printk("bad page directory entry %08x\n",page);
 645         *pg_table = 0;
 646 }
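
/*
 * Editor's sketch, not part of the original file: the effect of the two
 * routines above as seen from a standalone user-space program. After fork()
 * the parent's pages are shared write-protected; the child's first write
 * faults into do_wp_page(), which gives the child a private copy, so the
 * parent's data is unchanged.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        char *page = malloc(4096);
        pid_t pid;

        strcpy(page, "parent data");
        pid = fork();                   /* page tables copied, pages shared */
        if (pid == 0) {
                strcpy(page, "child data");     /* write fault: page copied */
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent still sees: %s\n", page);        /* "parent data" */
        free(page);
        return 0;
}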
 647 
 648 int verify_area(int type, void * addr, unsigned long size)
 649 {
 650         unsigned long start;
 651 
 652         start = (unsigned long) addr;
 653         if (start >= TASK_SIZE)
 654                 return -EFAULT;
 655         if (size > TASK_SIZE - start)
 656                 return -EFAULT;
  657         if (type == VERIFY_READ || !size)
  658                 return 0;
 661         size--;
 662         size += start & ~PAGE_MASK;
 663         size >>= PAGE_SHIFT;
 664         start &= PAGE_MASK;
 665         do {
 666                 do_wp_page(1,start,current,0);
 667                 start += PAGE_SIZE;
 668         } while (size--);
 669         return 0;
 670 }
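
/*
 * Editor's note, not part of the original file: the test
 * "size > TASK_SIZE - start" above is the overflow-safe way of checking
 * "start + size <= TASK_SIZE" -- the naive addition could wrap around and
 * pass. A standalone illustration, using an illustrative 3 GB user limit:
 */
#include <stdio.h>

#define USER_LIMIT 0xc0000000UL         /* illustrative TASK_SIZE */

static int range_ok(unsigned long start, unsigned long size)
{
        if (start >= USER_LIMIT)
                return 0;
        if (size > USER_LIMIT - start)  /* subtraction cannot overflow */
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", range_ok(0xb0000000UL, 0xffffffffUL));   /* 0: rejected */
        printf("%d\n", range_ok(0x10000000UL, 0x1000UL));       /* 1: accepted */
        return 0;
}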
 671 
 672 static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
 673 {
 674         unsigned long tmp;
 675 
 676         if (!(tmp = get_free_page(GFP_KERNEL))) {
 677                 oom(tsk);
 678                 tmp = BAD_PAGE;
 679         }
 680         if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
 681                 free_page(tmp);
 682 }
 683 
 684 /*
 685  * try_to_share() checks the page at address "address" in the task "p",
 686  * to see if it exists, and if it is clean. If so, share it with the current
 687  * task.
 688  *
 689  * NOTE! This assumes we have checked that p != current, and that they
 690  * share the same executable or library.
 691  *
 692  * We may want to fix this to allow page sharing for PIC pages at different
 693  * addresses so that ELF will really perform properly. As long as the vast
 694  * majority of sharable libraries load at fixed addresses this is not a
 695  * big concern. Any sharing of pages between the buffer cache and the
 696  * code space reduces the need for this as well.  - ERY
 697  */
 698 static int try_to_share(unsigned long address, struct task_struct * tsk,
 699         struct task_struct * p, unsigned long error_code, unsigned long newpage)
 700 {
 701         unsigned long from;
 702         unsigned long to;
 703         unsigned long from_page;
 704         unsigned long to_page;
 705 
 706         from_page = (unsigned long)PAGE_DIR_OFFSET(p->tss.cr3,address);
 707         to_page = (unsigned long)PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 708 /* is there a page-directory at from? */
 709         from = *(unsigned long *) from_page;
 710         if (!(from & PAGE_PRESENT))
 711                 return 0;
 712         from &= PAGE_MASK;
 713         from_page = from + PAGE_PTR(address);
 714         from = *(unsigned long *) from_page;
 715 /* is the page clean and present? */
 716         if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
 717                 return 0;
 718         if (from >= high_memory)
 719                 return 0;
 720         if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
 721                 return 0;
 722 /* is the destination ok? */
 723         to = *(unsigned long *) to_page;
 724         if (!(to & PAGE_PRESENT))
 725                 return 0;
 726         to &= PAGE_MASK;
 727         to_page = to + PAGE_PTR(address);
 728         if (*(unsigned long *) to_page)
 729                 return 0;
 730 /* share them if read - do COW immediately otherwise */
 731         if (error_code & PAGE_RW) {
 732                 if(!newpage)    /* did the page exist?  SRB. */
 733                         return 0;
 734                 copy_page((from & PAGE_MASK),newpage);
 735                 to = newpage | PAGE_PRIVATE;
 736         } else {
 737                 mem_map[MAP_NR(from)]++;
 738                 from &= ~PAGE_RW;
 739                 to = from;
 740                 if(newpage)     /* only if it existed. SRB. */
 741                         free_page(newpage);
 742         }
 743         *(unsigned long *) from_page = from;
 744         *(unsigned long *) to_page = to;
 745         invalidate();
 746         return 1;
 747 }
 748 
 749 /*
 750  * share_page() tries to find a process that could share a page with
 751  * the current one. Address is the address of the wanted page relative
 752  * to the current data space.
 753  *
 754  * We first check if it is at all feasible by checking executable->i_count.
 755  * It should be >1 if there are other tasks sharing this inode.
 756  */
 757 int share_page(struct vm_area_struct * area, struct task_struct * tsk,
 758         struct inode * inode,
 759         unsigned long address, unsigned long error_code, unsigned long newpage)
 760 {
 761         struct task_struct ** p;
 762 
 763         if (!inode || inode->i_count < 2)
 764                 return 0;
 765         for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
 766                 if (!*p)
 767                         continue;
 768                 if (tsk == *p)
 769                         continue;
  770                 if (inode != (*p)->executable) {
  771                         struct vm_area_struct * mpnt;
  772                         if (!area)
  773                                 continue;
  774                         /* Now see if there is something in the VMM that
  775                            we can share pages with */
  776                         for (mpnt = (*p)->mmap; mpnt; mpnt = mpnt->vm_next) {
  777                                 if (mpnt->vm_ops && mpnt->vm_ops == area->vm_ops &&
  778                                     mpnt->vm_inode->i_ino == area->vm_inode->i_ino &&
  779                                     mpnt->vm_inode->i_dev == area->vm_inode->i_dev) {
  780                                         if (mpnt->vm_ops->share(mpnt, area, address))
  781                                                 break;
  782                                 }
  783                         }
  784                         if (!mpnt)
  785                                 continue;       /* Nope.  Nuthin here */
  786                 }
 787                 if (try_to_share(address,tsk,*p,error_code,newpage))
 788                         return 1;
 789         }
 790         return 0;
 791 }
 792 
 793 /*
 794  * fill in an empty page-table if none exists.
 795  */
 796 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
 797 {
 798         unsigned long page;
 799         unsigned long *p;
 800 
 801         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 802         if (PAGE_PRESENT & *p)
 803                 return *p;
 804         if (*p) {
  805                 printk("get_empty_pgtable: bad page-directory entry\n");
 806                 *p = 0;
 807         }
 808         page = get_free_page(GFP_KERNEL);
 809         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 810         if (PAGE_PRESENT & *p) {
 811                 free_page(page);
 812                 return *p;
 813         }
 814         if (*p) {
  815                 printk("get_empty_pgtable: bad page-directory entry\n");
 816                 *p = 0;
 817         }
 818         if (page) {
 819                 *p = page | PAGE_TABLE;
 820                 return *p;
 821         }
 822         oom(current);
 823         *p = BAD_PAGETABLE | PAGE_TABLE;
 824         return 0;
 825 }
 826 
 827 void do_no_page(unsigned long error_code, unsigned long address,
 828         struct task_struct *tsk, unsigned long user_esp)
 829 {
 830         unsigned long tmp;
 831         unsigned long page;
 832         struct vm_area_struct * mpnt;
 833 
 834         page = get_empty_pgtable(tsk,address);
 835         if (!page)
 836                 return;
 837         page &= PAGE_MASK;
 838         page += PAGE_PTR(address);
 839         tmp = *(unsigned long *) page;
 840         if (tmp & PAGE_PRESENT)
 841                 return;
 842         ++tsk->rss;
 843         if (tmp) {
 844                 ++tsk->maj_flt;
 845                 swap_in((unsigned long *) page);
 846                 return;
 847         }
 848         address &= 0xfffff000;
 849         for (mpnt = tsk->mmap ; mpnt ; mpnt = mpnt->vm_next) {
 850                 if (address < mpnt->vm_start)
 851                         continue;
 852                 if (address >= ((mpnt->vm_end + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)))
 853                         continue;
 854                 mpnt->vm_ops->nopage(error_code, mpnt, address);
 855                 return;
 856         }
 857         ++tsk->min_flt;
 858         get_empty_page(tsk,address);
 859         if (tsk != current)
 860                 return;
 861         if (address < tsk->brk)
 862                 return;
 863         if (address+8192 >= (user_esp & 0xfffff000) && 
 864             address <= current->start_stack)
 865                 return;
 866         send_sig(SIGSEGV,tsk,1);
 867         return;
 868 }
 869 
 870 /*
 871  * This routine handles page faults.  It determines the address,
 872  * and the problem, and then passes it off to one of the appropriate
 873  * routines.
 874  */
 875 extern "C" void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 876 {
 877         unsigned long address;
 878         unsigned long user_esp = 0;
 879         unsigned long stack_limit;
 880         unsigned int bit;
 881 
 882         /* get the address */
 883         __asm__("movl %%cr2,%0":"=r" (address));
 884         if (address < TASK_SIZE) {
 885                 if (error_code & 4) {   /* user mode access? */
 886                         if (regs->eflags & VM_MASK) {
 887                                 bit = (address - 0xA0000) >> PAGE_SHIFT;
 888                                 if (bit < 32)
 889                                         current->screen_bitmap |= 1 << bit;
 890                         } else 
 891                                 user_esp = regs->esp;
 892                 }
 893                 if (error_code & 1)
 894                         do_wp_page(error_code, address, current, user_esp);
 895                 else
 896                         do_no_page(error_code, address, current, user_esp);
 897                 if (!user_esp)
 898                         return;
 899                 stack_limit = current->rlim[RLIMIT_STACK].rlim_cur;
 900                 if (stack_limit >= RLIM_INFINITY)
 901                         return;
 902                 if (stack_limit >= current->start_stack)
 903                         return;
 904                 stack_limit = current->start_stack - stack_limit;
 905                 if (user_esp < stack_limit)
 906                         send_sig(SIGSEGV, current, 1);
 907                 return;
 908         }
 909         printk("Unable to handle kernel paging request at address %08x\n",address);
 910         die_if_kernel("Oops", regs, error_code);
 911         do_exit(SIGKILL);
 912 }
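
/*
 * Editor's sketch, not part of the original file: the 386 page-fault error
 * code tested above packs three bits -- bit 0 set means a protection
 * violation (clear means the page was not present), bit 1 means a write
 * access, bit 2 means the fault happened in user mode. A standalone decoder:
 */
#include <stdio.h>

static void decode(unsigned long error_code)
{
        printf("%s %s from %s mode\n",
               (error_code & 1) ? "protection fault" : "missing page",
               (error_code & 2) ? "on write" : "on read",
               (error_code & 4) ? "user" : "kernel");
}

int main(void)
{
        decode(7);      /* user write to a protected page -> do_wp_page() */
        decode(4);      /* user read of a missing page    -> do_no_page() */
        return 0;
}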
 913 
 914 /*
 915  * BAD_PAGE is the page that is used for page faults when linux
 916  * is out-of-memory. Older versions of linux just did a
 917  * do_exit(), but using this instead means there is less risk
  918  * for a process dying in kernel mode, possibly leaving an inode
 919  * unused etc..
 920  *
 921  * BAD_PAGETABLE is the accompanying page-table: it is initialized
 922  * to point to BAD_PAGE entries.
 923  *
 924  * ZERO_PAGE is a special page that is used for zero-initialized
 925  * data and COW.
 926  */
 927 unsigned long __bad_pagetable(void)
 928 {
 929         extern char empty_bad_page_table[PAGE_SIZE];
 930 
 931         __asm__ __volatile__("cld ; rep ; stosl":
 932                 :"a" (BAD_PAGE + PAGE_TABLE),
 933                  "D" ((long) empty_bad_page_table),
 934                  "c" (PTRS_PER_PAGE)
 935                 :"di","cx");
 936         return (unsigned long) empty_bad_page_table;
 937 }
 938 
 939 unsigned long __bad_page(void)
 940 {
 941         extern char empty_bad_page[PAGE_SIZE];
 942 
 943         __asm__ __volatile__("cld ; rep ; stosl":
 944                 :"a" (0),
 945                  "D" ((long) empty_bad_page),
 946                  "c" (PTRS_PER_PAGE)
 947                 :"di","cx");
 948         return (unsigned long) empty_bad_page;
 949 }
 950 
 951 unsigned long __zero_page(void)
 952 {
 953         extern char empty_zero_page[PAGE_SIZE];
 954 
 955         __asm__ __volatile__("cld ; rep ; stosl":
 956                 :"a" (0),
 957                  "D" ((long) empty_zero_page),
 958                  "c" (PTRS_PER_PAGE)
 959                 :"di","cx");
 960         return (unsigned long) empty_zero_page;
 961 }
 962 
 963 void show_mem(void)
 964 {
 965         int i,free = 0,total = 0,reserved = 0;
 966         int shared = 0;
 967 
 968         printk("Mem-info:\n");
  969         printk("Free pages:      %6dkB\n",nr_free_pages<<(PAGE_SHIFT-10));
  970         printk("Secondary pages: %6dkB\n",nr_secondary_pages<<(PAGE_SHIFT-10));
  971         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 972         printk("Buffer memory:   %6dkB\n",buffermem>>10);
 973         printk("Buffer heads:    %6d\n",nr_buffer_heads);
 974         printk("Buffer blocks:   %6d\n",nr_buffers);
 975         i = high_memory >> PAGE_SHIFT;
 976         while (i-- > 0) {
 977                 total++;
 978                 if (mem_map[i] & MAP_PAGE_RESERVED)
 979                         reserved++;
 980                 else if (!mem_map[i])
 981                         free++;
 982                 else
 983                         shared += mem_map[i]-1;
 984         }
 985         printk("%d pages of RAM\n",total);
 986         printk("%d free pages\n",free);
 987         printk("%d reserved pages\n",reserved);
 988         printk("%d pages shared\n",shared);
 989 }
 990 
 991 /*
 992  * paging_init() sets up the page tables - note that the first 4MB are
 993  * already mapped by head.S.
 994  *
  995  * This routine also unmaps the page at virtual kernel address 0, so
 996  * that we can trap those pesky NULL-reference errors in the kernel.
 997  */
 998 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
 999 {
1000         unsigned long * pg_dir;
1001         unsigned long * pg_table;
1002         unsigned long tmp;
1003         unsigned long address;
1004 
1005 /*
1006  * Physical page 0 is special: it's a "zero-page", and is guaranteed to
1007  * stay that way - it's write-protected and when there is a c-o-w, the
1008  * mm handler treats it specially.
1009  */
1010         memset((void *) 0, 0, PAGE_SIZE);
1011         start_mem = PAGE_ALIGN(start_mem);
1012         address = 0;
1013         pg_dir = swapper_pg_dir;
1014         while (address < end_mem) {
1015                 tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
1016                 if (!tmp) {
1017                         tmp = start_mem | PAGE_TABLE;
1018                         *(pg_dir + 768) = tmp;
1019                         start_mem += PAGE_SIZE;
1020                 }
 1021                 *pg_dir = tmp;                  /* also map it in at 0x00000000 for init */
1022                 pg_dir++;
1023                 pg_table = (unsigned long *) (tmp & PAGE_MASK);
1024                 for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
1025                         if (address && address < end_mem)
1026                                 *pg_table = address | PAGE_SHARED;
1027                         else
1028                                 *pg_table = 0;
1029                         address += PAGE_SIZE;
1030                 }
1031         }
1032         invalidate();
1033         return start_mem;
1034 }
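
/*
 * Editor's note, not part of the original file: why "pg_dir + 768" above is
 * the kernel mapping. Each page-directory slot covers 4 MB (1024 pages of
 * 4 KB), so the slot index for a virtual address is address >> 22. A
 * standalone check:
 */
#include <stdio.h>

int main(void)
{
        unsigned long kernel_base = 0xc0000000UL;

        printf("pg_dir index = %lu\n", kernel_base >> 22);      /* 768 */
        return 0;
}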
1035 
1036 void mem_init(unsigned long start_low_mem,
1037               unsigned long start_mem, unsigned long end_mem)
1038 {
1039         int codepages = 0;
1040         int reservedpages = 0;
1041         int datapages = 0;
1042         unsigned long tmp;
1043         unsigned short * p;
1044         extern int etext;
1045 
1046         cli();
1047         end_mem &= PAGE_MASK;
1048         high_memory = end_mem;
1049         start_mem +=  0x0000000f;
1050         start_mem &= ~0x0000000f;
1051         tmp = MAP_NR(end_mem);
1052         mem_map = (unsigned short *) start_mem;
1053         p = mem_map + tmp;
1054         start_mem = (unsigned long) p;
1055         while (p > mem_map)
1056                 *--p = MAP_PAGE_RESERVED;
1057         start_low_mem = PAGE_ALIGN(start_low_mem);
1058         start_mem = PAGE_ALIGN(start_mem);
1059         while (start_low_mem < 0xA0000) {
1060                 mem_map[MAP_NR(start_low_mem)] = 0;
1061                 start_low_mem += PAGE_SIZE;
1062         }
1063         while (start_mem < end_mem) {
1064                 mem_map[MAP_NR(start_mem)] = 0;
1065                 start_mem += PAGE_SIZE;
1066         }
1067         sound_mem_init();
1068         free_page_list = 0;
1069         nr_free_pages = 0;
1070         for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
1071                 if (mem_map[MAP_NR(tmp)]) {
1072                         if (tmp >= 0xA0000 && tmp < 0x100000)
1073                                 reservedpages++;
1074                         else if (tmp < (unsigned long) &etext)
1075                                 codepages++;
1076                         else
1077                                 datapages++;
1078                         continue;
1079                 }
1080                 *(unsigned long *) tmp = free_page_list;
1081                 free_page_list = tmp;
1082                 nr_free_pages++;
1083         }
1084         tmp = nr_free_pages << PAGE_SHIFT;
1085         printk("Memory: %dk/%dk available (%dk kernel code, %dk reserved, %dk data)\n",
1086                 tmp >> 10,
1087                 end_mem >> 10,
 1088                 codepages << (PAGE_SHIFT-10),
 1089                 reservedpages << (PAGE_SHIFT-10),
 1090                 datapages << (PAGE_SHIFT-10));
1091         return;
1092 }
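
/*
 * Editor's sketch, not part of the original file: mem_init() above threads
 * the free pages into a list whose links are stored in the first word of
 * each free page itself. A standalone user-space simulation of that
 * push/pop, with malloc() standing in for physical pages:
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long free_list;         /* 0 means the list is empty */

static void push_page(unsigned long page)
{
        *(unsigned long *) page = free_list;    /* link lives in the page */
        free_list = page;
}

static unsigned long pop_page(void)
{
        unsigned long page = free_list;

        if (page)
                free_list = *(unsigned long *) page;
        return page;
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                push_page((unsigned long) malloc(4096));
        printf("got page at %#lx\n", pop_page());
        return 0;
}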
1093 
1094 void si_meminfo(struct sysinfo *val)
1095 {
1096         int i;
1097 
1098         i = high_memory >> PAGE_SHIFT;
1099         val->totalram = 0;
1100         val->freeram = 0;
1101         val->sharedram = 0;
1102         val->bufferram = buffermem;
1103         while (i-- > 0)  {
1104                 if (mem_map[i] & MAP_PAGE_RESERVED)
1105                         continue;
1106                 val->totalram++;
1107                 if (!mem_map[i]) {
1108                         val->freeram++;
1109                         continue;
1110                 }
1111                 val->sharedram += mem_map[i]-1;
1112         }
1113         val->totalram <<= PAGE_SHIFT;
1114         val->freeram <<= PAGE_SHIFT;
1115         val->sharedram <<= PAGE_SHIFT;
1116         return;
1117 }
1118 
1119 
1120 /* This handles a generic mmap of a disk file */
1121 void file_mmap_nopage(int error_code, struct vm_area_struct * area, unsigned long address)
1122 {
1123         struct inode * inode = area->vm_inode;
1124         unsigned int block;
1125         unsigned int clear;
1126         unsigned long page;
1127         unsigned long tmp;
1128         int nr[8];
1129         int i, j;
1130         int prot = area->vm_page_prot; /* prot for buffer cache.. */
1131 
1132         address &= PAGE_MASK;
1133         block = address - area->vm_start + area->vm_offset;
1134         block >>= inode->i_sb->s_blocksize_bits;
1135 
1136         page = get_free_page(GFP_KERNEL);
1137         if (share_page(area, area->vm_task, inode, address, error_code, page)) {
1138                 ++area->vm_task->min_flt;
1139                 return;
1140         }
1141 
1142         ++area->vm_task->maj_flt;
1143         if (!page) {
1144                 oom(current);
1145                 put_page(area->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
1146                 return;
1147         }
 1148         for (i=0, j=0; i < PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
1149                 nr[j] = bmap(inode,block);
1150 
1151         /*
1152          * If we don't mmap a whole page, we have to clear the end of the page,
1153          * which also means that we can't share the page with the buffer cache.
 1154          * This is easy to handle by giving 'bread_page()' a protection mask
1155          * that contains PAGE_RW, as the cache code won't try to share then..
1156          */
1157         clear = 0;
1158         if (address + PAGE_SIZE > area->vm_end) {
1159                 clear = address + PAGE_SIZE - area->vm_end;
1160                 prot |= PAGE_RW;
1161         }
1162         page = bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, prot);
1163 
1164         if (!(error_code & PAGE_RW)) {
1165                 if (share_page(area, area->vm_task, inode, address, error_code, page))
1166                         return;
1167         }
1168 
1169         tmp = page + PAGE_SIZE;
1170         while (clear--) {
1171                 *(char *)--tmp = 0;
1172         }
1173         if (put_page(area->vm_task,page,address,area->vm_page_prot))
1174                 return;
1175         free_page(page);
1176         oom(current);
1177 }
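
/*
 * Editor's note, not part of the original file: the "clear" computation
 * above, worked through on sample numbers in a standalone program. A mapping
 * that ends mid-page leaves a tail that must be zeroed rather than filled
 * from the file:
 */
#include <stdio.h>

#define PG_SIZE 4096UL

int main(void)
{
        unsigned long address = 0x8000;         /* page-aligned fault address */
        unsigned long vm_end  = 0x8a00;         /* mapping ends mid-page */
        unsigned long clear = 0;

        if (address + PG_SIZE > vm_end)
                clear = address + PG_SIZE - vm_end;
        printf("zero the last %lu bytes\n", clear);     /* prints 1536 */
        return 0;
}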
1178 
1179 void file_mmap_free(struct vm_area_struct * area)
1180 {
1181         if (area->vm_inode)
1182                 iput(area->vm_inode);
1183 #if 0
1184         if (area->vm_inode)
1185                 printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev, 
1186                                  area->vm_inode->i_ino, area->vm_inode->i_count);
1187 #endif
1188 }
1189 
1190 /*
1191  * Compare the contents of the mmap entries, and decide if we are allowed to
1192  * share the pages
1193  */
1194 int file_mmap_share(struct vm_area_struct * area1, 
1195                     struct vm_area_struct * area2, 
1196                     unsigned long address)
1197 {
1198         if (area1->vm_inode != area2->vm_inode)
1199                 return 0;
1200         if (area1->vm_start != area2->vm_start)
1201                 return 0;
1202         if (area1->vm_end != area2->vm_end)
1203                 return 0;
1204         if (area1->vm_offset != area2->vm_offset)
1205                 return 0;
1206         if (area1->vm_page_prot != area2->vm_page_prot)
1207                 return 0;
1208         return 1;
1209 }
1210 
1211 struct vm_operations_struct file_mmap = {
1212         NULL,                   /* open */
1213         file_mmap_free,         /* close */
1214         file_mmap_nopage,       /* nopage */
1215         NULL,                   /* wppage */
1216         file_mmap_share,        /* share */
1217 };
