linux/mm/memory.c


DEFINITIONS

This source file includes the following definitions.
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. __do_wp_page
  13. do_wp_page
  14. __verify_write
  15. get_empty_page
  16. try_to_share
  17. share_page
  18. get_empty_pgtable
  19. do_no_page
  20. do_page_fault
  21. __bad_pagetable
  22. __bad_page
  23. __zero_page
  24. show_mem
  25. paging_init
  26. mem_init
  27. si_meminfo
  28. file_mmap_nopage
  29. file_mmap_free
  30. file_mmap_share

   1 /*
   2  *  linux/mm/memory.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  */
   6 
   7 /*
   8  * demand-loading started 01.12.91 - seems it is high on the list of
   9  * things wanted, and it should be easy to implement. - Linus
  10  */
  11 
  12 /*
  13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  14  * pages started 02.12.91, seems to work. - Linus.
  15  *
  16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17  * would have taken more than the 6M I have free, but it worked well as
  18  * far as I could see.
  19  *
  20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21  */
  22 
  23 /*
  24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25  * thought has to go into this. Oh, well..
  26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27  *              Found it. Everything seems to work now.
  28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29  */
  30 
  31 /*
  32  * 05.04.94  -  Multi-page memory management added for v1.1.
  33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  34  */
  35 
  36 #include <asm/system.h>
  37 #include <linux/config.h>
  38 
  39 #include <linux/signal.h>
  40 #include <linux/sched.h>
  41 #include <linux/head.h>
  42 #include <linux/kernel.h>
  43 #include <linux/errno.h>
  44 #include <linux/string.h>
  45 #include <linux/types.h>
  46 #include <linux/ptrace.h>
  47 #include <linux/mman.h>
  48 
  49 unsigned long high_memory = 0;
  50 
  51 extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
  52 
  53 extern void sound_mem_init(void);
  54 extern void die_if_kernel(char *,struct pt_regs *,long);
  55 
  56 /*
  57  * The free_area_list arrays point to the queue heads of the free areas
  58  * of different sizes
  59  */
  60 int nr_swap_pages = 0;
  61 int nr_free_pages = 0;
  62 struct mem_list free_area_list[NR_MEM_LISTS];
  63 unsigned char * free_area_map[NR_MEM_LISTS];
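/*
 * (Editor's sketch, not part of the original source: list k above chains
 * free blocks of 2^k pages, so the byte count covered by one block is a
 * simple shift - a minimal illustration, assuming the usual 4kB i386
 * pages and the NR_MEM_LISTS orders set up in mem_init() below:)
 */
#if 0
static unsigned long free_area_block_size(int order)
{
        /* order 0 = 4kB, order 1 = 8kB, ..., doubling per list */
        return PAGE_SIZE << order;
}
#endif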
  64 
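/*
 * copy_page() copies one full page: "rep movsl" moves 1024 longs of
 * 4 bytes each, i.e. exactly PAGE_SIZE (4096) bytes, from "from" to "to".
 */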
  65 #define copy_page(from,to) \
  66 __asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")
  67 
  68 unsigned short * mem_map = NULL;
  69 
  70 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
  71 
  72 /*
  73  * oom() prints a message (so that the user knows why the process died),
  74  * and gives the process an untrappable SIGKILL.
  75  */
  76 void oom(struct task_struct * task)
  77 {
  78         printk("\nout of memory\n");
  79         task->sigaction[SIGKILL-1].sa_handler = NULL;
  80         task->blocked &= ~(1<<(SIGKILL-1));
  81         send_sig(SIGKILL,task,1);
  82 }
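/*
 * (Editor's sketch, not part of the original source: the three steps in
 * oom() - clear the user's handler, unblock the signal, then send it -
 * are the generic recipe for an untrappable signal.  The hypothetical
 * helper below merely restates that pattern for any signal number:)
 */
#if 0
static void force_sig_sketch(int signr, struct task_struct * p)
{
        p->sigaction[signr-1].sa_handler = NULL;        /* no user handler */
        p->blocked &= ~(1<<(signr-1));                  /* cannot be masked */
        send_sig(signr,p,1);                            /* 1 == from kernel */
}
#endif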
  83 
  84 static void free_one_table(unsigned long * page_dir)
  85 {
  86         int j;
  87         unsigned long pg_table = *page_dir;
  88         unsigned long * page_table;
  89 
  90         if (!pg_table)
  91                 return;
  92         *page_dir = 0;
  93         if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
  94                 printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
  95                 return;
  96         }
  97         if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
  98                 return;
  99         page_table = (unsigned long *) (pg_table & PAGE_MASK);
 100         for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
 101                 unsigned long pg = *page_table;
 102                 
 103                 if (!pg)
 104                         continue;
 105                 *page_table = 0;
 106                 if (pg & PAGE_PRESENT)
 107                         free_page(PAGE_MASK & pg);
 108                 else
 109                         swap_free(pg);
 110         }
 111         free_page(PAGE_MASK & pg_table);
 112 }
 113 
 114 /*
 115  * This function clears all user-level page tables of a process - this
 116  * is needed by execve(), so that old pages aren't in the way. Note that
 117  * unlike 'free_page_tables()', this function still leaves a valid
 118  * page-table-tree in memory: it just removes the user pages. The two
 119  * functions are similar, but there is a fundamental difference.
 120  */
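/*
 * (Editor's note, not in the original source: the magic 768 below comes
 * from the 3GB/1GB split - one page-directory entry maps 4MB, so entries
 * 0-767 cover the user range 0..0xBFFFFFFF (768 * 4MB = 3GB) and entries
 * 768-1023 cover the kernel mapping at 0xC0000000 and up, which must be
 * preserved across execve().)
 */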
 121 void clear_page_tables(struct task_struct * tsk)
 122 {
 123         int i;
 124         unsigned long pg_dir;
 125         unsigned long * page_dir;
 126 
 127         if (!tsk)
 128                 return;
 129         if (tsk == task[0])
 130                 panic("task[0] (swapper) doesn't support exec()\n");
 131         pg_dir = tsk->tss.cr3;
 132         page_dir = (unsigned long *) pg_dir;
 133         if (!page_dir || page_dir == swapper_pg_dir) {
 134                 printk("Trying to clear kernel page-directory: not good\n");
 135                 return;
 136         }
 137         if (mem_map[MAP_NR(pg_dir)] > 1) {
 138                 unsigned long * new_pg;
 139 
 140                 if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
 141                         oom(tsk);
 142                         return;
 143                 }
 144                 for (i = 768 ; i < 1024 ; i++)
 145                         new_pg[i] = page_dir[i];
 146                 free_page(pg_dir);
 147                 tsk->tss.cr3 = (unsigned long) new_pg;
 148                 return;
 149         }
 150         for (i = 0 ; i < 768 ; i++,page_dir++)
 151                 free_one_table(page_dir);
 152         invalidate();
 153         return;
 154 }
 155 
 156 /*
 157  * This function frees up all page tables of a process when it exits.
 158  */
 159 void free_page_tables(struct task_struct * tsk)
 160 {
 161         int i;
 162         unsigned long pg_dir;
 163         unsigned long * page_dir;
 164 
 165         if (!tsk)
 166                 return;
 167         if (tsk == task[0]) {
 168                 printk("task[0] (swapper) killed: unable to recover\n");
 169                 panic("Trying to free up swapper memory space");
 170         }
 171         pg_dir = tsk->tss.cr3;
 172         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 173                 printk("Trying to free kernel page-directory: not good\n");
 174                 return;
 175         }
 176         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 177         if (tsk == current)
 178                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 179         if (mem_map[MAP_NR(pg_dir)] > 1) {
 180                 free_page(pg_dir);
 181                 return;
 182         }
 183         page_dir = (unsigned long *) pg_dir;
 184         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 185                 free_one_table(page_dir);
 186         free_page(pg_dir);
 187         invalidate();
 188 }
 189 
 190 /*
 191  * clone_page_tables() clones the page table for a process - both
 192  * processes will have the exact same pages in memory. There are
 193  * probably races in the memory management with cloning, but we'll
 194  * see..
 195  */
 196 int clone_page_tables(struct task_struct * tsk)
 197 {
 198         unsigned long pg_dir;
 199 
 200         pg_dir = current->tss.cr3;
 201         mem_map[MAP_NR(pg_dir)]++;
 202         tsk->tss.cr3 = pg_dir;
 203         return 0;
 204 }
 205 
 206 /*
 207  * copy_page_tables() just copies the whole process memory range:
 208  * note the special handling of RESERVED (ie kernel) pages, which
 209  * means that they are always shared by all processes.
 210  */
 211 int copy_page_tables(struct task_struct * tsk)
 212 {
 213         int i;
 214         unsigned long old_pg_dir, *old_page_dir;
 215         unsigned long new_pg_dir, *new_page_dir;
 216 
 217         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 218                 return -ENOMEM;
 219         old_pg_dir = current->tss.cr3;
 220         tsk->tss.cr3 = new_pg_dir;
 221         old_page_dir = (unsigned long *) old_pg_dir;
 222         new_page_dir = (unsigned long *) new_pg_dir;
 223         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 224                 int j;
 225                 unsigned long old_pg_table, *old_page_table;
 226                 unsigned long new_pg_table, *new_page_table;
 227 
 228                 old_pg_table = *old_page_dir;
 229                 if (!old_pg_table)
 230                         continue;
 231                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
 232                         printk("copy_page_tables: bad page table: "
 233                                 "probable memory corruption\n");
 234                         *old_page_dir = 0;
 235                         continue;
 236                 }
 237                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 238                         *new_page_dir = old_pg_table;
 239                         continue;
 240                 }
 241                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 242                         free_page_tables(tsk);
 243                         return -ENOMEM;
 244                 }
 245                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 246                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 247                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 248                         unsigned long pg;
 249                         pg = *old_page_table;
 250                         if (!pg)
 251                                 continue;
 252                         if (!(pg & PAGE_PRESENT)) {
 253                                 *new_page_table = swap_duplicate(pg);
 254                                 continue;
 255                         }
 256                         if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
 257                                 pg &= ~PAGE_RW;
 258                         *new_page_table = pg;
 259                         if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
 260                                 continue;
 261                         *old_page_table = pg;
 262                         mem_map[MAP_NR(pg)]++;
 263                 }
 264                 *new_page_dir = new_pg_table | PAGE_TABLE;
 265         }
 266         invalidate();
 267         return 0;
 268 }
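/*
 * (Editor's sketch, not part of the original source: the heart of the
 * inner loop above is the copy-on-write marking step.  For one present,
 * non-reserved pte it boils down to this, using the PAGE_* bits of this
 * kernel:)
 */
#if 0
static unsigned long cow_one_pte(unsigned long pte)
{
        /* a writable private page loses PAGE_RW in *both* page tables;
           the first write from either process then faults into
           do_wp_page(), which gives the writer a private copy */
        if ((pte & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
                pte &= ~PAGE_RW;
        mem_map[MAP_NR(pte)]++;         /* one more user of this frame */
        return pte;
}
#endif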
 269 
 270 /*
 271  * a more complete version of free_page_tables which performs with page
 272  * granularity.
 273  */
 274 int unmap_page_range(unsigned long from, unsigned long size)
 275 {
 276         unsigned long page, page_dir;
 277         unsigned long *page_table, *dir;
 278         unsigned long poff, pcnt, pc;
 279 
 280         if (from & ~PAGE_MASK) {
 281                 printk("unmap_page_range called with wrong alignment\n");
 282                 return -EINVAL;
 283         }
 284         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 285         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 286         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 287         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 288                 pcnt = size;
 289 
 290         for ( ; size > 0; ++dir, size -= pcnt,
 291              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 292                 if (!(page_dir = *dir)) {
 293                         poff = 0;
 294                         continue;
 295                 }
 296                 if (!(page_dir & PAGE_PRESENT)) {
 297                         printk("unmap_page_range: bad page directory.\n");
 298                         continue;
 299                 }
 300                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 301                 if (poff) {
 302                         page_table += poff;
 303                         poff = 0;
 304                 }
 305                 for (pc = pcnt; pc--; page_table++) {
 306                         if ((page = *page_table) != 0) {
 307                                 *page_table = 0;
 308                                 if (page & PAGE_PRESENT) {
 309                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 310                                                 if (current->rss > 0)
 311                                                         --current->rss;
 312                                         free_page(PAGE_MASK & page);
 313                                 } else
 314                                         swap_free(page);
 315                         }
 316                 }
 317                 if (pcnt == PTRS_PER_PAGE) {
 318                         *dir = 0;
 319                         free_page(PAGE_MASK & page_dir);
 320                 }
 321         }
 322         invalidate();
 323         return 0;
 324 }
 325 
 326 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
 327 {
 328         unsigned long *page_table, *dir;
 329         unsigned long poff, pcnt;
 330         unsigned long page;
 331 
 332         if (mask) {
 333                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 334                         printk("zeromap_page_range: mask = %08x\n",mask);
 335                         return -EINVAL;
 336                 }
 337                 mask |= ZERO_PAGE;
 338         }
 339         if (from & ~PAGE_MASK) {
 340                 printk("zeromap_page_range: from = %08lx\n",from);
 341                 return -EINVAL;
 342         }
 343         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 344         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 345         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 346         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 347                 pcnt = size;
 348 
 349         while (size > 0) {
 350                 if (!(PAGE_PRESENT & *dir)) {
 351                                 /* clear page needed here?  SRB. */
 352                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 353                                 invalidate();
 354                                 return -ENOMEM;
 355                         }
 356                         if (PAGE_PRESENT & *dir) {
 357                                 free_page((unsigned long) page_table);
 358                                 page_table = (unsigned long *)(PAGE_MASK & *dir++);
 359                         } else
 360                                 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 361                 } else
 362                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 363                 page_table += poff;
 364                 poff = 0;
 365                 for (size -= pcnt; pcnt-- ;) {
 366                         if ((page = *page_table) != 0) {
 367                                 *page_table = 0;
 368                                 if (page & PAGE_PRESENT) {
 369                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 370                                                 if (current->rss > 0)
 371                                                         --current->rss;
 372                                         free_page(PAGE_MASK & page);
 373                                 } else
 374                                         swap_free(page);
 375                         }
 376                         *page_table++ = mask;
 377                 }
 378                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 379         }
 380         invalidate();
 381         return 0;
 382 }
 383 
 384 /*
 385  * maps a range of physical memory into the requested pages. the old
 386  * mappings are removed. any references to nonexistent pages result
 387  * in null mappings (currently treated as "copy-on-access")
 388  */
 389 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
 390 {
 391         unsigned long *page_table, *dir;
 392         unsigned long poff, pcnt;
 393         unsigned long page;
 394 
 395         if (mask) {
 396                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 397                         printk("remap_page_range: mask = %08x\n",mask);
 398                         return -EINVAL;
 399                 }
 400         }
 401         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 402                 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
 403                 return -EINVAL;
 404         }
 405         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 406         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 407         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 408         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 409                 pcnt = size;
 410 
 411         while (size > 0) {
 412                 if (!(PAGE_PRESENT & *dir)) {
 413                         /* clearing page here, needed?  SRB. */
 414                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 415                                 invalidate();
 416                                 return -ENOMEM;
 417                         }
 418                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 419                 }
 420                 else
 421                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 422                 if (poff) {
 423                         page_table += poff;
 424                         poff = 0;
 425                 }
 426 
 427                 for (size -= pcnt; pcnt-- ;) {
 428                         if ((page = *page_table) != 0) {
 429                                 *page_table = 0;
 430                                 if (PAGE_PRESENT & page) {
 431                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 432                                                 if (current->rss > 0)
 433                                                         --current->rss;
 434                                         free_page(PAGE_MASK & page);
 435                                 } else
 436                                         swap_free(page);
 437                         }
 438 
 439                         /*
 440                          * the first condition should return an invalid access
 441                          * when the page is referenced. current assumptions
 442                          * cause it to be treated as demand allocation in some
 443                          * cases.
 444                          */
 445                         if (!mask)
 446                                 *page_table++ = 0;      /* not present */
 447                         else if (to >= high_memory)
 448                                 *page_table++ = (to | mask);
 449                         else if (!mem_map[MAP_NR(to)])
 450                                 *page_table++ = 0;      /* not present */
 451                         else {
 452                                 *page_table++ = (to | mask);
 453                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 454                                         ++current->rss;
 455                                         mem_map[MAP_NR(to)]++;
 456                                 }
 457                         }
 458                         to += PAGE_SIZE;
 459                 }
 460                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 461         }
 462         invalidate();
 463         return 0;
 464 }
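/*
 * (Editor's sketch, not part of the original source: a typical caller of
 * remap_page_range() would be a device driver backing an mmap() with
 * device memory.  Hypothetical example, mapping the 64kB of VGA memory
 * at physical 0xA0000 into a user address:)
 */
#if 0
static int map_vga_sketch(unsigned long user_addr)
{
        /* both addresses must be page aligned; PAGE_SHARED keeps the
           mapping present, writable and user-accessible */
        return remap_page_range(user_addr, 0xA0000, 0x10000, PAGE_SHARED);
}
#endif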
 465 
 466 /*
 467  * This function puts a page in memory at the wanted address.
 468  * It returns the physical address of the page on success, or 0
 469  * if we are out of memory (either when trying to access the
 470  * page table or the page itself).
 471  */
 472 unsigned long put_page(struct task_struct * tsk,unsigned long page,
 473         unsigned long address,int prot)
 474 {
 475         unsigned long *page_table;
 476 
 477         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 478                 printk("put_page: prot = %08x\n",prot);
 479         if (page >= high_memory) {
 480                 printk("put_page: trying to put page %08lx at %08lx\n",page,address);
 481                 return 0;
 482         }
 483         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 484         if ((*page_table) & PAGE_PRESENT)
 485                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 486         else {
 487                 printk("put_page: bad page directory entry\n");
 488                 oom(tsk);
 489                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 490                 return 0;
 491         }
 492         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 493         if (*page_table) {
 494                 printk("put_page: page already exists\n");
 495                 *page_table = 0;
 496                 invalidate();
 497         }
 498         *page_table = page | prot;
 499 /* no need for invalidate */
 500         return page;
 501 }
 502 
 503 /*
 504  * The previous function doesn't work very well if you also want to mark
 505  * the page dirty: exec.c wants this, as it has earlier changed the page,
 506  * and we want the dirty-status to be correct (for VM). Thus the same
 507  * routine, but this time we mark it dirty too.
 508  */
 509 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
 510 {
 511         unsigned long tmp, *page_table;
 512 
 513         if (page >= high_memory)
 514                 printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
 515         if (mem_map[MAP_NR(page)] != 1)
 516                 printk("mem_map disagrees with %08lx at %08lx\n",page,address);
 517         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 518         if (PAGE_PRESENT & *page_table)
 519                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 520         else {
 521                 if (!(tmp = get_free_page(GFP_KERNEL)))
 522                         return 0;
 523                 if (PAGE_PRESENT & *page_table) {
 524                         free_page(tmp);
 525                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 526                 } else {
 527                         *page_table = tmp | PAGE_TABLE;
 528                         page_table = (unsigned long *) tmp;
 529                 }
 530         }
 531         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 532         if (*page_table) {
 533                 printk("put_dirty_page: page already exists\n");
 534                 *page_table = 0;
 535                 invalidate();
 536         }
 537         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 538 /* no need for invalidate */
 539         return page;
 540 }
 541 
 542 /*
 543  * This routine handles present pages, when users try to write
 544  * to a shared page. It is done by copying the page to a new address
 545  * and decrementing the shared-page counter for the old page.
 546  *
 547  * Note that we do many checks twice (look at do_wp_page()), as
 548  * we have to be careful about race-conditions.
 549  *
 550  * Goto-purists beware: the only reason for goto's here is that it results
 551  * in better assembly code.. The "default" path will see no jumps at all.
 552  */
 553 static void __do_wp_page(unsigned long error_code, unsigned long address,
 554         struct task_struct * tsk, unsigned long user_esp)
 555 {
 556         unsigned long *pde, pte, old_page, prot;
 557         unsigned long new_page;
 558 
 559         new_page = __get_free_page(GFP_KERNEL);
 560         pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 561         pte = *pde;
 562         if (!(pte & PAGE_PRESENT))
 563                 goto end_wp_page;
 564         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 565                 goto bad_wp_pagetable;
 566         pte &= PAGE_MASK;
 567         pte += PAGE_PTR(address);
 568         old_page = *(unsigned long *) pte;
 569         if (!(old_page & PAGE_PRESENT))
 570                 goto end_wp_page;
 571         if (old_page >= high_memory)
 572                 goto bad_wp_page;
 573         if (old_page & PAGE_RW)
 574                 goto end_wp_page;
 575         tsk->min_flt++;
 576         prot = (old_page & ~PAGE_MASK) | PAGE_RW;
 577         old_page &= PAGE_MASK;
 578         if (mem_map[MAP_NR(old_page)] != 1) {
 579                 if (new_page) {
 580                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 581                                 ++tsk->rss;
 582                         copy_page(old_page,new_page);
 583                         *(unsigned long *) pte = new_page | prot;
 584                         free_page(old_page);
 585                         invalidate();
 586                         return;
 587                 }
 588                 free_page(old_page);
 589                 oom(tsk);
 590                 *(unsigned long *) pte = BAD_PAGE | prot;
 591                 invalidate();
 592                 return;
 593         }
 594         *(unsigned long *) pte |= PAGE_RW;
 595         invalidate();
 596         if (new_page)
 597                 free_page(new_page);
 598         return;
 599 bad_wp_page:
 600         printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
 601         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 602         send_sig(SIGKILL, tsk, 1);
 603         goto end_wp_page;
 604 bad_wp_pagetable:
 605         printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
 606         *pde = BAD_PAGETABLE | PAGE_TABLE;
 607         send_sig(SIGKILL, tsk, 1);
 608 end_wp_page:
 609         if (new_page)
 610                 free_page(new_page);
 611         return;
 612 }
 613 
 614 /*
 615  * check that a page table change is actually needed, and call
 616  * the low-level function only in that case..
 617  */
 618 void do_wp_page(unsigned long error_code, unsigned long address,
 619         struct task_struct * tsk, unsigned long user_esp)
 620 {
 621         unsigned long page;
 622         unsigned long * pg_table;
 623 
 624         pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 625         page = *pg_table;
 626         if (!page)
 627                 return;
 628         if ((page & PAGE_PRESENT) && page < high_memory) {
 629                 pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
 630                 page = *pg_table;
 631                 if (!(page & PAGE_PRESENT))
 632                         return;
 633                 if (page & PAGE_RW)
 634                         return;
 635                 if (!(page & PAGE_COW)) {
 636                         if (user_esp && tsk == current) {
 637                                 current->tss.cr2 = address;
 638                                 current->tss.error_code = error_code;
 639                                 current->tss.trap_no = 14;
 640                                 send_sig(SIGSEGV, tsk, 1);
 641                                 return;
 642                         }
 643                 }
 644                 if (mem_map[MAP_NR(page)] == 1) {
 645                         *pg_table |= PAGE_RW | PAGE_DIRTY;
 646                         invalidate();
 647                         return;
 648                 }
 649                 __do_wp_page(error_code, address, tsk, user_esp);
 650                 return;
 651         }
 652         printk("bad page directory entry %08lx\n",page);
 653         *pg_table = 0;
 654 }
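/*
 * (Editor's note, not in the original source: the cases above reduce to
 * a small decision table for a write fault -
 *      pte already writable          -> spurious fault, nothing to do
 *      not PAGE_COW, user access     -> genuine SIGSEGV
 *      write-protected, count == 1   -> just set PAGE_RW | PAGE_DIRTY
 *      write-protected, count > 1    -> __do_wp_page() copies the frame.)
 */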
 655 
 656 int __verify_write(unsigned long start, unsigned long size)
 657 {
 658         size--;
 659         size += start & ~PAGE_MASK;
 660         size >>= PAGE_SHIFT;
 661         start &= PAGE_MASK;
 662         do {
 663                 do_wp_page(1,start,current,0);
 664                 start += PAGE_SIZE;
 665         } while (size--);
 666         return 0;
 667 }
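/*
 * (Editor's note, not in the original source: __verify_write() exists
 * because the original 80386 ignores the page-table write-protect bit
 * in supervisor mode, so a kernel write into a user COW page would
 * silently scribble on the shared frame.  Walking the range through
 * do_wp_page() forces the copies by hand; the wp_works_ok probe in
 * mem_init() below identifies CPUs that do honour WP, and callers are
 * expected to skip this path on those.)
 */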
 668 
 669 static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
 670 {
 671         unsigned long tmp;
 672 
 673         if (!(tmp = get_free_page(GFP_KERNEL))) {
 674                 oom(tsk);
 675                 tmp = BAD_PAGE;
 676         }
 677         if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
 678                 free_page(tmp);
 679 }
 680 
 681 /*
 682  * try_to_share() checks the page at address "address" in the task "p",
 683  * to see if it exists, and if it is clean. If so, share it with the current
 684  * task.
 685  *
 686  * NOTE! This assumes we have checked that p != current, and that they
 687  * share the same executable or library.
 688  *
 689  * We may want to fix this to allow page sharing for PIC pages at different
 690  * addresses so that ELF will really perform properly. As long as the vast
 691  * majority of sharable libraries load at fixed addresses this is not a
 692  * big concern. Any sharing of pages between the buffer cache and the
 693  * code space reduces the need for this as well.  - ERY
 694  */
 695 static int try_to_share(unsigned long address, struct task_struct * tsk,
 696         struct task_struct * p, unsigned long error_code, unsigned long newpage)
 697 {
 698         unsigned long from;
 699         unsigned long to;
 700         unsigned long from_page;
 701         unsigned long to_page;
 702 
 703         from_page = (unsigned long)PAGE_DIR_OFFSET(p->tss.cr3,address);
 704         to_page = (unsigned long)PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 705 /* is there a page-directory at from? */
 706         from = *(unsigned long *) from_page;
 707         if (!(from & PAGE_PRESENT))
 708                 return 0;
 709         from &= PAGE_MASK;
 710         from_page = from + PAGE_PTR(address);
 711         from = *(unsigned long *) from_page;
 712 /* is the page clean and present? */
 713         if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
 714                 return 0;
 715         if (from >= high_memory)
 716                 return 0;
 717         if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
 718                 return 0;
 719 /* is the destination ok? */
 720         to = *(unsigned long *) to_page;
 721         if (!(to & PAGE_PRESENT))
 722                 return 0;
 723         to &= PAGE_MASK;
 724         to_page = to + PAGE_PTR(address);
 725         if (*(unsigned long *) to_page)
 726                 return 0;
 727 /* share them if read - do COW immediately otherwise */
 728         if (error_code & PAGE_RW) {
 729                 if(!newpage)    /* did the page exist?  SRB. */
 730                         return 0;
 731                 copy_page((from & PAGE_MASK),newpage);
 732                 to = newpage | PAGE_PRIVATE;
 733         } else {
 734                 mem_map[MAP_NR(from)]++;
 735                 from &= ~PAGE_RW;
 736                 to = from;
 737                 if(newpage)     /* only if it existed. SRB. */
 738                         free_page(newpage);
 739         }
 740         *(unsigned long *) from_page = from;
 741         *(unsigned long *) to_page = to;
 742         invalidate();
 743         return 1;
 744 }
 745 
 746 /*
 747  * share_page() tries to find a process that could share a page with
 748  * the current one. Address is the address of the wanted page relative
 749  * to the current data space.
 750  *
 751  * We first check if it is at all feasible by checking executable->i_count.
 752  * It should be >1 if there are other tasks sharing this inode.
 753  */
 754 int share_page(struct vm_area_struct * area, struct task_struct * tsk,
 755         struct inode * inode,
 756         unsigned long address, unsigned long error_code, unsigned long newpage)
 757 {
 758         struct task_struct ** p;
 759 
 760         if (!inode || inode->i_count < 2 || !area->vm_ops)
 761                 return 0;
 762         for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
 763                 if (!*p)
 764                         continue;
 765                 if (tsk == *p)
 766                         continue;
 767                 if (inode != (*p)->executable) {
 768                         struct vm_area_struct * mpnt;
 769 
 770                         if (!area)
 771                                 continue;
 772                         /* Now see if there is something in the VMM that
 773                            we can share pages with */
 774                         for (mpnt = (*p)->mmap; mpnt; mpnt = mpnt->vm_next) {
 775                                 if (mpnt->vm_ops == area->vm_ops &&
 776                                     mpnt->vm_inode->i_ino == area->vm_inode->i_ino &&
 777                                     mpnt->vm_inode->i_dev == area->vm_inode->i_dev &&
 778                                     mpnt->vm_ops->share(mpnt, area, address))
 779                                         break;
 780                         }
 781                         if (!mpnt)      /* Nope.  Nuthin here */
 782                                 continue;
 783                 }
 784                 if (try_to_share(address,tsk,*p,error_code,newpage))
 785                         return 1;
 786         }
 787         return 0;
 788 }
 789 
 790 /*
 791  * fill in an empty page-table if none exists.
 792  */
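/*
 * (Editor's note, not in the original source: the directory entry is
 * deliberately tested twice - get_free_page(GFP_KERNEL) may sleep, and
 * another process can fault in the same page table while we wait, in
 * which case the freshly allocated page is simply handed back.)
 */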
 793 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
 794 {
 795         unsigned long page;
 796         unsigned long *p;
 797 
 798         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 799         if (PAGE_PRESENT & *p)
 800                 return *p;
 801         if (*p) {
 802                 printk("get_empty_pgtable: bad page-directory entry\n");
 803                 *p = 0;
 804         }
 805         page = get_free_page(GFP_KERNEL);
 806         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 807         if (PAGE_PRESENT & *p) {
 808                 free_page(page);
 809                 return *p;
 810         }
 811         if (*p) {
 812                 printk("get_empty_pgtable: bad page-directory entry\n");
 813                 *p = 0;
 814         }
 815         if (page) {
 816                 *p = page | PAGE_TABLE;
 817                 return *p;
 818         }
 819         oom(current);
 820         *p = BAD_PAGETABLE | PAGE_TABLE;
 821         return 0;
 822 }
 823 
 824 void do_no_page(unsigned long error_code, unsigned long address,
 825         struct task_struct *tsk, unsigned long user_esp)
 826 {
 827         unsigned long tmp;
 828         unsigned long page;
 829         struct vm_area_struct * mpnt;
 830 
 831         page = get_empty_pgtable(tsk,address);
 832         if (!page)
 833                 return;
 834         page &= PAGE_MASK;
 835         page += PAGE_PTR(address);
 836         tmp = *(unsigned long *) page;
 837         if (tmp & PAGE_PRESENT)
 838                 return;
 839         ++tsk->rss;
 840         if (tmp) {
 841                 ++tsk->maj_flt;
 842                 swap_in((unsigned long *) page);
 843                 return;
 844         }
 845         address &= 0xfffff000;
 846         tmp = 0;
 847         for (mpnt = tsk->mmap; mpnt != NULL; mpnt = mpnt->vm_next) {
 848                 if (address < mpnt->vm_start)
 849                         break;
 850                 if (address >= mpnt->vm_end) {
 851                         tmp = mpnt->vm_end;
 852                         continue;
 853                 }
 854                 if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) {
 855                         ++tsk->min_flt;
 856                         get_empty_page(tsk,address);
 857                         return;
 858                 }
 859                 mpnt->vm_ops->nopage(error_code, mpnt, address);
 860                 return;
 861         }
 862         if (tsk != current)
 863                 goto ok_no_page;
 864         if (address >= tsk->end_data && address < tsk->brk)
 865                 goto ok_no_page;
 866         if (mpnt && mpnt == tsk->stk_vma &&
 867             address - tmp > mpnt->vm_start - address &&
 868             tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) {
 869                 mpnt->vm_start = address;
 870                 goto ok_no_page;
 871         }
 872         tsk->tss.cr2 = address;
 873         current->tss.error_code = error_code;
 874         current->tss.trap_no = 14;
 875         send_sig(SIGSEGV,tsk,1);
 876         if (error_code & 4)     /* user level access? */
 877                 return;
 878 ok_no_page:
 879         ++tsk->min_flt;
 880         get_empty_page(tsk,address);
 881 }
 882 
 883 /*
 884  * This routine handles page faults.  It determines the address,
 885  * and the problem, and then passes it off to one of the appropriate
 886  * routines.
 887  */
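/*
 * (Editor's note, not in the original source: on the i386 the hardware
 * pushes an error code that decodes as
 *      bit 0: 0 = page not present, 1 = protection violation
 *      bit 1: 0 = read access,      1 = write access
 *      bit 2: 0 = supervisor mode,  1 = user mode
 * hence the "error_code & 4" test for user mode below, and the use of
 * "error_code & PAGE_PRESENT" to choose do_wp_page() over do_no_page().)
 */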
 888 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 889 {
 890         unsigned long address;
 891         unsigned long user_esp = 0;
 892         unsigned int bit;
 893 
 894         /* get the address */
 895         __asm__("movl %%cr2,%0":"=r" (address));
 896         if (address < TASK_SIZE) {
 897                 if (error_code & 4) {   /* user mode access? */
 898                         if (regs->eflags & VM_MASK) {
 899                                 bit = (address - 0xA0000) >> PAGE_SHIFT;
 900                                 if (bit < 32)
 901                                         current->screen_bitmap |= 1 << bit;
 902                         } else 
 903                                 user_esp = regs->esp;
 904                 }
 905                 if (error_code & PAGE_PRESENT)
 906                         do_wp_page(error_code, address, current, user_esp);
 907                 else
 908                         do_no_page(error_code, address, current, user_esp);
 909                 return;
 910         }
 911         address -= TASK_SIZE;
 912         if (wp_works_ok < 0 && address == 0 && (error_code & PAGE_PRESENT)) {
 913                 wp_works_ok = 1;
 914                 pg0[0] = PAGE_SHARED;
 915                 printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
 916                 return;
 917         }
 918         if (address < PAGE_SIZE) {
 919                 printk("Unable to handle kernel NULL pointer dereference");
 920                 pg0[0] = PAGE_SHARED;
 921         } else
 922                 printk("Unable to handle kernel paging request");
 923         printk(" at address %08lx\n",address);
 924         die_if_kernel("Oops", regs, error_code);
 925         do_exit(SIGKILL);
 926 }
 927 
 928 /*
 929  * BAD_PAGE is the page that is used for page faults when linux
 930  * is out-of-memory. Older versions of linux just did a
 931  * do_exit(), but using this instead means there is less risk
 932  * of a process dying in kernel mode, possibly leaving an inode
 933  * unused etc..
 934  *
 935  * BAD_PAGETABLE is the accompanying page-table: it is initialized
 936  * to point to BAD_PAGE entries.
 937  *
 938  * ZERO_PAGE is a special page that is used for zero-initialized
 939  * data and COW.
 940  */
 941 unsigned long __bad_pagetable(void)
 942 {
 943         extern char empty_bad_page_table[PAGE_SIZE];
 944 
 945         __asm__ __volatile__("cld ; rep ; stosl":
 946                 :"a" (BAD_PAGE + PAGE_TABLE),
 947                  "D" ((long) empty_bad_page_table),
 948                  "c" (PTRS_PER_PAGE)
 949                 :"di","cx");
 950         return (unsigned long) empty_bad_page_table;
 951 }
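/*
 * (Editor's sketch, not part of the original source: the inline asm
 * above is only a fast memory fill; in plain C it would read roughly:)
 */
#if 0
static unsigned long bad_pagetable_sketch(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long * p = (unsigned long *) empty_bad_page_table;
        int i;

        for (i = 0 ; i < PTRS_PER_PAGE ; i++)
                p[i] = BAD_PAGE + PAGE_TABLE;   /* every entry -> BAD_PAGE */
        return (unsigned long) empty_bad_page_table;
}
#endif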
 952 
 953 unsigned long __bad_page(void)
 954 {
 955         extern char empty_bad_page[PAGE_SIZE];
 956 
 957         __asm__ __volatile__("cld ; rep ; stosl":
 958                 :"a" (0),
 959                  "D" ((long) empty_bad_page),
 960                  "c" (PTRS_PER_PAGE)
 961                 :"di","cx");
 962         return (unsigned long) empty_bad_page;
 963 }
 964 
 965 unsigned long __zero_page(void)
 966 {
 967         extern char empty_zero_page[PAGE_SIZE];
 968 
 969         __asm__ __volatile__("cld ; rep ; stosl":
 970                 :"a" (0),
 971                  "D" ((long) empty_zero_page),
 972                  "c" (PTRS_PER_PAGE)
 973                 :"di","cx");
 974         return (unsigned long) empty_zero_page;
 975 }
 976 
 977 void show_mem(void)
 978 {
 979         int i,free = 0,total = 0,reserved = 0;
 980         int shared = 0;
 981 
 982         printk("Mem-info:\n");
 983         show_free_areas();
 984         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 985         i = high_memory >> PAGE_SHIFT;
 986         while (i-- > 0) {
 987                 total++;
 988                 if (mem_map[i] & MAP_PAGE_RESERVED)
 989                         reserved++;
 990                 else if (!mem_map[i])
 991                         free++;
 992                 else
 993                         shared += mem_map[i]-1;
 994         }
 995         printk("%d pages of RAM\n",total);
 996         printk("%d free pages\n",free);
 997         printk("%d reserved pages\n",reserved);
 998         printk("%d pages shared\n",shared);
 999         show_buffers();
1000 }
1001 
1002 /*
1003  * paging_init() sets up the page tables - note that the first 4MB are
1004  * already mapped by head.S.
1005  *
1006  * This routine also unmaps the page at virtual kernel address 0, so
1007  * that we can trap those pesky NULL-reference errors in the kernel.
1008  */
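/*
 * (Editor's note, not in the original source: one page-directory entry
 * maps 4MB, so "pg_dir + 768" below is the entry for linear address
 * 768 * 4MB = 0xC0000000.  The loop maps all of physical memory both
 * there and, temporarily, at linear 0 for the benefit of init.)
 */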
1009 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
1010 {
1011         unsigned long * pg_dir;
1012         unsigned long * pg_table;
1013         unsigned long tmp;
1014         unsigned long address;
1015 
1016 /*
1017  * Physical page 0 is special; it's not touched by Linux since BIOS
1018  * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
1019  * and write protected to detect null pointer references in the
1020  * kernel.
1021  */
1022 #if 0
1023         memset((void *) 0, 0, PAGE_SIZE);
1024 #endif
1025         start_mem = PAGE_ALIGN(start_mem);
1026         address = 0;
1027         pg_dir = swapper_pg_dir;
1028         while (address < end_mem) {
1029                 tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
1030                 if (!tmp) {
1031                         tmp = start_mem | PAGE_TABLE;
1032                         *(pg_dir + 768) = tmp;
1033                         start_mem += PAGE_SIZE;
1034                 }
1035                 *pg_dir = tmp;                  /* also map it in at 0x00000000 for init */
1036                 pg_dir++;
1037                 pg_table = (unsigned long *) (tmp & PAGE_MASK);
1038                 for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
1039                         if (address < end_mem)
1040                                 *pg_table = address | PAGE_SHARED;
1041                         else
1042                                 *pg_table = 0;
1043                         address += PAGE_SIZE;
1044                 }
1045         }
1046         invalidate();
1047         return start_mem;
1048 }
1049 
1050 void mem_init(unsigned long start_low_mem,
1051               unsigned long start_mem, unsigned long end_mem)
1052 {
1053         int codepages = 0;
1054         int reservedpages = 0;
1055         int datapages = 0;
1056         unsigned long tmp, mask;
1057         unsigned short * p;
1058         extern int etext;
1059 
1060         cli();
1061         end_mem &= PAGE_MASK;
1062         high_memory = end_mem;
1063         start_mem +=  0x0000000f;
1064         start_mem &= ~0x0000000f;
1065         tmp = MAP_NR(end_mem);
1066         mem_map = (unsigned short *) start_mem;
1067         p = mem_map + tmp;
1068         start_mem = (unsigned long) p;
1069         while (p > mem_map)
1070                 *--p = MAP_PAGE_RESERVED;
1071 
1072         /* set up the free-area data structures */
1073         for (mask = PAGE_MASK, tmp = 0 ; tmp < NR_MEM_LISTS ; tmp++, mask <<= 1) {
1074                 unsigned long bitmap_size;
1075                 free_area_list[tmp].prev = free_area_list[tmp].next = &free_area_list[tmp];
1076                 end_mem = (end_mem + ~mask) & mask;
1077                 bitmap_size = end_mem >> (PAGE_SHIFT + tmp);
1078                 bitmap_size = (bitmap_size + 7) >> 3;
1079                 free_area_map[tmp] = (unsigned char *) start_mem;
1080                 memset((void *) start_mem, 0, bitmap_size);
1081                 start_mem += bitmap_size;
1082         }
1083 
1084         /* mark usable pages in the mem_map[] */
1085         start_low_mem = PAGE_ALIGN(start_low_mem);
1086         start_mem = PAGE_ALIGN(start_mem);
1087 
1088         /*
1089          * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
1090          * They seem to have done something stupid with the floppy
1091          * controller as well..
1092          */
1093         while (start_low_mem < 0x9f000) {
1094                 mem_map[MAP_NR(start_low_mem)] = 0;
1095                 start_low_mem += PAGE_SIZE;
1096         }
1097 
1098         while (start_mem < high_memory) {
1099                 mem_map[MAP_NR(start_mem)] = 0;
1100                 start_mem += PAGE_SIZE;
1101         }
1102 #ifdef CONFIG_SOUND
1103         sound_mem_init();
1104 #endif
1105         for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
1106                 if (mem_map[MAP_NR(tmp)]) {
1107                         if (tmp >= 0xA0000 && tmp < 0x100000)
1108                                 reservedpages++;
1109                         else if (tmp < (unsigned long) &etext)
1110                                 codepages++;
1111                         else
1112                                 datapages++;
1113                         continue;
1114                 }
1115                 mem_map[MAP_NR(tmp)] = 1;
1116                 free_page(tmp);
1117         }
1118         tmp = nr_free_pages << PAGE_SHIFT;
1119         printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
1120                 tmp >> 10,
1121                 high_memory >> 10,
1122                 codepages << (PAGE_SHIFT-10),
1123                 reservedpages << (PAGE_SHIFT-10),
1124                 datapages << (PAGE_SHIFT-10));
1125 /* test if the WP bit is honoured in supervisor mode */
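/*
 * (Editor's note: the "movb" pair below reads and rewrites the byte at
 * kernel address 0, which pg0[0] has just made read-only.  If the CPU
 * faults despite running in supervisor mode, do_page_fault() notices
 * wp_works_ok < 0 and sets it to 1; otherwise it is reset to 0 below.)
 */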
1126         wp_works_ok = -1;
1127         pg0[0] = PAGE_READONLY;
1128         invalidate();
1129         __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
1130         pg0[0] = 0;
1131         invalidate();
1132         if (wp_works_ok < 0)
1133                 wp_works_ok = 0;
1134         return;
1135 }
1136 
1137 void si_meminfo(struct sysinfo *val)
1138 {
1139         int i;
1140 
1141         i = high_memory >> PAGE_SHIFT;
1142         val->totalram = 0;
1143         val->sharedram = 0;
1144         val->freeram = nr_free_pages << PAGE_SHIFT;
1145         val->bufferram = buffermem;
1146         while (i-- > 0)  {
1147                 if (mem_map[i] & MAP_PAGE_RESERVED)
1148                         continue;
1149                 val->totalram++;
1150                 if (!mem_map[i])
1151                         continue;
1152                 val->sharedram += mem_map[i]-1;
1153         }
1154         val->totalram <<= PAGE_SHIFT;
1155         val->sharedram <<= PAGE_SHIFT;
1156         return;
1157 }
1158 
1159 
1160 /* This handles a generic mmap of a disk file */
1161 void file_mmap_nopage(int error_code, struct vm_area_struct * area, unsigned long address)
1162 {
1163         struct inode * inode = area->vm_inode;
1164         unsigned int block;
1165         unsigned long page;
1166         int nr[8];
1167         int i, j;
1168         int prot = area->vm_page_prot;
1169 
1170         address &= PAGE_MASK;
1171         block = address - area->vm_start + area->vm_offset;
1172         block >>= inode->i_sb->s_blocksize_bits;
1173 
1174         page = get_free_page(GFP_KERNEL);
1175         if (share_page(area, area->vm_task, inode, address, error_code, page)) {
1176                 ++area->vm_task->min_flt;
1177                 return;
1178         }
1179 
1180         ++area->vm_task->maj_flt;
1181         if (!page) {
1182                 oom(current);
1183                 put_page(area->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
1184                 return;
1185         }
1186         for (i=0, j=0; i < PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
1187                 nr[j] = bmap(inode,block);
1188         if (error_code & PAGE_RW)
1189                 prot |= PAGE_RW | PAGE_DIRTY;
1190         page = bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, prot);
1191 
1192         if (!(prot & PAGE_RW)) {
1193                 if (share_page(area, area->vm_task, inode, address, error_code, page))
1194                         return;
1195         }
1196         if (put_page(area->vm_task,page,address,prot))
1197                 return;
1198         free_page(page);
1199         oom(current);
1200 }
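/*
 * (Editor's note, not in the original source: nr[8] above is sized for
 * the worst case of 512-byte blocks, since PAGE_SIZE / 512 = 8; with a
 * typical 1kB blocksize the bmap() loop fills only nr[0..3].)
 */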
1201 
1202 void file_mmap_free(struct vm_area_struct * area)
1203 {
1204         if (area->vm_inode)
1205                 iput(area->vm_inode);
1206 #if 0
1207         if (area->vm_inode)
1208                 printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev, 
1209                                  area->vm_inode->i_ino, area->vm_inode->i_count);
1210 #endif
1211 }
1212 
1213 /*
1214  * Compare the contents of the mmap entries, and decide if we are allowed to
1215  * share the pages
1216  */
1217 int file_mmap_share(struct vm_area_struct * area1, 
1218                     struct vm_area_struct * area2, 
1219                     unsigned long address)
1220 {
1221         if (area1->vm_inode != area2->vm_inode)
1222                 return 0;
1223         if (area1->vm_start != area2->vm_start)
1224                 return 0;
1225         if (area1->vm_end != area2->vm_end)
1226                 return 0;
1227         if (area1->vm_offset != area2->vm_offset)
1228                 return 0;
1229         if (area1->vm_page_prot != area2->vm_page_prot)
1230                 return 0;
1231         return 1;
1232 }
1233 
1234 struct vm_operations_struct file_mmap = {
1235         NULL,                   /* open */
1236         file_mmap_free,         /* close */
1237         file_mmap_nopage,       /* nopage */
1238         NULL,                   /* wppage */
1239         file_mmap_share,        /* share */
1240         NULL,                   /* unmap */
1241 };
