root/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. __do_wp_page
  13. do_wp_page
  14. __verify_write
  15. verify_area
  16. get_empty_page
  17. try_to_share
  18. share_page
  19. get_empty_pgtable
  20. handle_no_page
  21. do_no_page
  22. do_page_fault
  23. __bad_pagetable
  24. __bad_page
  25. __zero_page
  26. show_mem
  27. paging_init
  28. mem_init
  29. si_meminfo
  30. file_mmap_nopage
  31. file_mmap_free
  32. file_mmap_share

   1 /*
   2  *  linux/mm/memory.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  */
   6 
   7 /*
   8  * demand-loading started 01.12.91 - seems it is high on the list of
   9  * things wanted, and it should be easy to implement. - Linus
  10  */
  11 
  12 /*
   13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  14  * pages started 02.12.91, seems to work. - Linus.
  15  *
  16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17  * would have taken more than the 6M I have free, but it worked well as
  18  * far as I could see.
  19  *
  20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21  */
  22 
  23 /*
  24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25  * thought has to go into this. Oh, well..
  26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27  *              Found it. Everything seems to work now.
  28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29  */
  30 
  31 /*
  32  * 05.04.94  -  Multi-page memory management added for v1.1.
  33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  34  */
  35 
  36 #include <asm/system.h>
  37 #include <linux/config.h>
  38 
  39 #include <linux/signal.h>
  40 #include <linux/sched.h>
  41 #include <linux/head.h>
  42 #include <linux/kernel.h>
  43 #include <linux/errno.h>
  44 #include <linux/string.h>
  45 #include <linux/types.h>
  46 #include <linux/ptrace.h>
  47 #include <linux/mman.h>
  48 #include <linux/segment.h>
  49 #include <asm/segment.h>
  50 
  51 /*
   52  * Define this if things work differently on an i386 and an i486:
  53  * it will (on a i486) warn about kernel memory accesses that are
  54  * done without a 'verify_area(VERIFY_WRITE,..)'
  55  */
  56 #undef CONFIG_TEST_VERIFY_AREA
  57 
  58 unsigned long high_memory = 0;
  59 
  60 extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
  61 
  62 extern void sound_mem_init(void);
  63 extern void die_if_kernel(char *,struct pt_regs *,long);
  64 extern void show_net_buffers(void);
  65 
  66 /*
  67  * The free_area_list arrays point to the queue heads of the free areas
  68  * of different sizes
  69  */
  70 int nr_swap_pages = 0;
  71 int nr_free_pages = 0;
  72 struct mem_list free_area_list[NR_MEM_LISTS];
  73 unsigned char * free_area_map[NR_MEM_LISTS];
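      /*
       * (A sketch of how these are used: free_area_list[i] is the head of a
       *  doubly-linked list of free blocks of 2^i pages, and free_area_map[i]
       *  is the bitmap the buddy allocator consults to decide when two
       *  neighbouring blocks of that order can be merged again - see
       *  free_area_init(), declared further down and called from paging_init().)
       */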
  74 
  75 #define copy_page(from,to) \
  76 __asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")
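      /*
       * copy_page() copies one full 4kB page (1024 longwords) with a single
       * "rep ; movsl"; the clobber list tells gcc that cx, di and si get
       * trashed by the inline assembly.
       */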
  77 
  78 unsigned short * mem_map = NULL;
  79 
  80 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
  81 
  82 /*
  83  * oom() prints a message (so that the user knows why the process died),
  84  * and gives the process an untrappable SIGKILL.
  85  */
  86 void oom(struct task_struct * task)
  87 {
  88         printk("\nOut of memory.\n");
  89         task->sigaction[SIGKILL-1].sa_handler = NULL;
  90         task->blocked &= ~(1<<(SIGKILL-1));
  91         send_sig(SIGKILL,task,1);
  92 }
  93 
  94 static void free_one_table(unsigned long * page_dir)
  95 {
  96         int j;
  97         unsigned long pg_table = *page_dir;
  98         unsigned long * page_table;
  99 
 100         if (!pg_table)
 101                 return;
 102         *page_dir = 0;
 103         if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
 104                 printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
 105                 return;
 106         }
 107         if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
 108                 return;
 109         page_table = (unsigned long *) (pg_table & PAGE_MASK);
 110         for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
 111                 unsigned long pg = *page_table;
 112                 
 113                 if (!pg)
 114                         continue;
 115                 *page_table = 0;
 116                 if (pg & PAGE_PRESENT)
 117                         free_page(PAGE_MASK & pg);
 118                 else
 119                         swap_free(pg);
 120         }
 121         free_page(PAGE_MASK & pg_table);
 122 }
 123 
 124 /*
 125  * This function clears all user-level page tables of a process - this
 126  * is needed by execve(), so that old pages aren't in the way. Note that
 127  * unlike 'free_page_tables()', this function still leaves a valid
 128  * page-table-tree in memory: it just removes the user pages. The two
 129  * functions are similar, but there is a fundamental difference.
 130  */
 131 void clear_page_tables(struct task_struct * tsk)
 132 {
 133         int i;
 134         unsigned long pg_dir;
 135         unsigned long * page_dir;
 136 
 137         if (!tsk)
 138                 return;
 139         if (tsk == task[0])
 140                 panic("task[0] (swapper) doesn't support exec()\n");
 141         pg_dir = tsk->tss.cr3;
 142         page_dir = (unsigned long *) pg_dir;
 143         if (!page_dir || page_dir == swapper_pg_dir) {
 144                 printk("Trying to clear kernel page-directory: not good\n");
 145                 return;
 146         }
 147         if (mem_map[MAP_NR(pg_dir)] > 1) {
 148                 unsigned long * new_pg;
 149 
 150                 if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
 151                         oom(tsk);
 152                         return;
 153                 }
 154                 for (i = 768 ; i < 1024 ; i++)
 155                         new_pg[i] = page_dir[i];
 156                 free_page(pg_dir);
 157                 tsk->tss.cr3 = (unsigned long) new_pg;
 158                 return;
 159         }
 160         for (i = 0 ; i < 768 ; i++,page_dir++)
 161                 free_one_table(page_dir);
 162         invalidate();
 163         return;
 164 }
 165 
 166 /*
 167  * This function frees up all page tables of a process when it exits.
 168  */
 169 void free_page_tables(struct task_struct * tsk)
 170 {
 171         int i;
 172         unsigned long pg_dir;
 173         unsigned long * page_dir;
 174 
 175         if (!tsk)
 176                 return;
 177         if (tsk == task[0]) {
 178                 printk("task[0] (swapper) killed: unable to recover\n");
 179                 panic("Trying to free up swapper memory space");
 180         }
 181         pg_dir = tsk->tss.cr3;
 182         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 183                 printk("Trying to free kernel page-directory: not good\n");
 184                 return;
 185         }
 186         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 187         if (tsk == current)
 188                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 189         if (mem_map[MAP_NR(pg_dir)] > 1) {
 190                 free_page(pg_dir);
 191                 return;
 192         }
 193         page_dir = (unsigned long *) pg_dir;
 194         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 195                 free_one_table(page_dir);
 196         free_page(pg_dir);
 197         invalidate();
 198 }
 199 
 200 /*
 201  * clone_page_tables() clones the page table for a process - both
 202  * processes will have the exact same pages in memory. There are
 203  * probably races in the memory management with cloning, but we'll
 204  * see..
 205  */
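      /*
       * (Put differently: the child just takes a reference on the parent's
       *  page directory, so both tasks share one and the same address space
       *  from then on.  This is the cheap path used when the VM is meant to
       *  be shared; an ordinary fork() goes through copy_page_tables() below.)
       */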
 206 int clone_page_tables(struct task_struct * tsk)
 207 {
 208         unsigned long pg_dir;
 209 
 210         pg_dir = current->tss.cr3;
 211         mem_map[MAP_NR(pg_dir)]++;
 212         tsk->tss.cr3 = pg_dir;
 213         return 0;
 214 }
 215 
 216 /*
 217  * copy_page_tables() just copies the whole process memory range:
 218  * note the special handling of RESERVED (ie kernel) pages, which
 219  * means that they are always shared by all processes.
 220  */
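      /*
       * (This is also where copy-on-write is set up: a page that is both
       *  writable and marked PAGE_COW gets PAGE_RW cleared in *both* copies,
       *  so the first write by either task faults into do_wp_page(), which
       *  then makes a private copy.  Swapped-out entries are duplicated with
       *  swap_duplicate(), and RESERVED pages are shared without touching
       *  their use count.)
       */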
 221 int copy_page_tables(struct task_struct * tsk)
 222 {
 223         int i;
 224         unsigned long old_pg_dir, *old_page_dir;
 225         unsigned long new_pg_dir, *new_page_dir;
 226 
 227         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 228                 return -ENOMEM;
 229         old_pg_dir = current->tss.cr3;
 230         tsk->tss.cr3 = new_pg_dir;
 231         old_page_dir = (unsigned long *) old_pg_dir;
 232         new_page_dir = (unsigned long *) new_pg_dir;
 233         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 234                 int j;
 235                 unsigned long old_pg_table, *old_page_table;
 236                 unsigned long new_pg_table, *new_page_table;
 237 
 238                 old_pg_table = *old_page_dir;
 239                 if (!old_pg_table)
 240                         continue;
 241                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
 242                         printk("copy_page_tables: bad page table: "
 243                                 "probable memory corruption");
 244                         *old_page_dir = 0;
 245                         continue;
 246                 }
 247                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 248                         *new_page_dir = old_pg_table;
 249                         continue;
 250                 }
 251                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 252                         free_page_tables(tsk);
 253                         return -ENOMEM;
 254                 }
 255                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 256                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 257                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 258                         unsigned long pg;
 259                         pg = *old_page_table;
 260                         if (!pg)
 261                                 continue;
 262                         if (!(pg & PAGE_PRESENT)) {
 263                                 *new_page_table = swap_duplicate(pg);
 264                                 continue;
 265                         }
 266                         if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
 267                                 pg &= ~PAGE_RW;
 268                         *new_page_table = pg;
 269                         if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
 270                                 continue;
 271                         *old_page_table = pg;
 272                         mem_map[MAP_NR(pg)]++;
 273                 }
 274                 *new_page_dir = new_pg_table | PAGE_TABLE;
 275         }
 276         invalidate();
 277         return 0;
 278 }
 279 
 280 /*
 281  * a more complete version of free_page_tables which performs with page
 282  * granularity.
 283  */
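      /*
       * (It walks the page tables covering [from, from+size): present pages
       *  are freed and accounted off current->mm->rss unless RESERVED, swap
       *  entries are released with swap_free(), and when a whole page table's
       *  worth (4MB) is unmapped the page table itself is freed as well.)
       */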
 284 int unmap_page_range(unsigned long from, unsigned long size)
 285 {
 286         unsigned long page, page_dir;
 287         unsigned long *page_table, *dir;
 288         unsigned long poff, pcnt, pc;
 289 
 290         if (from & ~PAGE_MASK) {
 291                 printk("unmap_page_range called with wrong alignment\n");
 292                 return -EINVAL;
 293         }
 294         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 295         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 296         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 297         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 298                 pcnt = size;
 299 
 300         for ( ; size > 0; ++dir, size -= pcnt,
 301              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 302                 if (!(page_dir = *dir)) {
 303                         poff = 0;
 304                         continue;
 305                 }
 306                 if (!(page_dir & PAGE_PRESENT)) {
 307                         printk("unmap_page_range: bad page directory.");
 308                         continue;
 309                 }
 310                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 311                 if (poff) {
 312                         page_table += poff;
 313                         poff = 0;
 314                 }
 315                 for (pc = pcnt; pc--; page_table++) {
 316                         if ((page = *page_table) != 0) {
 317                                 *page_table = 0;
 318                                 if (1 & page) {
 319                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 320                                                 if (current->mm->rss > 0)
 321                                                         --current->mm->rss;
 322                                         free_page(PAGE_MASK & page);
 323                                 } else
 324                                         swap_free(page);
 325                         }
 326                 }
 327                 if (pcnt == PTRS_PER_PAGE) {
 328                         *dir = 0;
 329                         free_page(PAGE_MASK & page_dir);
 330                 }
 331         }
 332         invalidate();
 333         return 0;
 334 }
 335 
 336 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
 337 {
 338         unsigned long *page_table, *dir;
 339         unsigned long poff, pcnt;
 340         unsigned long page;
 341 
 342         if (mask) {
 343                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 344                         printk("zeromap_page_range: mask = %08x\n",mask);
 345                         return -EINVAL;
 346                 }
 347                 mask |= ZERO_PAGE;
 348         }
 349         if (from & ~PAGE_MASK) {
 350                 printk("zeromap_page_range: from = %08lx\n",from);
 351                 return -EINVAL;
 352         }
 353         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 354         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 355         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 356         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 357                 pcnt = size;
 358 
 359         while (size > 0) {
 360                 if (!(PAGE_PRESENT & *dir)) {
 361                                 /* clear page needed here?  SRB. */
 362                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 363                                 invalidate();
 364                                 return -ENOMEM;
 365                         }
 366                         if (PAGE_PRESENT & *dir) {
 367                                 free_page((unsigned long) page_table);
 368                                 page_table = (unsigned long *)(PAGE_MASK & *dir++);
 369                         } else
 370                                 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 371                 } else
 372                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 373                 page_table += poff;
 374                 poff = 0;
 375                 for (size -= pcnt; pcnt-- ;) {
 376                         if ((page = *page_table) != 0) {
 377                                 *page_table = 0;
 378                                 if (page & PAGE_PRESENT) {
 379                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 380                                                 if (current->mm->rss > 0)
 381                                                         --current->mm->rss;
 382                                         free_page(PAGE_MASK & page);
 383                                 } else
 384                                         swap_free(page);
 385                         }
 386                         *page_table++ = mask;
 387                 }
 388                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 389         }
 390         invalidate();
 391         return 0;
 392 }
 393 
 394 /*
 395  * maps a range of physical memory into the requested pages. the old
  396  * mappings are removed. any references to nonexistent pages result
 397  * in null mappings (currently treated as "copy-on-access")
 398  */
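      /*
       * A rough usage sketch (not taken from this file - "uaddr", "phys" and
       * "len" are placeholders): a driver mmap() implementation of this era
       * could hand device memory to user space with something like
       *
       *	if (remap_page_range(uaddr, phys, len, PAGE_SHARED))
       *		return -EAGAIN;
       *
       * where uaddr is the page-aligned user virtual address, phys the
       * physical address to map and len the length in bytes.  The mask may
       * only contain protection bits plus PAGE_PRESENT, as checked below.
       */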
 399 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
 400 {
 401         unsigned long *page_table, *dir;
 402         unsigned long poff, pcnt;
 403         unsigned long page;
 404 
 405         if (mask) {
 406                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 407                         printk("remap_page_range: mask = %08x\n",mask);
 408                         return -EINVAL;
 409                 }
 410         }
 411         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 412                 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
 413                 return -EINVAL;
 414         }
 415         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 416         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 417         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 418         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 419                 pcnt = size;
 420 
 421         while (size > 0) {
 422                 if (!(PAGE_PRESENT & *dir)) {
 423                         /* clearing page here, needed?  SRB. */
 424                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 425                                 invalidate();
 426                                 return -1;
 427                         }
 428                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 429                 }
 430                 else
 431                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 432                 if (poff) {
 433                         page_table += poff;
 434                         poff = 0;
 435                 }
 436 
 437                 for (size -= pcnt; pcnt-- ;) {
 438                         if ((page = *page_table) != 0) {
 439                                 *page_table = 0;
 440                                 if (PAGE_PRESENT & page) {
 441                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 442                                                 if (current->mm->rss > 0)
 443                                                         --current->mm->rss;
 444                                         free_page(PAGE_MASK & page);
 445                                 } else
 446                                         swap_free(page);
 447                         }
 448 
 449                         /*
 450                          * the first condition should return an invalid access
 451                          * when the page is referenced. current assumptions
 452                          * cause it to be treated as demand allocation in some
 453                          * cases.
 454                          */
 455                         if (!mask)
 456                                 *page_table++ = 0;      /* not present */
 457                         else if (to >= high_memory)
 458                                 *page_table++ = (to | mask);
 459                         else if (!mem_map[MAP_NR(to)])
 460                                 *page_table++ = 0;      /* not present */
 461                         else {
 462                                 *page_table++ = (to | mask);
 463                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 464                                         ++current->mm->rss;
 465                                         mem_map[MAP_NR(to)]++;
 466                                 }
 467                         }
 468                         to += PAGE_SIZE;
 469                 }
 470                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 471         }
 472         invalidate();
 473         return 0;
 474 }
 475 
 476 /*
 477  * This function puts a page in memory at the wanted address.
 478  * It returns the physical address of the page gotten, 0 if
 479  * out of memory (either when trying to access page-table or
 480  * page.)
 481  */
 482 unsigned long put_page(struct task_struct * tsk,unsigned long page,
 483         unsigned long address,int prot)
 484 {
 485         unsigned long *page_table;
 486 
 487         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 488                 printk("put_page: prot = %08x\n",prot);
 489         if (page >= high_memory) {
 490                 printk("put_page: trying to put page %08lx at %08lx\n",page,address);
 491                 return 0;
 492         }
 493         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 494         if ((*page_table) & PAGE_PRESENT)
 495                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 496         else {
 497                 printk("put_page: bad page directory entry\n");
 498                 oom(tsk);
 499                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 500                 return 0;
 501         }
 502         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 503         if (*page_table) {
 504                 printk("put_page: page already exists\n");
 505                 *page_table = 0;
 506                 invalidate();
 507         }
 508         *page_table = page | prot;
 509 /* no need for invalidate */
 510         return page;
 511 }
 512 
 513 /*
 514  * The previous function doesn't work very well if you also want to mark
 515  * the page dirty: exec.c wants this, as it has earlier changed the page,
 516  * and we want the dirty-status to be correct (for VM). Thus the same
 517  * routine, but this time we mark it dirty too.
 518  */
 519 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
 520 {
 521         unsigned long tmp, *page_table;
 522 
 523         if (page >= high_memory)
 524                 printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
 525         if (mem_map[MAP_NR(page)] != 1)
 526                 printk("mem_map disagrees with %08lx at %08lx\n",page,address);
 527         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 528         if (PAGE_PRESENT & *page_table)
 529                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 530         else {
 531                 if (!(tmp = get_free_page(GFP_KERNEL)))
 532                         return 0;
 533                 if (PAGE_PRESENT & *page_table) {
 534                         free_page(tmp);
 535                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 536                 } else {
 537                         *page_table = tmp | PAGE_TABLE;
 538                         page_table = (unsigned long *) tmp;
 539                 }
 540         }
 541         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 542         if (*page_table) {
 543                 printk("put_dirty_page: page already exists\n");
 544                 *page_table = 0;
 545                 invalidate();
 546         }
 547         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 548 /* no need for invalidate */
 549         return page;
 550 }
 551 
 552 /*
 553  * This routine handles present pages, when users try to write
 554  * to a shared page. It is done by copying the page to a new address
 555  * and decrementing the shared-page counter for the old page.
 556  *
 557  * Note that we do many checks twice (look at do_wp_page()), as
 558  * we have to be careful about race-conditions.
 559  *
 560  * Goto-purists beware: the only reason for goto's here is that it results
 561  * in better assembly code.. The "default" path will see no jumps at all.
 562  */
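      /*
       * (The new page is allocated up front because __get_free_page() may
       *  sleep; the page tables are then re-walked and re-checked.  If the
       *  old page turns out to have only a single user it is simply made
       *  writable again; otherwise its contents are copied into new_page and
       *  one reference on the old page is dropped.)
       */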
 563 static void __do_wp_page(unsigned long error_code, unsigned long address,
 564         struct task_struct * tsk)
 565 {
 566         unsigned long *pde, pte, old_page, prot;
 567         unsigned long new_page;
 568 
 569         new_page = __get_free_page(GFP_KERNEL);
 570         pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 571         pte = *pde;
 572         if (!(pte & PAGE_PRESENT))
 573                 goto end_wp_page;
 574         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 575                 goto bad_wp_pagetable;
 576         pte &= PAGE_MASK;
 577         pte += PAGE_PTR(address);
 578         old_page = *(unsigned long *) pte;
 579         if (!(old_page & PAGE_PRESENT))
 580                 goto end_wp_page;
 581         if (old_page >= high_memory)
 582                 goto bad_wp_page;
 583         if (old_page & PAGE_RW)
 584                 goto end_wp_page;
 585         tsk->mm->min_flt++;
 586         prot = (old_page & ~PAGE_MASK) | PAGE_RW;
 587         old_page &= PAGE_MASK;
 588         if (mem_map[MAP_NR(old_page)] != 1) {
 589                 if (new_page) {
 590                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 591                                 ++tsk->mm->rss;
 592                         copy_page(old_page,new_page);
 593                         *(unsigned long *) pte = new_page | prot;
 594                         free_page(old_page);
 595                         invalidate();
 596                         return;
 597                 }
 598                 free_page(old_page);
 599                 oom(tsk);
 600                 *(unsigned long *) pte = BAD_PAGE | prot;
 601                 invalidate();
 602                 return;
 603         }
 604         *(unsigned long *) pte |= PAGE_RW;
 605         invalidate();
 606         if (new_page)
 607                 free_page(new_page);
 608         return;
 609 bad_wp_page:
 610         printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
 611         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 612         send_sig(SIGKILL, tsk, 1);
 613         goto end_wp_page;
 614 bad_wp_pagetable:
 615         printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
 616         *pde = BAD_PAGETABLE | PAGE_TABLE;
 617         send_sig(SIGKILL, tsk, 1);
 618 end_wp_page:
 619         if (new_page)
 620                 free_page(new_page);
 621         return;
 622 }
 623 
 624 /*
 625  * check that a page table change is actually needed, and call
 626  * the low-level function only in that case..
 627  */
 628 void do_wp_page(unsigned long error_code, unsigned long address,
 629         struct task_struct * tsk)
 630 {
 631         unsigned long page;
 632         unsigned long * pg_table;
 633 
 634         pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 635         page = *pg_table;
 636         if (!page)
 637                 return;
 638         if ((page & PAGE_PRESENT) && page < high_memory) {
 639                 pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
 640                 page = *pg_table;
 641                 if (!(page & PAGE_PRESENT))
 642                         return;
 643                 if (page & PAGE_RW)
 644                         return;
 645                 if (!(page & PAGE_COW)) {
 646                         if ((error_code & PAGE_USER) && tsk == current) {
 647                                 current->tss.cr2 = address;
 648                                 current->tss.error_code = error_code;
 649                                 current->tss.trap_no = 14;
 650                                 send_sig(SIGSEGV, tsk, 1);
 651                                 return;
 652                         }
 653                 }
 654                 if (mem_map[MAP_NR(page)] == 1) {
 655                         *pg_table |= PAGE_RW | PAGE_DIRTY;
 656                         invalidate();
 657                         return;
 658                 }
 659                 __do_wp_page(error_code, address, tsk);
 660                 return;
 661         }
 662         printk("bad page directory entry %08lx\n",page);
 663         *pg_table = 0;
 664 }
 665 
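      /*
       * __verify_write() is the software fallback for CPUs that do not honour
       * the WP bit in supervisor mode (wp_works_ok == 0, i.e. a plain i386):
       * it forces a write fault on every page in the range so that any
       * copy-on-write pages are broken up before the kernel writes to user
       * memory directly.
       */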
 666 static int __verify_write(unsigned long start, unsigned long size)
 667 {
 668         size--;
 669         size += start & ~PAGE_MASK;
 670         size >>= PAGE_SHIFT;
 671         start &= PAGE_MASK;
 672         do {
 673                 do_wp_page(1,start,current);
 674                 start += PAGE_SIZE;
 675         } while (size--);
 676         return 0;
 677 }
 678 
 679 int verify_area(int type, const void * addr, unsigned long size)
 680 {
 681         struct vm_area_struct * vma;
 682 
 683         /* If the current user space is mapped to kernel space (for the
 684          * case where we use a fake user buffer with get_fs/set_fs()) we
 685          * don't expect to find the address in the user vm map.
 686          */
 687         if (get_fs() == get_ds())
 688                 return 0;
 689 
 690         for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
 691                 if (!vma)
 692                         goto bad_area;
 693                 if (vma->vm_end > (unsigned long) addr)
 694                         break;
 695         }
 696         if (vma->vm_start <= (unsigned long) addr)
 697                 goto good_area;
 698         if (!(vma->vm_flags & VM_GROWSDOWN))
 699                 goto bad_area;
 700         if (vma->vm_end - (unsigned long) addr > current->rlim[RLIMIT_STACK].rlim_cur)
 701                 goto bad_area;
 702 good_area:
 703         while (vma->vm_end - (unsigned long) addr < size) {
 704                 struct vm_area_struct * next = vma->vm_next;
 705                 if (!next)
 706                         goto bad_area;
 707                 if (vma->vm_end != next->vm_start)
 708                         goto bad_area;
 709                 vma = next;
 710         }
 711         if (wp_works_ok || type == VERIFY_READ || !size)
 712                 return 0;
 713         return __verify_write((unsigned long) addr,size);
 714 bad_area:
 715         return -EFAULT;
 716 }
 717 
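      /*
       * get_empty_page() maps a freshly allocated page at the given address
       * with PAGE_PRIVATE protection.  If no memory is available the task is
       * given oom() and BAD_PAGE is mapped instead, so the faulting access
       * can still complete.
       */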
 718 static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
 719 {
 720         unsigned long tmp;
 721 
 722         if (!(tmp = get_free_page(GFP_KERNEL))) {
 723                 oom(tsk);
 724                 tmp = BAD_PAGE;
 725         }
 726         if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
 727                 free_page(tmp);
 728 }
 729 
 730 /*
 731  * try_to_share() checks the page at address "address" in the task "p",
 732  * to see if it exists, and if it is clean. If so, share it with the current
 733  * task.
 734  *
 735  * NOTE! This assumes we have checked that p != current, and that they
 736  * share the same inode and can generally otherwise be shared.
 737  */
 738 static int try_to_share(unsigned long to_address, struct vm_area_struct * to_area,
 739         unsigned long from_address, struct vm_area_struct * from_area,
 740         unsigned long newpage)
 741 {
 742         unsigned long from;
 743         unsigned long to;
 744         unsigned long from_page;
 745         unsigned long to_page;
 746 
 747         from_page = (unsigned long)PAGE_DIR_OFFSET(from_area->vm_task->tss.cr3,from_address);
 748         to_page = (unsigned long)PAGE_DIR_OFFSET(to_area->vm_task->tss.cr3,to_address);
 749 /* is there a page-directory at from? */
 750         from = *(unsigned long *) from_page;
 751         if (!(from & PAGE_PRESENT))
 752                 return 0;
 753         from &= PAGE_MASK;
 754         from_page = from + PAGE_PTR(from_address);
 755         from = *(unsigned long *) from_page;
 756 /* is the page clean and present? */
 757         if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
 758                 return 0;
 759         if (from >= high_memory)
 760                 return 0;
 761         if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
 762                 return 0;
 763 /* is the destination ok? */
 764         to = *(unsigned long *) to_page;
 765         if (!(to & PAGE_PRESENT))
 766                 return 0;
 767         to &= PAGE_MASK;
 768         to_page = to + PAGE_PTR(to_address);
 769         if (*(unsigned long *) to_page)
 770                 return 0;
 771 /* do we copy? */
 772         if (newpage) {
 773                 copy_page((from & PAGE_MASK), newpage);
 774                 *(unsigned long *) to_page = newpage | to_area->vm_page_prot;
 775                 return 1;
 776         }
 777 /* just share them.. */
 778         mem_map[MAP_NR(from)]++;
 779 /* fill in the 'to' field, checking for COW-stuff */
 780         to = (from & PAGE_MASK) | to_area->vm_page_prot;
 781         if (to & PAGE_COW)
 782                 to &= ~PAGE_RW;
 783         *(unsigned long *) to_page = to;
 784 /* Check if we need to do anything at all to the 'from' field */
 785         if (!(from & PAGE_RW))
 786                 return 1;
 787         if (!(from_area->vm_page_prot & PAGE_COW))
 788                 return 1;
  789 /* ok, need to mark it read-only, so invalidate any possible old TLB entry */
 790         from &= ~PAGE_RW;
 791         *(unsigned long *) from_page = from;
 792         invalidate();
 793         return 1;
 794 }
 795 
 796 /*
 797  * share_page() tries to find a process that could share a page with
 798  * the current one.
 799  *
 800  * We first check if it is at all feasible by checking inode->i_count.
 801  * It should be >1 if there are other tasks sharing this inode.
 802  */
 803 static int share_page(struct vm_area_struct * area, unsigned long address,
 804         unsigned long error_code, unsigned long newpage)
 805 {
 806         struct inode * inode;
 807         struct task_struct ** p;
 808         unsigned long offset;
 809         unsigned long from_address;
 810         unsigned long give_page;
 811 
 812         if (!area || !(inode = area->vm_inode) || inode->i_count < 2)
 813                 return 0;
 814         /* do we need to copy or can we just share? */
 815         give_page = 0;
 816         if ((area->vm_page_prot & PAGE_COW) && (error_code & PAGE_RW)) {
 817                 if (!newpage)
 818                         return 0;
 819                 give_page = newpage;
 820         }
 821         offset = address - area->vm_start + area->vm_offset;
 822         for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
 823                 struct vm_area_struct * mpnt;
 824                 if (!*p)
 825                         continue;
 826                 if (area->vm_task == *p)
 827                         continue;
 828                 /* Now see if there is something in the VMM that
 829                    we can share pages with */
 830                 for (mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) {
 831                         /* must be same inode */
 832                         if (mpnt->vm_inode != inode)
 833                                 continue;
 834                         /* offsets must be mutually page-aligned */
 835                         if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK)
 836                                 continue;
 837                         /* the other area must actually cover the wanted page.. */
 838                         from_address = offset + mpnt->vm_start - mpnt->vm_offset;
 839                         if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end)
 840                                 continue;
 841                         /* .. NOW we can actually try to use the same physical page */
 842                         if (!try_to_share(address, area, from_address, mpnt, give_page))
 843                                 continue;
 844                         /* free newpage if we never used it.. */
 845                         if (give_page || !newpage)
 846                                 return 1;
 847                         free_page(newpage);
 848                         return 1;
 849                 }
 850         }
 851         return 0;
 852 }
 853 
 854 /*
 855  * fill in an empty page-table if none exists.
 856  */
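      /*
       * (Note the double check below: get_free_page(GFP_KERNEL) may sleep, so
       *  the directory entry is read again afterwards in case another page
       *  fault filled it in while we were waiting.)
       */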
 857 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
 858 {
 859         unsigned long page;
 860         unsigned long *p;
 861 
 862         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 863         if (PAGE_PRESENT & *p)
 864                 return *p;
 865         if (*p) {
  866                 printk("get_empty_pgtable: bad page-directory entry\n");
 867                 *p = 0;
 868         }
 869         page = get_free_page(GFP_KERNEL);
 870         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 871         if (PAGE_PRESENT & *p) {
 872                 free_page(page);
 873                 return *p;
 874         }
 875         if (*p) {
  876                 printk("get_empty_pgtable: bad page-directory entry\n");
 877                 *p = 0;
 878         }
 879         if (page) {
 880                 *p = page | PAGE_TABLE;
 881                 return *p;
 882         }
 883         oom(current);
 884         *p = BAD_PAGETABLE | PAGE_TABLE;
 885         return 0;
 886 }
 887 
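      /*
       * handle_no_page() resolves a fault in a vma that provides a nopage()
       * operation: it first tries share_page() to reuse a page some other
       * process already has for the same inode, and otherwise lets
       * vma->vm_ops->nopage() fill the freshly allocated page before
       * installing it with put_page(), dropping PAGE_RW when a COW page is
       * already shared.
       */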
 888 static void handle_no_page(struct vm_area_struct * vma,
 889         unsigned long address, unsigned long error_code)
 890 {
 891         unsigned long page;
 892         int prot;
 893 
 894         page = get_free_page(GFP_KERNEL);
 895         if (share_page(vma, address, error_code, page)) {
 896                 ++vma->vm_task->mm->min_flt;
 897                 return;
 898         }
 899         if (!page) {
 900                 oom(current);
 901                 put_page(vma->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
 902                 return;
 903         }
 904         ++vma->vm_task->mm->maj_flt;
 905         ++vma->vm_task->mm->rss;
 906         page = vma->vm_ops->nopage(vma, address, page, error_code);
 907         if (share_page(vma, address, error_code, 0))
 908                 return;
 909         prot = vma->vm_page_prot;
 910         if ((prot & PAGE_COW) && mem_map[MAP_NR(page)] > 1)
 911                 prot &= ~PAGE_RW;
 912         if (put_page(vma->vm_task, page, address, prot))
 913                 return;
 914         free_page(page);
 915         oom(current);
 916 }
 917 
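      /*
       * do_no_page() handles faults on not-present pages: it makes sure a
       * page table exists, swaps the page back in if the pte holds a swap
       * entry, grows the stack for VM_GROWSDOWN areas, and otherwise either
       * maps a fresh private page (anonymous memory) or defers to
       * handle_no_page().  Faults outside any vma raise SIGSEGV for
       * user-mode accesses, while kernel-mode accesses fall through and get
       * an empty page so that execution can continue.
       */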
 918 void do_no_page(unsigned long error_code, unsigned long address,
 919         struct task_struct *tsk)
 920 {
 921         unsigned long page, tmp;
 922         struct vm_area_struct * vma;
 923 
 924         page = get_empty_pgtable(tsk,address);
 925         if (!page)
 926                 return;
 927         page &= PAGE_MASK;
 928         page += PAGE_PTR(address);
 929         tmp = *(unsigned long *) page;
 930         if (tmp & PAGE_PRESENT)
 931                 return;
 932         if (tmp) {
 933                 ++tsk->mm->rss;
 934                 ++tsk->mm->maj_flt;
 935                 swap_in((unsigned long *) page);
 936                 return;
 937         }
 938         address &= PAGE_MASK;
 939         for (vma = tsk->mm->mmap ; ; vma = vma->vm_next) {
 940                 if (!vma)
 941                         goto bad_area;
 942                 if (vma->vm_end > address)
 943                         break;
 944         }
 945         if (vma->vm_start <= address)
 946                 goto good_area;
 947         if (!(vma->vm_flags & VM_GROWSDOWN))
 948                 goto bad_area;
 949         if (vma->vm_end - address > tsk->rlim[RLIMIT_STACK].rlim_cur)
 950                 goto bad_area;
 951         vma->vm_offset -= vma->vm_start - address;
 952         vma->vm_start = address;
 953 
 954 good_area:
 955         if (!vma->vm_ops || !vma->vm_ops->nopage) {
 956                 ++tsk->mm->rss;
 957                 ++tsk->mm->min_flt;
 958                 get_empty_page(tsk,address);
 959                 return;
 960         }
 961         handle_no_page(vma, address, error_code);
 962         return;
 963 
 964 bad_area:
 965         if (tsk != current)
 966                 goto kernel_needs_bad_page;
 967         tsk->tss.cr2 = address;
 968         tsk->tss.error_code = error_code;
 969         tsk->tss.trap_no = 14;
 970         send_sig(SIGSEGV,tsk,1);
 971         if (error_code & 4)     /* user level access? */
 972                 return;
 973 
 974 kernel_needs_bad_page:
 975         ++tsk->mm->rss;
 976         ++tsk->mm->min_flt;
 977         get_empty_page(tsk,address);
 978 }
 979 
 980 /*
 981  * This routine handles page faults.  It determines the address,
 982  * and the problem, and then passes it off to one of the appropriate
 983  * routines.
 984  */
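      /*
       * (The i386 hardware error code is interpreted as follows: bit 0 set
       *  means a protection violation on a present page, clear means the page
       *  was not present; bit 1 means the access was a write; bit 2 means it
       *  came from user mode - hence the PAGE_PRESENT test below and the
       *  "error_code & 4" test in do_no_page() above.)
       */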
 985 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 986 {
 987         unsigned long address;
 988         unsigned long page;
 989         unsigned int bit;
 990 
 991         /* get the address */
 992         __asm__("movl %%cr2,%0":"=r" (address));
 993         if (address < TASK_SIZE) {
 994                 if (regs->eflags & VM_MASK) {
 995                         bit = (address - 0xA0000) >> PAGE_SHIFT;
 996                         if (bit < 32)
 997                                 current->screen_bitmap |= 1 << bit;
 998                 }
 999                 if (error_code & PAGE_PRESENT) {
1000 #ifdef CONFIG_TEST_VERIFY_AREA
1001                         if (regs->cs == KERNEL_CS)
1002                                 printk("WP fault at %08x\n", regs->eip);
1003 #endif
1004                         do_wp_page(error_code, address, current);
1005                 } else {
1006                         do_no_page(error_code, address, current);
1007                 }
1008                 return;
1009         }
1010         address -= TASK_SIZE;
1011         if (wp_works_ok < 0 && address == 0 && (error_code & PAGE_PRESENT)) {
1012                 wp_works_ok = 1;
1013                 pg0[0] = PAGE_SHARED;
1014                 printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
1015                 return;
1016         }
1017         if (address < PAGE_SIZE) {
1018                 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
1019                 pg0[0] = PAGE_SHARED;
1020         } else
1021                 printk(KERN_ALERT "Unable to handle kernel paging request");
1022         printk(" at kernel address %08lx\n",address);
1023         address += TASK_SIZE;
1024         __asm__("movl %%cr3,%0" : "=r" (page));
1025         printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
1026                 current->tss.cr3, page);
1027         page = ((unsigned long *) page)[address >> 22];
1028         printk(KERN_ALERT "*pde = %08lx\n", page);
1029         if (page & PAGE_PRESENT) {
1030                 page &= PAGE_MASK;
1031                 address &= 0x003ff000;
1032                 page = ((unsigned long *) page)[address >> PAGE_SHIFT];
1033                 printk(KERN_ALERT "*pte = %08lx\n", page);
1034         }
1035         die_if_kernel("Oops", regs, error_code);
1036         do_exit(SIGKILL);
1037 }
1038 
1039 /*
1040  * BAD_PAGE is the page that is used for page faults when linux
1041  * is out-of-memory. Older versions of linux just did a
1042  * do_exit(), but using this instead means there is less risk
 1044  * for a process dying in kernel mode, possibly leaving an inode
1044  * unused etc..
1045  *
1046  * BAD_PAGETABLE is the accompanying page-table: it is initialized
1047  * to point to BAD_PAGE entries.
1048  *
1049  * ZERO_PAGE is a special page that is used for zero-initialized
1050  * data and COW.
1051  */
1052 unsigned long __bad_pagetable(void)
1053 {
1054         extern char empty_bad_page_table[PAGE_SIZE];
1055 
1056         __asm__ __volatile__("cld ; rep ; stosl":
1057                 :"a" (BAD_PAGE + PAGE_TABLE),
1058                  "D" ((long) empty_bad_page_table),
1059                  "c" (PTRS_PER_PAGE)
1060                 :"di","cx");
1061         return (unsigned long) empty_bad_page_table;
1062 }
1063 
1064 unsigned long __bad_page(void)
1065 {
1066         extern char empty_bad_page[PAGE_SIZE];
1067 
1068         __asm__ __volatile__("cld ; rep ; stosl":
1069                 :"a" (0),
1070                  "D" ((long) empty_bad_page),
1071                  "c" (PTRS_PER_PAGE)
1072                 :"di","cx");
1073         return (unsigned long) empty_bad_page;
1074 }
1075 
1076 unsigned long __zero_page(void)
1077 {
1078         extern char empty_zero_page[PAGE_SIZE];
1079 
1080         __asm__ __volatile__("cld ; rep ; stosl":
1081                 :"a" (0),
1082                  "D" ((long) empty_zero_page),
1083                  "c" (PTRS_PER_PAGE)
1084                 :"di","cx");
1085         return (unsigned long) empty_zero_page;
1086 }
1087 
1088 void show_mem(void)
1089 {
1090         int i,free = 0,total = 0,reserved = 0;
1091         int shared = 0;
1092 
1093         printk("Mem-info:\n");
1094         show_free_areas();
1095         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
1096         i = high_memory >> PAGE_SHIFT;
1097         while (i-- > 0) {
1098                 total++;
1099                 if (mem_map[i] & MAP_PAGE_RESERVED)
1100                         reserved++;
1101                 else if (!mem_map[i])
1102                         free++;
1103                 else
1104                         shared += mem_map[i]-1;
1105         }
1106         printk("%d pages of RAM\n",total);
1107         printk("%d free pages\n",free);
1108         printk("%d reserved pages\n",reserved);
1109         printk("%d pages shared\n",shared);
1110         show_buffers();
1111 #ifdef CONFIG_NET
1112         show_net_buffers();
1113 #endif
1114 }
1115 
1116 extern unsigned long free_area_init(unsigned long, unsigned long);
1117 
1118 /*
1119  * paging_init() sets up the page tables - note that the first 4MB are
1120  * already mapped by head.S.
1121  *
 1122  * This routine also unmaps the page at virtual kernel address 0, so
1123  * that we can trap those pesky NULL-reference errors in the kernel.
1124  */
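      /*
       * (Reminder of the two-level i386 layout assumed throughout: bits 31-22
       *  of a virtual address index the page directory, bits 21-12 index the
       *  page table, and bits 11-0 are the offset within the 4kB page.  The
       *  kernel lives at virtual 0xC0000000, i.e. page-directory slot 768,
       *  which is why the loop below fills in the tables from slot 768
       *  upwards and temporarily mirrors them from slot 0 for the init task.)
       */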
1125 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
1126 {
1127         unsigned long * pg_dir;
1128         unsigned long * pg_table;
1129         unsigned long tmp;
1130         unsigned long address;
1131 
1132 /*
1133  * Physical page 0 is special; it's not touched by Linux since BIOS
1134  * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
1135  * and write protected to detect null pointer references in the
1136  * kernel.
1137  */
1138 #if 0
1139         memset((void *) 0, 0, PAGE_SIZE);
1140 #endif
1141         start_mem = PAGE_ALIGN(start_mem);
1142         address = 0;
1143         pg_dir = swapper_pg_dir;
1144         while (address < end_mem) {
1145                 tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
1146                 if (!tmp) {
1147                         tmp = start_mem | PAGE_TABLE;
1148                         *(pg_dir + 768) = tmp;
1149                         start_mem += PAGE_SIZE;
1150                 }
 1151                 *pg_dir = tmp;                  /* also map it in at 0x00000000 for init */
1152                 pg_dir++;
1153                 pg_table = (unsigned long *) (tmp & PAGE_MASK);
1154                 for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
1155                         if (address < end_mem)
1156                                 *pg_table = address | PAGE_SHARED;
1157                         else
1158                                 *pg_table = 0;
1159                         address += PAGE_SIZE;
1160                 }
1161         }
1162         invalidate();
1163         return free_area_init(start_mem, end_mem);
1164 }
1165 
1166 void mem_init(unsigned long start_low_mem,
1167               unsigned long start_mem, unsigned long end_mem)
1168 {
1169         int codepages = 0;
1170         int reservedpages = 0;
1171         int datapages = 0;
1172         unsigned long tmp;
1173         extern int etext;
1174 
1175         cli();
1176         end_mem &= PAGE_MASK;
1177         high_memory = end_mem;
1178 
1179         /* mark usable pages in the mem_map[] */
1180         start_low_mem = PAGE_ALIGN(start_low_mem);
1181         start_mem = PAGE_ALIGN(start_mem);
1182 
1183         /*
1184          * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
1185          * They seem to have done something stupid with the floppy
1186          * controller as well..
1187          */
1188         while (start_low_mem < 0x9f000) {
1189                 mem_map[MAP_NR(start_low_mem)] = 0;
1190                 start_low_mem += PAGE_SIZE;
1191         }
1192 
1193         while (start_mem < high_memory) {
1194                 mem_map[MAP_NR(start_mem)] = 0;
1195                 start_mem += PAGE_SIZE;
1196         }
1197 #ifdef CONFIG_SOUND
1198         sound_mem_init();
1199 #endif
1200         for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
1201                 if (mem_map[MAP_NR(tmp)]) {
1202                         if (tmp >= 0xA0000 && tmp < 0x100000)
1203                                 reservedpages++;
1204                         else if (tmp < (unsigned long) &etext)
1205                                 codepages++;
1206                         else
1207                                 datapages++;
1208                         continue;
1209                 }
1210                 mem_map[MAP_NR(tmp)] = 1;
1211                 free_page(tmp);
1212         }
1213         tmp = nr_free_pages << PAGE_SHIFT;
1214         printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
1215                 tmp >> 10,
1216                 high_memory >> 10,
1217                 codepages << (PAGE_SHIFT-10),
1218                 reservedpages << (PAGE_SHIFT-10),
1219                 datapages << (PAGE_SHIFT-10));
1220 /* test if the WP bit is honoured in supervisor mode */
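      /*
       * (How the test works: pg0[0] maps page 0 read-only and the kernel then
       *  writes to address 0.  A CPU that honours WP in supervisor mode
       *  faults, and do_page_fault() recognises the special case -
       *  wp_works_ok < 0 at kernel address 0 - and sets wp_works_ok to 1; on
       *  a plain 386 the write silently succeeds and wp_works_ok is forced to
       *  0 below.)
       */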
1221         wp_works_ok = -1;
1222         pg0[0] = PAGE_READONLY;
1223         invalidate();
1224         __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
1225         pg0[0] = 0;
1226         invalidate();
1227         if (wp_works_ok < 0)
1228                 wp_works_ok = 0;
1229 #ifdef CONFIG_TEST_VERIFY_AREA
1230         wp_works_ok = 0;
1231 #endif
1232         return;
1233 }
1234 
1235 void si_meminfo(struct sysinfo *val)
1236 {
1237         int i;
1238 
1239         i = high_memory >> PAGE_SHIFT;
1240         val->totalram = 0;
1241         val->sharedram = 0;
1242         val->freeram = nr_free_pages << PAGE_SHIFT;
1243         val->bufferram = buffermem;
1244         while (i-- > 0)  {
1245                 if (mem_map[i] & MAP_PAGE_RESERVED)
1246                         continue;
1247                 val->totalram++;
1248                 if (!mem_map[i])
1249                         continue;
1250                 val->sharedram += mem_map[i]-1;
1251         }
1252         val->totalram <<= PAGE_SHIFT;
1253         val->sharedram <<= PAGE_SHIFT;
1254         return;
1255 }
1256 
1257 
1258 /* This handles a generic mmap of a disk file */
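      /*
       * (It translates the faulting offset within the file into block numbers
       *  with bmap() - nr[8] is enough because eight blocks of PAGE_SIZE/8 =
       *  512 bytes cover a page - and reads them into the page with
       *  bread_page().)
       */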
1259 unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
1260         unsigned long page, int error_code)
1261 {
1262         struct inode * inode = area->vm_inode;
1263         unsigned int block;
1264         int nr[8];
1265         int i, j;
1266         int prot = area->vm_page_prot;
1267 
1268         address &= PAGE_MASK;
1269         block = address - area->vm_start + area->vm_offset;
1270         block >>= inode->i_sb->s_blocksize_bits;
1271 
1272         for (i=0, j=0; i< PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
1273                 nr[j] = bmap(inode,block);
1274         if (error_code & PAGE_RW)
1275                 prot |= PAGE_RW | PAGE_DIRTY;
1276         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, !(error_code & PAGE_RW));
1277 }
1278 
1279 void file_mmap_free(struct vm_area_struct * area)
1280 {
1281         if (area->vm_inode)
1282                 iput(area->vm_inode);
1283 #if 0
1284         if (area->vm_inode)
1285                 printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev, 
1286                                  area->vm_inode->i_ino, area->vm_inode->i_count);
1287 #endif
1288 }
1289 
1290 /*
1291  * Compare the contents of the mmap entries, and decide if we are allowed to
1292  * share the pages
1293  */
1294 int file_mmap_share(struct vm_area_struct * area1, 
1295                     struct vm_area_struct * area2, 
1296                     unsigned long address)
1297 {
1298         return 1;
1299 }
1300 
1301 struct vm_operations_struct file_mmap = {
1302         NULL,                   /* open */
1303         file_mmap_free,         /* close */
1304         file_mmap_nopage,       /* nopage */
1305         NULL,                   /* wppage */
1306         file_mmap_share,        /* share */
1307         NULL,                   /* unmap */
1308 };
