root/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. do_wp_page
  13. verify_area
  14. get_empty_page
  15. try_to_share
  16. share_page
  17. get_empty_pgtable
  18. do_swap_page
  19. do_no_page
  20. do_page_fault
  21. __bad_pagetable
  22. __bad_page
  23. __zero_page
  24. show_mem
  25. paging_init
  26. mem_init
  27. si_meminfo

   1 /*
   2  *  linux/mm/memory.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  */
   6 
   7 /*
   8  * demand-loading started 01.12.91 - seems it is high on the list of
   9  * things wanted, and it should be easy to implement. - Linus
  10  */
  11 
  12 /*
  13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  14  * pages started 02.12.91, seems to work. - Linus.
  15  *
  16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17  * would have taken more than the 6M I have free, but it worked well as
  18  * far as I could see.
  19  *
  20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21  */
  22 
  23 /*
  24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25  * thought has to go into this. Oh, well..
  26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27  *              Found it. Everything seems to work now.
  28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29  */
  30 
  31 /*
  32  * 05.04.94  -  Multi-page memory management added for v1.1.
  33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  34  */
  35 
  36 #include <linux/config.h>
  37 #include <linux/signal.h>
  38 #include <linux/sched.h>
  39 #include <linux/head.h>
  40 #include <linux/kernel.h>
  41 #include <linux/errno.h>
  42 #include <linux/string.h>
  43 #include <linux/types.h>
  44 #include <linux/ptrace.h>
  45 #include <linux/mman.h>
  46 
  47 #include <asm/system.h>
  48 #include <asm/segment.h>
  49 
  50 /*
  51  * Define this if things work differently on an i386 and an i486:
  52  * it will (on an i486) warn about kernel memory accesses that are
  53  * done without a 'verify_area(VERIFY_WRITE,..)'
  54  */
  55 #undef CONFIG_TEST_VERIFY_AREA
  56 
  57 unsigned long high_memory = 0;
  58 
  59 extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
  60 
  61 extern void scsi_mem_init(unsigned long);
  62 extern void sound_mem_init(void);
  63 extern void die_if_kernel(char *,struct pt_regs *,long);
  64 extern void show_net_buffers(void);
  65 
  66 /*
  67  * The free_area_list arrays point to the queue heads of the free areas
  68  * of different sizes
  69  */
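     /*
      * free_area_list[order] heads the free blocks of 2^order pages, and
      * free_area_map[order] is the bitmap used to decide when two "buddy"
      * blocks can be merged back into a larger one.  The lists themselves
      * are maintained by the page allocator outside this file.
      */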
  70 int nr_swap_pages = 0;
  71 int nr_free_pages = 0;
  72 struct mem_list free_area_list[NR_MEM_LISTS];
  73 unsigned char * free_area_map[NR_MEM_LISTS];
  74 
  75 #define copy_page(from,to) memcpy((void *) to, (void *) from, PAGE_SIZE)
  76 
  77 unsigned short * mem_map = NULL;
  78 
  79 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
  80 
  81 /*
  82  * oom() prints a message (so that the user knows why the process died),
  83  * and gives the process an untrappable SIGKILL.
  84  */
  85 void oom(struct task_struct * task)
  86 {
  87         printk("\nOut of memory for %s.\n", current->comm);
  88         task->sigaction[SIGKILL-1].sa_handler = NULL;
  89         task->blocked &= ~(1<<(SIGKILL-1));
  90         send_sig(SIGKILL,task,1);
  91 }
  92 
  93 static void free_one_table(unsigned long * page_dir)
  94 {
  95         int j;
  96         unsigned long pg_table = *page_dir;
  97         unsigned long * page_table;
  98 
  99         if (!pg_table)
 100                 return;
 101         *page_dir = 0;
 102         if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
 103                 printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
 104                 return;
 105         }
 106         if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
 107                 return;
 108         page_table = (unsigned long *) (pg_table & PAGE_MASK);
 109         for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
 110                 unsigned long pg = *page_table;
 111                 
 112                 if (!pg)
 113                         continue;
 114                 *page_table = 0;
 115                 if (pg & PAGE_PRESENT)
 116                         free_page(PAGE_MASK & pg);
 117                 else
 118                         swap_free(pg);
 119         }
 120         free_page(PAGE_MASK & pg_table);
 121 }
 122 
 123 /*
 124  * This function clears all user-level page tables of a process - this
 125  * is needed by execve(), so that old pages aren't in the way. Note that
 126  * unlike 'free_page_tables()', this function still leaves a valid
 127  * page-table-tree in memory: it just removes the user pages. The two
 128  * functions are similar, but there is a fundamental difference.
 129  */
 130 void clear_page_tables(struct task_struct * tsk)
 131 {
 132         int i;
 133         unsigned long pg_dir;
 134         unsigned long * page_dir;
 135 
 136         if (!tsk)
 137                 return;
 138         if (tsk == task[0])
 139                 panic("task[0] (swapper) doesn't support exec()\n");
 140         pg_dir = tsk->tss.cr3;
 141         page_dir = (unsigned long *) pg_dir;
 142         if (!page_dir || page_dir == swapper_pg_dir) {
 143                 printk("Trying to clear kernel page-directory: not good\n");
 144                 return;
 145         }
 146         if (mem_map[MAP_NR(pg_dir)] > 1) {
 147                 unsigned long * new_pg;
 148 
 149                 if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
 150                         oom(tsk);
 151                         return;
 152                 }
 153                 for (i = 768 ; i < 1024 ; i++)
 154                         new_pg[i] = page_dir[i];
 155                 free_page(pg_dir);
 156                 tsk->tss.cr3 = (unsigned long) new_pg;
 157                 return;
 158         }
 159         for (i = 0 ; i < 768 ; i++,page_dir++)
 160                 free_one_table(page_dir);
 161         invalidate();
 162         return;
 163 }
 164 
 165 /*
 166  * This function frees up all page tables of a process when it exits.
 167  */
 168 void free_page_tables(struct task_struct * tsk)
 169 {
 170         int i;
 171         unsigned long pg_dir;
 172         unsigned long * page_dir;
 173 
 174         if (!tsk)
 175                 return;
 176         if (tsk == task[0]) {
 177                 printk("task[0] (swapper) killed: unable to recover\n");
 178                 panic("Trying to free up swapper memory space");
 179         }
 180         pg_dir = tsk->tss.cr3;
 181         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 182                 printk("Trying to free kernel page-directory: not good\n");
 183                 return;
 184         }
 185         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 186         if (tsk == current)
 187                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 188         if (mem_map[MAP_NR(pg_dir)] > 1) {
 189                 free_page(pg_dir);
 190                 return;
 191         }
 192         page_dir = (unsigned long *) pg_dir;
 193         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 194                 free_one_table(page_dir);
 195         free_page(pg_dir);
 196         invalidate();
 197 }
 198 
 199 /*
 200  * clone_page_tables() clones the page table for a process - both
 201  * processes will have the exact same pages in memory. There are
 202  * probably races in the memory management with cloning, but we'll
 203  * see..
 204  */
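     /*
      * (Presumably this is the path taken when a clone() asks for a fully
      * shared address space, while an ordinary fork() goes through
      * copy_page_tables() below - see kernel/fork.c.)
      */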
 205 int clone_page_tables(struct task_struct * tsk)
 206 {
 207         unsigned long pg_dir;
 208 
 209         pg_dir = current->tss.cr3;
 210         mem_map[MAP_NR(pg_dir)]++;
 211         tsk->tss.cr3 = pg_dir;
 212         return 0;
 213 }
 214 
 215 /*
 216  * copy_page_tables() just copies the whole process memory range:
 217  * note the special handling of RESERVED (ie kernel) pages, which
 218  * means that they are always shared by all processes.
 219  */
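     /*
      * Copy-on-write pages (PAGE_COW set) are write-protected in *both*
      * the parent and the child below, so the first write by either
      * process faults into do_wp_page(), which gives the writer its own
      * copy (or just turns write back on if the page is no longer shared).
      */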
 220 int copy_page_tables(struct task_struct * tsk)
 221 {
 222         int i;
 223         unsigned long old_pg_dir, *old_page_dir;
 224         unsigned long new_pg_dir, *new_page_dir;
 225 
 226         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 227                 return -ENOMEM;
 228         old_pg_dir = current->tss.cr3;
 229         tsk->tss.cr3 = new_pg_dir;
 230         old_page_dir = (unsigned long *) old_pg_dir;
 231         new_page_dir = (unsigned long *) new_pg_dir;
 232         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 233                 int j;
 234                 unsigned long old_pg_table, *old_page_table;
 235                 unsigned long new_pg_table, *new_page_table;
 236 
 237                 old_pg_table = *old_page_dir;
 238                 if (!old_pg_table)
 239                         continue;
 240                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
 241                         printk("copy_page_tables: bad page table: "
 242                                 "probable memory corruption\n");
 243                         *old_page_dir = 0;
 244                         continue;
 245                 }
 246                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 247                         *new_page_dir = old_pg_table;
 248                         continue;
 249                 }
 250                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 251                         free_page_tables(tsk);
 252                         return -ENOMEM;
 253                 }
 254                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 255                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 256                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 257                         unsigned long pg;
 258                         pg = *old_page_table;
 259                         if (!pg)
 260                                 continue;
 261                         if (!(pg & PAGE_PRESENT)) {
 262                                 *new_page_table = swap_duplicate(pg);
 263                                 continue;
 264                         }
 265                         if (pg > high_memory || (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)) {
 266                                 *new_page_table = pg;
 267                                 continue;
 268                         }
 269                         if (pg & PAGE_COW)
 270                                 pg &= ~PAGE_RW;
 271                         if (delete_from_swap_cache(pg))
 272                                 pg |= PAGE_DIRTY;
 273                         *new_page_table = pg;
 274                         *old_page_table = pg;
 275                         mem_map[MAP_NR(pg)]++;
 276                 }
 277                 *new_page_dir = new_pg_table | PAGE_TABLE;
 278         }
 279         invalidate();
 280         return 0;
 281 }
 282 
 283 /*
 284  * a more complete version of free_page_tables, working at page
 285  * granularity.
 286  */
 287 int unmap_page_range(unsigned long from, unsigned long size)
 288 {
 289         unsigned long page, page_dir;
 290         unsigned long *page_table, *dir;
 291         unsigned long poff, pcnt, pc;
 292 
 293         if (from & ~PAGE_MASK) {
 294                 printk("unmap_page_range called with wrong alignment\n");
 295                 return -EINVAL;
 296         }
 297         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 298         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 299         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 300         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 301                 pcnt = size;
 302 
 303         for ( ; size > 0; ++dir, size -= pcnt,
 304              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 305                 if (!(page_dir = *dir)) {
 306                         poff = 0;
 307                         continue;
 308                 }
 309                 if (!(page_dir & PAGE_PRESENT)) {
 310                         printk("unmap_page_range: bad page directory.");
 311                         continue;
 312                 }
 313                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 314                 if (poff) {
 315                         page_table += poff;
 316                         poff = 0;
 317                 }
 318                 for (pc = pcnt; pc--; page_table++) {
 319                         if ((page = *page_table) != 0) {
 320                                 *page_table = 0;
 321                                 if (PAGE_PRESENT & page) {
 322                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 323                                                 if (current->mm->rss > 0)
 324                                                         --current->mm->rss;
 325                                         free_page(PAGE_MASK & page);
 326                                 } else
 327                                         swap_free(page);
 328                         }
 329                 }
 330                 if (pcnt == PTRS_PER_PAGE) {
 331                         *dir = 0;
 332                         free_page(PAGE_MASK & page_dir);
 333                 }
 334         }
 335         invalidate();
 336         return 0;
 337 }
 338 
 339 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
 340 {
 341         unsigned long *page_table, *dir;
 342         unsigned long poff, pcnt;
 343         unsigned long page;
 344 
 345         if (mask) {
 346                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 347                         printk("zeromap_page_range: mask = %08x\n",mask);
 348                         return -EINVAL;
 349                 }
 350                 mask |= ZERO_PAGE;
 351         }
 352         if (from & ~PAGE_MASK) {
 353                 printk("zeromap_page_range: from = %08lx\n",from);
 354                 return -EINVAL;
 355         }
 356         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 357         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 358         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 359         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 360                 pcnt = size;
 361 
 362         while (size > 0) {
 363                 if (!(PAGE_PRESENT & *dir)) {
 364                                 /* clear page needed here?  SRB. */
 365                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 366                                 invalidate();
 367                                 return -ENOMEM;
 368                         }
 369                         if (PAGE_PRESENT & *dir) {
 370                                 free_page((unsigned long) page_table);
 371                                 page_table = (unsigned long *)(PAGE_MASK & *dir++);
 372                         } else
 373                                 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 374                 } else
 375                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 376                 page_table += poff;
 377                 poff = 0;
 378                 for (size -= pcnt; pcnt-- ;) {
 379                         if ((page = *page_table) != 0) {
 380                                 *page_table = 0;
 381                                 if (page & PAGE_PRESENT) {
 382                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 383                                                 if (current->mm->rss > 0)
 384                                                         --current->mm->rss;
 385                                         free_page(PAGE_MASK & page);
 386                                 } else
 387                                         swap_free(page);
 388                         }
 389                         *page_table++ = mask;
 390                 }
 391                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 392         }
 393         invalidate();
 394         return 0;
 395 }
 396 
 397 /*
 398  * maps a range of physical memory into the requested pages. the old
 399  * mappings are removed. any references to nonexistent pages result
 400  * in null mappings (currently treated as "copy-on-access")
 401  */
 402 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
 403 {
 404         unsigned long *page_table, *dir;
 405         unsigned long poff, pcnt;
 406         unsigned long page;
 407 
 408         if (mask) {
 409                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 410                         printk("remap_page_range: mask = %08x\n",mask);
 411                         return -EINVAL;
 412                 }
 413         }
 414         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 415                 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
 416                 return -EINVAL;
 417         }
 418         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 419         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 420         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 421         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 422                 pcnt = size;
 423 
 424         while (size > 0) {
 425                 if (!(PAGE_PRESENT & *dir)) {
 426                         /* clearing page here, needed?  SRB. */
 427                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 428                                 invalidate();
 429                                 return -1;
 430                         }
 431                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 432                 }
 433                 else
 434                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 435                 if (poff) {
 436                         page_table += poff;
 437                         poff = 0;
 438                 }
 439 
 440                 for (size -= pcnt; pcnt-- ;) {
 441                         if ((page = *page_table) != 0) {
 442                                 *page_table = 0;
 443                                 if (PAGE_PRESENT & page) {
 444                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 445                                                 if (current->mm->rss > 0)
 446                                                         --current->mm->rss;
 447                                         free_page(PAGE_MASK & page);
 448                                 } else
 449                                         swap_free(page);
 450                         }
 451 
 452                         /*
 453                          * the first condition should return an invalid access
 454                          * when the page is referenced. current assumptions
 455                          * cause it to be treated as demand allocation in some
 456                          * cases.
 457                          */
 458                         if (!mask)
 459                                 *page_table++ = 0;      /* not present */
 460                         else if (to >= high_memory)
 461                                 *page_table++ = (to | mask);
 462                         else if (!mem_map[MAP_NR(to)])
 463                                 *page_table++ = 0;      /* not present */
 464                         else {
 465                                 *page_table++ = (to | mask);
 466                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 467                                         ++current->mm->rss;
 468                                         mem_map[MAP_NR(to)]++;
 469                                 }
 470                         }
 471                         to += PAGE_SIZE;
 472                 }
 473                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 474         }
 475         invalidate();
 476         return 0;
 477 }
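     /*
      * A hypothetical caller, not part of memory.c: a character-device
      * mmap() would typically use remap_page_range() to map its device
      * memory into a process.  FB_BASE and FB_SIZE are made-up names for
      * the device's physical base address and length.
      */
     #if 0   /* illustration only */
     static int example_mmap_framebuffer(unsigned long user_addr)
     {
             /* PAGE_SHARED: present, read/write, user, not copy-on-write */
             return remap_page_range(user_addr, FB_BASE, FB_SIZE, PAGE_SHARED);
     }
     #endif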
 478 
 479 /*
 480  * This function puts a page in memory at the wanted address.
 481  * It returns the physical address of the page gotten, 0 if
 482  * It returns the physical address of the page on success, or 0 if
 483  * out of memory (either when trying to access the page-table or
 484  * the page itself).
 485 unsigned long put_page(struct task_struct * tsk,unsigned long page,
 486         unsigned long address,int prot)
 487 {
 488         unsigned long *page_table;
 489 
 490         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 491                 printk("put_page: prot = %08x\n",prot);
 492         if (page >= high_memory) {
 493                 printk("put_page: trying to put page %08lx at %08lx\n",page,address);
 494                 return 0;
 495         }
 496         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 497         if ((*page_table) & PAGE_PRESENT)
 498                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 499         else {
 500                 printk("put_page: bad page directory entry\n");
 501                 oom(tsk);
 502                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 503                 return 0;
 504         }
 505         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 506         if (*page_table) {
 507                 printk("put_page: page already exists\n");
 508                 *page_table = 0;
 509                 invalidate();
 510         }
 511         *page_table = page | prot;
 512 /* no need for invalidate */
 513         return page;
 514 }
 515 
 516 /*
 517  * The previous function doesn't work very well if you also want to mark
 518  * the page dirty: exec.c wants this, as it has earlier changed the page,
 519  * and we want the dirty-status to be correct (for VM). Thus the same
 520  * routine, but this time we mark it dirty too.
 521  */
 522 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
 523 {
 524         unsigned long tmp, *page_table;
 525 
 526         if (page >= high_memory)
 527                 printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
 528         if (mem_map[MAP_NR(page)] != 1)
 529                 printk("mem_map disagrees with %08lx at %08lx\n",page,address);
 530         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 531         if (PAGE_PRESENT & *page_table)
 532                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 533         else {
 534                 if (!(tmp = get_free_page(GFP_KERNEL)))
 535                         return 0;
 536                 if (PAGE_PRESENT & *page_table) {
 537                         free_page(tmp);
 538                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 539                 } else {
 540                         *page_table = tmp | PAGE_TABLE;
 541                         page_table = (unsigned long *) tmp;
 542                 }
 543         }
 544         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 545         if (*page_table) {
 546                 printk("put_dirty_page: page already exists\n");
 547                 *page_table = 0;
 548                 invalidate();
 549         }
 550         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 551 /* no need for invalidate */
 552         return page;
 553 }
 554 
 555 /*
 556  * This routine handles present pages, when users try to write
 557  * to a shared page. It is done by copying the page to a new address
 558  * and decrementing the shared-page counter for the old page.
 559  *
 560  * Goto-purists beware: the only reason for goto's here is that it results
 561  * in better assembly code.. The "default" path will see no jumps at all.
 562  */
 563 void do_wp_page(struct vm_area_struct * vma, unsigned long address,
 564         unsigned long error_code)
 565 {
 566         unsigned long *pde, pte, old_page, prot;
 567         unsigned long new_page;
 568 
 569         new_page = __get_free_page(GFP_KERNEL);
 570         pde = PAGE_DIR_OFFSET(vma->vm_task->tss.cr3,address);
 571         pte = *pde;
 572         if (!(pte & PAGE_PRESENT))
 573                 goto end_wp_page;
 574         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 575                 goto bad_wp_pagetable;
 576         pte &= PAGE_MASK;
 577         pte += PAGE_PTR(address);
 578         old_page = *(unsigned long *) pte;
 579         if (!(old_page & PAGE_PRESENT))
 580                 goto end_wp_page;
 581         if (old_page >= high_memory)
 582                 goto bad_wp_page;
 583         if (old_page & PAGE_RW)
 584                 goto end_wp_page;
 585         vma->vm_task->mm->min_flt++;
 586         prot = (old_page & ~PAGE_MASK) | PAGE_RW | PAGE_DIRTY;
 587         old_page &= PAGE_MASK;
 588         if (mem_map[MAP_NR(old_page)] != 1) {
 589                 if (new_page) {
 590                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 591                                 ++vma->vm_task->mm->rss;
 592                         copy_page(old_page,new_page);
 593                         *(unsigned long *) pte = new_page | prot;
 594                         free_page(old_page);
 595                         invalidate();
 596                         return;
 597                 }
 598                 free_page(old_page);
 599                 oom(vma->vm_task);
 600                 *(unsigned long *) pte = BAD_PAGE | prot;
 601                 invalidate();
 602                 return;
 603         }
 604         *(unsigned long *) pte |= PAGE_RW | PAGE_DIRTY;
 605         invalidate();
 606         if (new_page)
 607                 free_page(new_page);
 608         return;
 609 bad_wp_page:
 610         printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
 611         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 612         send_sig(SIGKILL, vma->vm_task, 1);
 613         goto end_wp_page;
 614 bad_wp_pagetable:
 615         printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
 616         *pde = BAD_PAGETABLE | PAGE_TABLE;
 617         send_sig(SIGKILL, vma->vm_task, 1);
 618 end_wp_page:
 619         if (new_page)
 620                 free_page(new_page);
 621         return;
 622 }
 623 
 624 /*
 625  * Ugly, ugly, but the goto's result in better assembly..
 626  */
 627 int verify_area(int type, const void * addr, unsigned long size)
 628 {
 629         struct vm_area_struct * vma;
 630         unsigned long start = (unsigned long) addr;
 631 
 632         /* If the current user space is mapped to kernel space (for the
 633          * case where we use a fake user buffer with get_fs/set_fs()) we
 634          * don't expect to find the address in the user vm map.
 635          */
 636         if (get_fs() == get_ds())
 637                 return 0;
 638 
 639         for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
 640                 if (!vma)
 641                         goto bad_area;
 642                 if (vma->vm_end > start)
 643                         break;
 644         }
 645         if (vma->vm_start <= start)
 646                 goto good_area;
 647         if (!(vma->vm_flags & VM_GROWSDOWN))
 648                 goto bad_area;
 649         if (vma->vm_end - start > current->rlim[RLIMIT_STACK].rlim_cur)
 650                 goto bad_area;
 651 
 652 good_area:
 653         if (!wp_works_ok && type == VERIFY_WRITE)
 654                 goto check_wp_fault_by_hand;
 655         for (;;) {
 656                 struct vm_area_struct * next;
 657                 if (!(vma->vm_page_prot & PAGE_USER))
 658                         goto bad_area;
 659                 if (type != VERIFY_READ && !(vma->vm_page_prot & (PAGE_COW | PAGE_RW)))
 660                         goto bad_area;
 661                 if (vma->vm_end - start >= size)
 662                         return 0;
 663                 next = vma->vm_next;
 664                 if (!next || vma->vm_end != next->vm_start)
 665                         goto bad_area;
 666                 vma = next;
 667         }
 668 
 669 check_wp_fault_by_hand:
 670         size--;
 671         size += start & ~PAGE_MASK;
 672         size >>= PAGE_SHIFT;
 673         start &= PAGE_MASK;
 674 
 675         for (;;) {
 676                 if (!(vma->vm_page_prot & (PAGE_COW | PAGE_RW)))
 677                         goto bad_area;
 678                 do_wp_page(vma, start, PAGE_PRESENT);
 679                 if (!size)
 680                         return 0;
 681                 size--;
 682                 start += PAGE_SIZE;
 683                 if (start < vma->vm_end)
 684                         continue;
 685                 vma = vma->vm_next;
 686                 if (!vma || vma->vm_start != start)
 687                         break;
 688         }
 689 
 690 bad_area:
 691         return -EFAULT;
 692 }
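     /*
      * A hypothetical caller, not part of memory.c: a system call that
      * writes into a user buffer is expected to call verify_area() before
      * touching the buffer with the fs-segment helpers from
      * <asm/segment.h>.  example_copy_to_user() is a made-up name.
      */
     #if 0   /* illustration only */
     static int example_copy_to_user(char * ubuf, const char * kbuf, int len)
     {
             int error = verify_area(VERIFY_WRITE, ubuf, len);
             if (error)
                     return error;
             memcpy_tofs(ubuf, kbuf, len);
             return len;
     }
     #endif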
 693 
 694 static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
 695 {
 696         unsigned long tmp;
 697 
 698         if (!(tmp = get_free_page(GFP_KERNEL))) {
 699                 oom(tsk);
 700                 tmp = BAD_PAGE;
 701         }
 702         if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
 703                 free_page(tmp);
 704 }
 705 
 706 /*
 707  * try_to_share() checks the page at address "address" in the task "p",
 708  * to see if it exists, and if it is clean. If so, share it with the current
 709  * task.
 710  *
 711  * NOTE! This assumes we have checked that p != current, and that they
 712  * share the same inode and can generally otherwise be shared.
 713  */
 714 static int try_to_share(unsigned long to_address, struct vm_area_struct * to_area,
 715         unsigned long from_address, struct vm_area_struct * from_area,
 716         unsigned long newpage)
 717 {
 718         unsigned long from;
 719         unsigned long to;
 720         unsigned long from_page;
 721         unsigned long to_page;
 722 
 723         from_page = (unsigned long)PAGE_DIR_OFFSET(from_area->vm_task->tss.cr3,from_address);
 724         to_page = (unsigned long)PAGE_DIR_OFFSET(to_area->vm_task->tss.cr3,to_address);
 725 /* is there a page-directory at from? */
 726         from = *(unsigned long *) from_page;
 727         if (!(from & PAGE_PRESENT))
 728                 return 0;
 729         from &= PAGE_MASK;
 730         from_page = from + PAGE_PTR(from_address);
 731         from = *(unsigned long *) from_page;
 732 /* is the page present? */
 733         if (!(from & PAGE_PRESENT))
 734                 return 0;
 735 /* if it is private, it must be clean to be shared */
 736         if (from & PAGE_DIRTY) {
 737                 if (from_area->vm_page_prot & PAGE_COW)
 738                         return 0;
 739                 if (!(from_area->vm_page_prot & PAGE_RW))
 740                         return 0;
 741         }               
 742 /* is the page reasonable at all? */
 743         if (from >= high_memory)
 744                 return 0;
 745         if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
 746                 return 0;
 747 /* is the destination ok? */
 748         to = *(unsigned long *) to_page;
 749         if (!(to & PAGE_PRESENT))
 750                 return 0;
 751         to &= PAGE_MASK;
 752         to_page = to + PAGE_PTR(to_address);
 753         if (*(unsigned long *) to_page)
 754                 return 0;
 755 /* do we copy? */
 756         if (newpage) {
 757                 if (in_swap_cache(from)) { /* implies PAGE_DIRTY */
 758                         if (from_area->vm_page_prot & PAGE_COW)
 759                                 return 0;
 760                         if (!(from_area->vm_page_prot & PAGE_RW))
 761                                 return 0;
 762                 }
 763                 copy_page((from & PAGE_MASK), newpage);
 764                 *(unsigned long *) to_page = newpage | to_area->vm_page_prot;
 765                 return 1;
 766         }
 767 /* do a final swap-cache test before sharing them.. */
 768         if (in_swap_cache(from)) {
 769                 if (from_area->vm_page_prot & PAGE_COW)
 770                         return 0;
 771                 if (!(from_area->vm_page_prot & PAGE_RW))
 772                         return 0;
 773                 from |= PAGE_DIRTY;
 774                 *(unsigned long *) from_page = from;
 775                 delete_from_swap_cache(from);
 776                 invalidate();
 777         }
 778         mem_map[MAP_NR(from)]++;
 779 /* fill in the 'to' field, checking for COW-stuff */
 780         to = (from & (PAGE_MASK | PAGE_DIRTY)) | to_area->vm_page_prot;
 781         if (to & PAGE_COW)
 782                 to &= ~PAGE_RW;
 783         *(unsigned long *) to_page = to;
 784 /* Check if we need to do anything at all to the 'from' field */
 785         if (!(from & PAGE_RW))
 786                 return 1;
 787         if (!(from_area->vm_page_prot & PAGE_COW))
 788                 return 1;
 789 /* ok, need to mark it read-only, so invalidate any possible old TB entry */
 790         from &= ~PAGE_RW;
 791         *(unsigned long *) from_page = from;
 792         invalidate();
 793         return 1;
 794 }
 795 
 796 /*
 797  * share_page() tries to find a process that could share a page with
 798  * the current one.
 799  *
 800  * We first check if it is at all feasible by checking inode->i_count.
 801  * It should be >1 if there are other tasks sharing this inode.
 802  */
 803 static int share_page(struct vm_area_struct * area, unsigned long address,
 804         unsigned long error_code, unsigned long newpage)
 805 {
 806         struct inode * inode;
 807         unsigned long offset;
 808         unsigned long from_address;
 809         unsigned long give_page;
 810         struct vm_area_struct * mpnt;
 811 
 812         if (!area || !(inode = area->vm_inode) || inode->i_count < 2)
 813                 return 0;
 814         /* do we need to copy or can we just share? */
 815         give_page = 0;
 816         if ((area->vm_page_prot & PAGE_COW) && (error_code & PAGE_RW)) {
 817                 if (!newpage)
 818                         return 0;
 819                 give_page = newpage;
 820         }
 821         offset = address - area->vm_start + area->vm_offset;
 822         /* See if there is something in the VM we can share pages with. */
 823         /* Traverse the entire circular i_mmap list, except `area' itself. */
 824         for (mpnt = area->vm_next_share; mpnt != area; mpnt = mpnt->vm_next_share) {
 825                 /* must be same inode */
 826                 if (mpnt->vm_inode != inode) {
 827                         printk("Aiee! Corrupt vm_area_struct i_mmap ring\n");
 828                         break;  
 829                 }
 830                 /* offsets must be mutually page-aligned */
 831                 if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK)
 832                         continue;
 833                 /* the other area must actually cover the wanted page.. */
 834                 from_address = offset + mpnt->vm_start - mpnt->vm_offset;
 835                 if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end)
 836                         continue;
 837                 /* .. NOW we can actually try to use the same physical page */
 838                 if (!try_to_share(address, area, from_address, mpnt, give_page))
 839                         continue;
 840                 /* free newpage if we never used it.. */
 841                 if (give_page || !newpage)
 842                         return 1;
 843                 free_page(newpage);
 844                 return 1;
 845         }
 846         return 0;
 847 }
 848 
 849 /*
 850  * fill in an empty page-table if none exists.
 851  */
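     /*
      * get_free_page(GFP_KERNEL) may sleep, which is why the directory
      * entry is looked up and checked again after the allocation: another
      * process may have installed a page table in the meantime.
      */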
 852 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
 853 {
 854         unsigned long page;
 855         unsigned long *p;
 856 
 857         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 858         if (PAGE_PRESENT & *p)
 859                 return *p;
 860         if (*p) {
 861                 printk("get_empty_pgtable: bad page-directory entry \n");
 862                 *p = 0;
 863         }
 864         page = get_free_page(GFP_KERNEL);
 865         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 866         if (PAGE_PRESENT & *p) {
 867                 free_page(page);
 868                 return *p;
 869         }
 870         if (*p) {
 871                 printk("get_empty_pgtable: bad page-directory entry \n");
 872                 *p = 0;
 873         }
 874         if (page) {
 875                 *p = page | PAGE_TABLE;
 876                 return *p;
 877         }
 878         oom(current);
 879         *p = BAD_PAGETABLE | PAGE_TABLE;
 880         return 0;
 881 }
 882 
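     /*
      * A page-table entry that is non-zero but has PAGE_PRESENT clear is
      * treated as a swap entry: do_no_page() below hands such entries to
      * do_swap_page(), which brings the page back in either through the
      * vma's own swapin() operation or through the generic swap_in().
      */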
 883 static inline void do_swap_page(struct vm_area_struct * vma,
 884         unsigned long address, unsigned long * pge, unsigned long entry)
 885 {
 886         unsigned long page;
 887 
 888         if (vma->vm_ops && vma->vm_ops->swapin)
 889                 page = vma->vm_ops->swapin(vma, entry);
 890         else
 891                 page = swap_in(entry);
 892         if (*pge != entry) {
 893                 free_page(page);
 894                 return;
 895         }
 896         page = page | vma->vm_page_prot;
 897         if (mem_map[MAP_NR(page)] > 1 && (page & PAGE_COW))
 898                 page &= ~PAGE_RW;
 899         ++vma->vm_task->mm->rss;
 900         ++vma->vm_task->mm->maj_flt;
 901         *pge = page;
 902         return;
 903 }
 904 
 905 void do_no_page(struct vm_area_struct * vma, unsigned long address,
 906         unsigned long error_code)
 907 {
 908         unsigned long page, entry, prot;
 909 
 910         page = get_empty_pgtable(vma->vm_task,address);
 911         if (!page)
 912                 return;
 913         page &= PAGE_MASK;
 914         page += PAGE_PTR(address);
 915         entry = *(unsigned long *) page;
 916         if (entry & PAGE_PRESENT)
 917                 return;
 918         if (entry) {
 919                 do_swap_page(vma, address, (unsigned long *) page, entry);
 920                 return;
 921         }
 922         address &= PAGE_MASK;
 923 
 924         if (!vma->vm_ops || !vma->vm_ops->nopage) {
 925                 ++vma->vm_task->mm->rss;
 926                 ++vma->vm_task->mm->min_flt;
 927                 get_empty_page(vma->vm_task,address);
 928                 return;
 929         }
 930         page = get_free_page(GFP_KERNEL);
 931         if (share_page(vma, address, error_code, page)) {
 932                 ++vma->vm_task->mm->min_flt;
 933                 ++vma->vm_task->mm->rss;
 934                 return;
 935         }
 936         if (!page) {
 937                 oom(current);
 938                 put_page(vma->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
 939                 return;
 940         }
 941         ++vma->vm_task->mm->maj_flt;
 942         ++vma->vm_task->mm->rss;
 943         prot = vma->vm_page_prot;
 944         /*
 945          * The fourth argument is "no_share", which tells the low-level code
 946          * to copy, not share the page even if sharing is possible.  It's
 947          * essentially an early COW detection ("moo at 5 AM").
 948          */
 949         page = vma->vm_ops->nopage(vma, address, page, (error_code & PAGE_RW) && (prot & PAGE_COW));
 950         if (share_page(vma, address, error_code, 0)) {
 951                 free_page(page);
 952                 return;
 953         }
 954         /*
 955          * This silly early PAGE_DIRTY setting removes a race
 956          * due to the bad i386 page protection.
 957          */
 958         if (error_code & PAGE_RW) {
 959                 prot |= PAGE_DIRTY;     /* can't be COW-shared: see "no_share" above */
 960         } else if ((prot & PAGE_COW) && mem_map[MAP_NR(page)] > 1)
 961                 prot &= ~PAGE_RW;
 962         if (put_page(vma->vm_task, page, address, prot))
 963                 return;
 964         free_page(page);
 965         oom(current);
 966 }
 967 
 968 /*
 969  * This routine handles page faults.  It determines the address,
 970  * and the problem, and then passes it off to one of the appropriate
 971  * routines.
 972  */
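     /*
      * The i386 hardware error code happens to match the protection bits
      * tested below: bit 0 (PAGE_PRESENT) is set for a protection fault
      * on a present page, bit 1 (PAGE_RW) for a write access and bit 2
      * (PAGE_USER) for a fault taken in user mode.
      */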
 973 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 974 {
 975         struct vm_area_struct * vma;
 976         unsigned long address;
 977         unsigned long page;
 978 
 979         /* get the address */
 980         __asm__("movl %%cr2,%0":"=r" (address));
 981         for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
 982                 if (!vma)
 983                         goto bad_area;
 984                 if (vma->vm_end > address)
 985                         break;
 986         }
 987         if (vma->vm_start <= address)
 988                 goto good_area;
 989         if (!(vma->vm_flags & VM_GROWSDOWN))
 990                 goto bad_area;
 991         if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
 992                 goto bad_area;
 993         vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
 994         vma->vm_start = (address & PAGE_MASK);
 995 /*
 996  * Ok, we have a good vm_area for this memory access, so
 997  * we can handle it..
 998  */
 999 good_area:
1000         if (regs->eflags & VM_MASK) {
1001                 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
1002                 if (bit < 32)
1003                         current->tss.screen_bitmap |= 1 << bit;
1004         }
1005         if (!(vma->vm_page_prot & PAGE_USER))
1006                 goto bad_area;
1007         if (error_code & PAGE_PRESENT) {
1008                 if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
1009                         goto bad_area;
1010 #ifdef CONFIG_TEST_VERIFY_AREA
1011                 if (regs->cs == KERNEL_CS)
1012                         printk("WP fault at %08x\n", regs->eip);
1013 #endif
1014                 do_wp_page(vma, address, error_code);
1015                 return;
1016         }
1017         do_no_page(vma, address, error_code);
1018         return;
1019 
1020 /*
1021  * Something tried to access memory that isn't in our memory map..
1022  * Fix it, but check if it's kernel or user first..
1023  */
1024 bad_area:
1025         if (error_code & PAGE_USER) {
1026                 current->tss.cr2 = address;
1027                 current->tss.error_code = error_code;
1028                 current->tss.trap_no = 14;
1029                 send_sig(SIGSEGV, current, 1);
1030                 return;
1031         }
1032 /*
1033  * Oops. The kernel tried to access some bad page. We'll have to
1034  * terminate things with extreme prejudice.
1035  */
1036         if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
1037                 wp_works_ok = 1;
1038                 pg0[0] = PAGE_SHARED;
1039                 invalidate();
1040                 printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
1041                 return;
1042         }
1043         if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
1044                 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
1045                 pg0[0] = PAGE_SHARED;
1046         } else
1047                 printk(KERN_ALERT "Unable to handle kernel paging request");
1048         printk(" at virtual address %08lx\n",address);
1049         __asm__("movl %%cr3,%0" : "=r" (page));
1050         printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
1051                 current->tss.cr3, page);
1052         page = ((unsigned long *) page)[address >> 22];
1053         printk(KERN_ALERT "*pde = %08lx\n", page);
1054         if (page & PAGE_PRESENT) {
1055                 page &= PAGE_MASK;
1056                 address &= 0x003ff000;
1057                 page = ((unsigned long *) page)[address >> PAGE_SHIFT];
1058                 printk(KERN_ALERT "*pte = %08lx\n", page);
1059         }
1060         die_if_kernel("Oops", regs, error_code);
1061         do_exit(SIGKILL);
1062 }
1063 
1064 /*
1065  * BAD_PAGE is the page that is used for page faults when linux
1066  * is out-of-memory. Older versions of linux just did a
1067  * do_exit(), but using this instead means there is less risk
1068  * of a process dying in kernel mode, possibly leaving an inode
1069  * unused etc..
1070  *
1071  * BAD_PAGETABLE is the accompanying page-table: it is initialized
1072  * to point to BAD_PAGE entries.
1073  *
1074  * ZERO_PAGE is a special page that is used for zero-initialized
1075  * data and COW.
1076  */
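     /*
      * The BAD_PAGE, BAD_PAGETABLE and ZERO_PAGE macros used throughout
      * this file presumably expand to calls of the three functions below
      * (they are defined in the mm headers rather than in this file).
      */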
1077 unsigned long __bad_pagetable(void)
1078 {
1079         extern char empty_bad_page_table[PAGE_SIZE];
1080 
1081         __asm__ __volatile__("cld ; rep ; stosl":
1082                 :"a" (BAD_PAGE + PAGE_TABLE),
1083                  "D" ((long) empty_bad_page_table),
1084                  "c" (PTRS_PER_PAGE)
1085                 :"di","cx");
1086         return (unsigned long) empty_bad_page_table;
1087 }
1088 
1089 unsigned long __bad_page(void)
1090 {
1091         extern char empty_bad_page[PAGE_SIZE];
1092 
1093         __asm__ __volatile__("cld ; rep ; stosl":
1094                 :"a" (0),
1095                  "D" ((long) empty_bad_page),
1096                  "c" (PTRS_PER_PAGE)
1097                 :"di","cx");
1098         return (unsigned long) empty_bad_page;
1099 }
1100 
1101 unsigned long __zero_page(void)
1102 {
1103         extern char empty_zero_page[PAGE_SIZE];
1104 
1105         __asm__ __volatile__("cld ; rep ; stosl":
1106                 :"a" (0),
1107                  "D" ((long) empty_zero_page),
1108                  "c" (PTRS_PER_PAGE)
1109                 :"di","cx");
1110         return (unsigned long) empty_zero_page;
1111 }
1112 
1113 void show_mem(void)
1114 {
1115         int i,free = 0,total = 0,reserved = 0;
1116         int shared = 0;
1117 
1118         printk("Mem-info:\n");
1119         show_free_areas();
1120         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
1121         i = high_memory >> PAGE_SHIFT;
1122         while (i-- > 0) {
1123                 total++;
1124                 if (mem_map[i] & MAP_PAGE_RESERVED)
1125                         reserved++;
1126                 else if (!mem_map[i])
1127                         free++;
1128                 else
1129                         shared += mem_map[i]-1;
1130         }
1131         printk("%d pages of RAM\n",total);
1132         printk("%d free pages\n",free);
1133         printk("%d reserved pages\n",reserved);
1134         printk("%d pages shared\n",shared);
1135         show_buffers();
1136 #ifdef CONFIG_NET
1137         show_net_buffers();
1138 #endif
1139 }
1140 
1141 extern unsigned long free_area_init(unsigned long, unsigned long);
1142 
1143 /*
1144  * paging_init() sets up the page tables - note that the first 4MB are
1145  * already mapped by head.S.
1146  *
1147  * This routine also unmaps the page at virtual kernel address 0, so
1148  * that we can trap those pesky NULL-reference errors in the kernel.
1149  */
1150 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
1151 {
1152         unsigned long * pg_dir;
1153         unsigned long * pg_table;
1154         unsigned long tmp;
1155         unsigned long address;
1156 
1157 /*
1158  * Physical page 0 is special; it's not touched by Linux since BIOS
1159  * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
1160  * and write protected to detect null pointer references in the
1161  * kernel.
1162  */
1163 #if 0
1164         memset((void *) 0, 0, PAGE_SIZE);
1165 #endif
1166         start_mem = PAGE_ALIGN(start_mem);
1167         address = 0;
1168         pg_dir = swapper_pg_dir;
1169         while (address < end_mem) {
1170                 tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
1171                 if (!tmp) {
1172                         tmp = start_mem | PAGE_TABLE;
1173                         *(pg_dir + 768) = tmp;
1174                         start_mem += PAGE_SIZE;
1175                 }
1176                 *pg_dir = tmp;                  /* also map it in at 0x0000000 for init */
1177                 pg_dir++;
1178                 pg_table = (unsigned long *) (tmp & PAGE_MASK);
1179                 for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
1180                         if (address < end_mem)
1181                                 *pg_table = address | PAGE_SHARED;
1182                         else
1183                                 *pg_table = 0;
1184                         address += PAGE_SIZE;
1185                 }
1186         }
1187         invalidate();
1188         return free_area_init(start_mem, end_mem);
1189 }
1190 
1191 void mem_init(unsigned long start_low_mem,
1192               unsigned long start_mem, unsigned long end_mem)
1193 {
1194         int codepages = 0;
1195         int reservedpages = 0;
1196         int datapages = 0;
1197         unsigned long tmp;
1198         extern int etext;
1199 
1200         end_mem &= PAGE_MASK;
1201         high_memory = end_mem;
1202 
1203         /* mark usable pages in the mem_map[] */
1204         start_low_mem = PAGE_ALIGN(start_low_mem);
1205         start_mem = PAGE_ALIGN(start_mem);
1206 
1207         /*
1208          * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
1209          * They seem to have done something stupid with the floppy
1210          * controller as well..
1211          */
1212         while (start_low_mem < 0x9f000) {
1213                 mem_map[MAP_NR(start_low_mem)] = 0;
1214                 start_low_mem += PAGE_SIZE;
1215         }
1216 
1217         while (start_mem < high_memory) {
1218                 mem_map[MAP_NR(start_mem)] = 0;
1219                 start_mem += PAGE_SIZE;
1220         }
1221 #ifdef CONFIG_SCSI
1222         scsi_mem_init(high_memory);
1223 #endif
1224 #ifdef CONFIG_SOUND
1225         sound_mem_init();
1226 #endif
1227         for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
1228                 if (mem_map[MAP_NR(tmp)]) {
1229                         if (tmp >= 0xA0000 && tmp < 0x100000)
1230                                 reservedpages++;
1231                         else if (tmp < (unsigned long) &etext)
1232                                 codepages++;
1233                         else
1234                                 datapages++;
1235                         continue;
1236                 }
1237                 mem_map[MAP_NR(tmp)] = 1;
1238                 free_page(tmp);
1239         }
1240         tmp = nr_free_pages << PAGE_SHIFT;
1241         printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
1242                 tmp >> 10,
1243                 high_memory >> 10,
1244                 codepages << (PAGE_SHIFT-10),
1245                 reservedpages << (PAGE_SHIFT-10),
1246                 datapages << (PAGE_SHIFT-10));
1247 /* test if the WP bit is honoured in supervisor mode */
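     /*
      * The byte access below goes through the kernel data segment, so it
      * ends up at linear address TASK_SIZE (the special case that
      * do_page_fault() above checks for).  A 486 honours WP in supervisor
      * mode: the write faults and do_page_fault() sets wp_works_ok to 1.
      * A 386 writes straight through and wp_works_ok stays negative,
      * ending up as 0 below.
      */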
1248         wp_works_ok = -1;
1249         pg0[0] = PAGE_READONLY;
1250         invalidate();
1251         __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
1252         pg0[0] = 0;
1253         invalidate();
1254         if (wp_works_ok < 0)
1255                 wp_works_ok = 0;
1256 #ifdef CONFIG_TEST_VERIFY_AREA
1257         wp_works_ok = 0;
1258 #endif
1259         return;
1260 }
1261 
1262 void si_meminfo(struct sysinfo *val)
1263 {
1264         int i;
1265 
1266         i = high_memory >> PAGE_SHIFT;
1267         val->totalram = 0;
1268         val->sharedram = 0;
1269         val->freeram = nr_free_pages << PAGE_SHIFT;
1270         val->bufferram = buffermem;
1271         while (i-- > 0)  {
1272                 if (mem_map[i] & MAP_PAGE_RESERVED)
1273                         continue;
1274                 val->totalram++;
1275                 if (!mem_map[i])
1276                         continue;
1277                 val->sharedram += mem_map[i]-1;
1278         }
1279         val->totalram <<= PAGE_SHIFT;
1280         val->sharedram <<= PAGE_SHIFT;
1281         return;
1282 }
