linux/mm/memory.c


DEFINITIONS

This source file includes the following definitions:
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. clone_page_tables
  6. copy_page_tables
  7. unmap_page_range
  8. zeromap_page_range
  9. remap_page_range
  10. put_page
  11. put_dirty_page
  12. do_wp_page
  13. verify_area
  14. get_empty_page
  15. try_to_share
  16. share_page
  17. get_empty_pgtable
  18. do_swap_page
  19. do_no_page
  20. do_page_fault
  21. __bad_pagetable
  22. __bad_page
  23. __zero_page
  24. show_mem
  25. paging_init
  26. mem_init
  27. si_meminfo
  28. file_mmap_nopage

   1 /*
   2  *  linux/mm/memory.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  */
   6 
   7 /*
   8  * demand-loading started 01.12.91 - seems it is high on the list of
   9  * things wanted, and it should be easy to implement. - Linus
  10  */
  11 
  12 /*
  13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  14  * pages started 02.12.91, seems to work. - Linus.
  15  *
  16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17  * would have taken more than the 6M I have free, but it worked well as
  18  * far as I could see.
  19  *
  20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21  */
  22 
  23 /*
  24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25  * thought has to go into this. Oh, well..
  26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27  *              Found it. Everything seems to work now.
  28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29  */
  30 
  31 /*
  32  * 05.04.94  -  Multi-page memory management added for v1.1.
  33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  34  */
  35 
  36 #include <asm/system.h>
  37 #include <linux/config.h>
  38 
  39 #include <linux/signal.h>
  40 #include <linux/sched.h>
  41 #include <linux/head.h>
  42 #include <linux/kernel.h>
  43 #include <linux/errno.h>
  44 #include <linux/string.h>
  45 #include <linux/types.h>
  46 #include <linux/ptrace.h>
  47 #include <linux/mman.h>
  48 #include <linux/segment.h>
  49 #include <asm/segment.h>
  50 
  51 /*
  52  * Define this if things work differently on an i386 and an i486:
  53  * it will (on an i486) warn about kernel memory accesses that are
  54  * done without a 'verify_area(VERIFY_WRITE,..)'
  55  */
  56 #undef CONFIG_TEST_VERIFY_AREA
  57 
  58 unsigned long high_memory = 0;
  59 
  60 extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
  61 
  62 extern void sound_mem_init(void);
  63 extern void die_if_kernel(char *,struct pt_regs *,long);
  64 extern void show_net_buffers(void);
  65 
  66 /*
  67  * The free_area_list arrays point to the queue heads of the free areas
  68  * of different sizes
  69  */
  70 int nr_swap_pages = 0;
  71 int nr_free_pages = 0;
  72 struct mem_list free_area_list[NR_MEM_LISTS];
  73 unsigned char * free_area_map[NR_MEM_LISTS];
  74 
  75 #define copy_page(from,to) \
  76 __asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")
  77 
  78 unsigned short * mem_map = NULL;
  79 
  80 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
  81 
  82 /*
  83  * oom() prints a message (so that the user knows why the process died),
  84  * and gives the process an untrappable SIGKILL.
  85  */
  86 void oom(struct task_struct * task)
  87 {
  88         printk("\nOut of memory.\n");
  89         task->sigaction[SIGKILL-1].sa_handler = NULL;
  90         task->blocked &= ~(1<<(SIGKILL-1));
  91         send_sig(SIGKILL,task,1);
  92 }
  93 
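/*
 * free_one_table() releases everything behind one page-directory entry:
 * every present page in the page table is freed, swapped-out entries get
 * their swap space released with swap_free(), and finally the page table
 * itself is freed.  Reserved (kernel) page tables are left alone.
 */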
  94 static void free_one_table(unsigned long * page_dir)
  95 {
  96         int j;
  97         unsigned long pg_table = *page_dir;
  98         unsigned long * page_table;
  99 
 100         if (!pg_table)
 101                 return;
 102         *page_dir = 0;
 103         if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
 104                 printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
 105                 return;
 106         }
 107         if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
 108                 return;
 109         page_table = (unsigned long *) (pg_table & PAGE_MASK);
 110         for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
 111                 unsigned long pg = *page_table;
 112                 
 113                 if (!pg)
 114                         continue;
 115                 *page_table = 0;
 116                 if (pg & PAGE_PRESENT)
 117                         free_page(PAGE_MASK & pg);
 118                 else
 119                         swap_free(pg);
 120         }
 121         free_page(PAGE_MASK & pg_table);
 122 }
 123 
 124 /*
 125  * This function clears all user-level page tables of a process - this
 126  * is needed by execve(), so that old pages aren't in the way. Note that
 127  * unlike 'free_page_tables()', this function still leaves a valid
 128  * page-table-tree in memory: it just removes the user pages. The two
 129  * functions are similar, but there is a fundamental difference.
 130  */
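/*
 * Page-directory entries 0-767 describe the user part of the address
 * space; entries 768-1023 map the kernel at 0xC0000000 and must survive
 * an execve(), which is why only the user entries are touched below.
 */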
 131 void clear_page_tables(struct task_struct * tsk)
 132 {
 133         int i;
 134         unsigned long pg_dir;
 135         unsigned long * page_dir;
 136 
 137         if (!tsk)
 138                 return;
 139         if (tsk == task[0])
 140                 panic("task[0] (swapper) doesn't support exec()\n");
 141         pg_dir = tsk->tss.cr3;
 142         page_dir = (unsigned long *) pg_dir;
 143         if (!page_dir || page_dir == swapper_pg_dir) {
 144                 printk("Trying to clear kernel page-directory: not good\n");
 145                 return;
 146         }
 147         if (mem_map[MAP_NR(pg_dir)] > 1) {
 148                 unsigned long * new_pg;
 149 
 150                 if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
 151                         oom(tsk);
 152                         return;
 153                 }
 154                 for (i = 768 ; i < 1024 ; i++)
 155                         new_pg[i] = page_dir[i];
 156                 free_page(pg_dir);
 157                 tsk->tss.cr3 = (unsigned long) new_pg;
 158                 return;
 159         }
 160         for (i = 0 ; i < 768 ; i++,page_dir++)
 161                 free_one_table(page_dir);
 162         invalidate();
 163         return;
 164 }
 165 
 166 /*
 167  * This function frees up all page tables of a process when it exits.
 168  */
 169 void free_page_tables(struct task_struct * tsk)
 170 {
 171         int i;
 172         unsigned long pg_dir;
 173         unsigned long * page_dir;
 174 
 175         if (!tsk)
 176                 return;
 177         if (tsk == task[0]) {
 178                 printk("task[0] (swapper) killed: unable to recover\n");
 179                 panic("Trying to free up swapper memory space");
 180         }
 181         pg_dir = tsk->tss.cr3;
 182         if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
 183                 printk("Trying to free kernel page-directory: not good\n");
 184                 return;
 185         }
 186         tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
 187         if (tsk == current)
 188                 __asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
 189         if (mem_map[MAP_NR(pg_dir)] > 1) {
 190                 free_page(pg_dir);
 191                 return;
 192         }
 193         page_dir = (unsigned long *) pg_dir;
 194         for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
 195                 free_one_table(page_dir);
 196         free_page(pg_dir);
 197         invalidate();
 198 }
 199 
 200 /*
 201  * clone_page_tables() clones the page table for a process - both
 202  * processes will have the exact same pages in memory. There are
 203  * probably races in the memory management with cloning, but we'll
 204  * see..
 205  */
 206 int clone_page_tables(struct task_struct * tsk)
 207 {
 208         unsigned long pg_dir;
 209 
 210         pg_dir = current->tss.cr3;
 211         mem_map[MAP_NR(pg_dir)]++;
 212         tsk->tss.cr3 = pg_dir;
 213         return 0;
 214 }
 215 
 216 /*
 217  * copy_page_tables() just copies the whole process memory range:
 218  * note the special handling of RESERVED (ie kernel) pages, which
 219  * means that they are always shared by all processes.
 220  */
 221 int copy_page_tables(struct task_struct * tsk)
 222 {
 223         int i;
 224         unsigned long old_pg_dir, *old_page_dir;
 225         unsigned long new_pg_dir, *new_page_dir;
 226 
 227         if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
 228                 return -ENOMEM;
 229         old_pg_dir = current->tss.cr3;
 230         tsk->tss.cr3 = new_pg_dir;
 231         old_page_dir = (unsigned long *) old_pg_dir;
 232         new_page_dir = (unsigned long *) new_pg_dir;
 233         for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
 234                 int j;
 235                 unsigned long old_pg_table, *old_page_table;
 236                 unsigned long new_pg_table, *new_page_table;
 237 
 238                 old_pg_table = *old_page_dir;
 239                 if (!old_pg_table)
 240                         continue;
 241                 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
 242                         printk("copy_page_tables: bad page table: "
 243                                 "probable memory corruption");
 244                         *old_page_dir = 0;
 245                         continue;
 246                 }
 247                 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
 248                         *new_page_dir = old_pg_table;
 249                         continue;
 250                 }
 251                 if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
 252                         free_page_tables(tsk);
 253                         return -ENOMEM;
 254                 }
 255                 old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
 256                 new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
 257                 for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
 258                         unsigned long pg;
 259                         pg = *old_page_table;
 260                         if (!pg)
 261                                 continue;
 262                         if (!(pg & PAGE_PRESENT)) {
 263                                 *new_page_table = swap_duplicate(pg);
 264                                 continue;
 265                         }
 266                         if (pg > high_memory || (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)) {
 267                                 *new_page_table = pg;
 268                                 continue;
 269                         }
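                        /*
                         * Write-protect copy-on-write pages in both the
                         * parent and the child, so that the first write by
                         * either of them faults into do_wp_page() and gets
                         * a private copy.
                         */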
 270                         if (pg & PAGE_COW)
 271                                 pg &= ~PAGE_RW;
 272                         if (delete_from_swap_cache(pg))
 273                                 pg |= PAGE_DIRTY;
 274                         *new_page_table = pg;
 275                         *old_page_table = pg;
 276                         mem_map[MAP_NR(pg)]++;
 277                 }
 278                 *new_page_dir = new_pg_table | PAGE_TABLE;
 279         }
 280         invalidate();
 281         return 0;
 282 }
 283 
 284 /*
 285  * A finer-grained version of free_page_tables(): it unmaps an arbitrary
 286  * page-aligned range rather than a whole address space.
 287  */
 288 int unmap_page_range(unsigned long from, unsigned long size)
 289 {
 290         unsigned long page, page_dir;
 291         unsigned long *page_table, *dir;
 292         unsigned long poff, pcnt, pc;
 293 
 294         if (from & ~PAGE_MASK) {
 295                 printk("unmap_page_range called with wrong alignment\n");
 296                 return -EINVAL;
 297         }
 298         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 299         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 300         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 301         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 302                 pcnt = size;
 303 
 304         for ( ; size > 0; ++dir, size -= pcnt,
 305              pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
 306                 if (!(page_dir = *dir)) {
 307                         poff = 0;
 308                         continue;
 309                 }
 310                 if (!(page_dir & PAGE_PRESENT)) {
 311                         printk("unmap_page_range: bad page directory.");
 312                         continue;
 313                 }
 314                 page_table = (unsigned long *)(PAGE_MASK & page_dir);
 315                 if (poff) {
 316                         page_table += poff;
 317                         poff = 0;
 318                 }
 319                 for (pc = pcnt; pc--; page_table++) {
 320                         if ((page = *page_table) != 0) {
 321                                 *page_table = 0;
 322                                 if (1 & page) {
 323                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 324                                                 if (current->mm->rss > 0)
 325                                                         --current->mm->rss;
 326                                         free_page(PAGE_MASK & page);
 327                                 } else
 328                                         swap_free(page);
 329                         }
 330                 }
 331                 if (pcnt == PTRS_PER_PAGE) {
 332                         *dir = 0;
 333                         free_page(PAGE_MASK & page_dir);
 334                 }
 335         }
 336         invalidate();
 337         return 0;
 338 }
 339 
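/*
 * zeromap_page_range() points a page-aligned range of the current
 * process at the global zero page: any old mappings in the range are
 * freed and each page-table entry is set to "mask" (which gets
 * ZERO_PAGE or'ed in when a protection is given, and is 0 when
 * unmapping).
 */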
 340 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
 341 {
 342         unsigned long *page_table, *dir;
 343         unsigned long poff, pcnt;
 344         unsigned long page;
 345 
 346         if (mask) {
 347                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 348                         printk("zeromap_page_range: mask = %08x\n",mask);
 349                         return -EINVAL;
 350                 }
 351                 mask |= ZERO_PAGE;
 352         }
 353         if (from & ~PAGE_MASK) {
 354                 printk("zeromap_page_range: from = %08lx\n",from);
 355                 return -EINVAL;
 356         }
 357         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 358         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 359         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 360         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 361                 pcnt = size;
 362 
 363         while (size > 0) {
 364                 if (!(PAGE_PRESENT & *dir)) {
 365                                 /* clear page needed here?  SRB. */
 366                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 367                                 invalidate();
 368                                 return -ENOMEM;
 369                         }
 370                         if (PAGE_PRESENT & *dir) {
 371                                 free_page((unsigned long) page_table);
 372                                 page_table = (unsigned long *)(PAGE_MASK & *dir++);
 373                         } else
 374                                 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 375                 } else
 376                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 377                 page_table += poff;
 378                 poff = 0;
 379                 for (size -= pcnt; pcnt-- ;) {
 380                         if ((page = *page_table) != 0) {
 381                                 *page_table = 0;
 382                                 if (page & PAGE_PRESENT) {
 383                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 384                                                 if (current->mm->rss > 0)
 385                                                         --current->mm->rss;
 386                                         free_page(PAGE_MASK & page);
 387                                 } else
 388                                         swap_free(page);
 389                         }
 390                         *page_table++ = mask;
 391                 }
 392                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 393         }
 394         invalidate();
 395         return 0;
 396 }
 397 
 398 /*
 399  * Maps a range of physical memory into the requested pages. The old
 400  * mappings are removed. Any references to nonexistent pages result
 401  * in null mappings (currently treated as "copy-on-access").
 402  */
 403 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
 404 {
 405         unsigned long *page_table, *dir;
 406         unsigned long poff, pcnt;
 407         unsigned long page;
 408 
 409         if (mask) {
 410                 if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
 411                         printk("remap_page_range: mask = %08x\n",mask);
 412                         return -EINVAL;
 413                 }
 414         }
 415         if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
 416                 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
 417                 return -EINVAL;
 418         }
 419         dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
 420         size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
 421         poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 422         if ((pcnt = PTRS_PER_PAGE - poff) > size)
 423                 pcnt = size;
 424 
 425         while (size > 0) {
 426                 if (!(PAGE_PRESENT & *dir)) {
 427                         /* clearing page here, needed?  SRB. */
 428                         if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
 429                                 invalidate();
 430                                 return -1;
 431                         }
 432                         *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
 433                 }
 434                 else
 435                         page_table = (unsigned long *)(PAGE_MASK & *dir++);
 436                 if (poff) {
 437                         page_table += poff;
 438                         poff = 0;
 439                 }
 440 
 441                 for (size -= pcnt; pcnt-- ;) {
 442                         if ((page = *page_table) != 0) {
 443                                 *page_table = 0;
 444                                 if (PAGE_PRESENT & page) {
 445                                         if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
 446                                                 if (current->mm->rss > 0)
 447                                                         --current->mm->rss;
 448                                         free_page(PAGE_MASK & page);
 449                                 } else
 450                                         swap_free(page);
 451                         }
 452 
 453                         /*
 454                          * the first condition should return an invalid access
 455                          * when the page is referenced. current assumptions
 456                          * cause it to be treated as demand allocation in some
 457                          * cases.
 458                          */
 459                         if (!mask)
 460                                 *page_table++ = 0;      /* not present */
 461                         else if (to >= high_memory)
 462                                 *page_table++ = (to | mask);
 463                         else if (!mem_map[MAP_NR(to)])
 464                                 *page_table++ = 0;      /* not present */
 465                         else {
 466                                 *page_table++ = (to | mask);
 467                                 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
 468                                         ++current->mm->rss;
 469                                         mem_map[MAP_NR(to)]++;
 470                                 }
 471                         }
 472                         to += PAGE_SIZE;
 473                 }
 474                 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
 475         }
 476         invalidate();
 477         return 0;
 478 }
 479 
 480 /*
 481  * This function puts a page in memory at the wanted address.
 482  * It returns the physical address of the page gotten, 0 if
 483  * out of memory (either when trying to access page-table or
 484  * page.)
 485  */
 486 unsigned long put_page(struct task_struct * tsk,unsigned long page,
 487         unsigned long address,int prot)
 488 {
 489         unsigned long *page_table;
 490 
 491         if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
 492                 printk("put_page: prot = %08x\n",prot);
 493         if (page >= high_memory) {
 494                 printk("put_page: trying to put page %08lx at %08lx\n",page,address);
 495                 return 0;
 496         }
 497         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 498         if ((*page_table) & PAGE_PRESENT)
 499                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 500         else {
 501                 printk("put_page: bad page directory entry\n");
 502                 oom(tsk);
 503                 *page_table = BAD_PAGETABLE | PAGE_TABLE;
 504                 return 0;
 505         }
 506         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 507         if (*page_table) {
 508                 printk("put_page: page already exists\n");
 509                 *page_table = 0;
 510                 invalidate();
 511         }
 512         *page_table = page | prot;
 513 /* no need for invalidate */
 514         return page;
 515 }
 516 
 517 /*
 518  * The previous function doesn't work very well if you also want to mark
 519  * the page dirty: exec.c wants this, as it has earlier changed the page,
 520  * and we want the dirty-status to be correct (for VM). Thus the same
 521  * routine, but this time we mark it dirty too.
 522  */
 523 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
 524 {
 525         unsigned long tmp, *page_table;
 526 
 527         if (page >= high_memory)
 528                 printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
 529         if (mem_map[MAP_NR(page)] != 1)
 530                 printk("mem_map disagrees with %08lx at %08lx\n",page,address);
 531         page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 532         if (PAGE_PRESENT & *page_table)
 533                 page_table = (unsigned long *) (PAGE_MASK & *page_table);
 534         else {
 535                 if (!(tmp = get_free_page(GFP_KERNEL)))
 536                         return 0;
 537                 if (PAGE_PRESENT & *page_table) {
 538                         free_page(tmp);
 539                         page_table = (unsigned long *) (PAGE_MASK & *page_table);
 540                 } else {
 541                         *page_table = tmp | PAGE_TABLE;
 542                         page_table = (unsigned long *) tmp;
 543                 }
 544         }
 545         page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 546         if (*page_table) {
 547                 printk("put_dirty_page: page already exists\n");
 548                 *page_table = 0;
 549                 invalidate();
 550         }
 551         *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
 552 /* no need for invalidate */
 553         return page;
 554 }
 555 
 556 /*
 557  * This routine handles present pages, when users try to write
 558  * to a shared page. It is done by copying the page to a new address
 559  * and decrementing the shared-page counter for the old page.
 560  *
 561  * Goto-purists beware: the only reason for goto's here is that it results
 562  * in better assembly code.. The "default" path will see no jumps at all.
 563  */
 564 void do_wp_page(struct vm_area_struct * vma, unsigned long address,
 565         unsigned long error_code)
 566 {
 567         unsigned long *pde, pte, old_page, prot;
 568         unsigned long new_page;
 569 
 570         new_page = __get_free_page(GFP_KERNEL);
 571         pde = PAGE_DIR_OFFSET(vma->vm_task->tss.cr3,address);
 572         pte = *pde;
 573         if (!(pte & PAGE_PRESENT))
 574                 goto end_wp_page;
 575         if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
 576                 goto bad_wp_pagetable;
 577         pte &= PAGE_MASK;
 578         pte += PAGE_PTR(address);
 579         old_page = *(unsigned long *) pte;
 580         if (!(old_page & PAGE_PRESENT))
 581                 goto end_wp_page;
 582         if (old_page >= high_memory)
 583                 goto bad_wp_page;
 584         if (old_page & PAGE_RW)
 585                 goto end_wp_page;
 586         vma->vm_task->mm->min_flt++;
 587         prot = (old_page & ~PAGE_MASK) | PAGE_RW | PAGE_DIRTY;
 588         old_page &= PAGE_MASK;
 589         if (mem_map[MAP_NR(old_page)] != 1) {
 590                 if (new_page) {
 591                         if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
 592                                 ++vma->vm_task->mm->rss;
 593                         copy_page(old_page,new_page);
 594                         *(unsigned long *) pte = new_page | prot;
 595                         free_page(old_page);
 596                         invalidate();
 597                         return;
 598                 }
 599                 free_page(old_page);
 600                 oom(vma->vm_task);
 601                 *(unsigned long *) pte = BAD_PAGE | prot;
 602                 invalidate();
 603                 return;
 604         }
 605         *(unsigned long *) pte |= PAGE_RW | PAGE_DIRTY;
 606         invalidate();
 607         if (new_page)
 608                 free_page(new_page);
 609         return;
 610 bad_wp_page:
 611         printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
 612         *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
 613         send_sig(SIGKILL, vma->vm_task, 1);
 614         goto end_wp_page;
 615 bad_wp_pagetable:
 616         printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
 617         *pde = BAD_PAGETABLE | PAGE_TABLE;
 618         send_sig(SIGKILL, vma->vm_task, 1);
 619 end_wp_page:
 620         if (new_page)
 621                 free_page(new_page);
 622         return;
 623 }
 624 
 625 /*
 626  * Ugly, ugly, but the goto's result in better assembly..
 627  */
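/*
 * Typical use: a driver verifies a user-space buffer before touching it
 * with the fs-segment helpers from <asm/segment.h>.  An illustrative
 * sketch (buf, count and kbuf are hypothetical caller variables):
 *
 *	int error = verify_area(VERIFY_WRITE, buf, count);
 *	if (error)
 *		return error;
 *	memcpy_tofs(buf, kbuf, count);
 */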
 628 int verify_area(int type, const void * addr, unsigned long size)
 629 {
 630         struct vm_area_struct * vma;
 631         unsigned long start = (unsigned long) addr;
 632 
 633         /* If the current user space is mapped to kernel space (for the
 634          * case where we use a fake user buffer with get_fs/set_fs()) we
 635          * don't expect to find the address in the user vm map.
 636          */
 637         if (get_fs() == get_ds())
 638                 return 0;
 639 
 640         for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
 641                 if (!vma)
 642                         goto bad_area;
 643                 if (vma->vm_end > start)
 644                         break;
 645         }
 646         if (vma->vm_start <= start)
 647                 goto good_area;
 648         if (!(vma->vm_flags & VM_GROWSDOWN))
 649                 goto bad_area;
 650         if (vma->vm_end - start > current->rlim[RLIMIT_STACK].rlim_cur)
 651                 goto bad_area;
 652 
 653 good_area:
 654         if (!wp_works_ok && type == VERIFY_WRITE)
 655                 goto check_wp_fault_by_hand;
 656         for (;;) {
 657                 struct vm_area_struct * next;
 658                 if (!(vma->vm_page_prot & PAGE_USER))
 659                         goto bad_area;
 660                 if (type != VERIFY_READ && !(vma->vm_page_prot & (PAGE_COW | PAGE_RW)))
 661                         goto bad_area;
 662                 if (vma->vm_end - start >= size)
 663                         return 0;
 664                 next = vma->vm_next;
 665                 if (!next || vma->vm_end != next->vm_start)
 666                         goto bad_area;
 667                 vma = next;
 668         }
 669 
 670 check_wp_fault_by_hand:
 671         size--;
 672         size += start & ~PAGE_MASK;
 673         size >>= PAGE_SHIFT;
 674         start &= PAGE_MASK;
 675 
 676         for (;;) {
 677                 if (!(vma->vm_page_prot & (PAGE_COW | PAGE_RW)))
 678                         goto bad_area;
 679                 do_wp_page(vma, start, PAGE_PRESENT);
 680                 if (!size)
 681                         return 0;
 682                 size--;
 683                 start += PAGE_SIZE;
 684                 if (start < vma->vm_end)
 685                         continue;
 686                 vma = vma->vm_next;
 687                 if (!vma || vma->vm_start != start)
 688                         break;
 689         }
 690 
 691 bad_area:
 692         return -EFAULT;
 693 }
 694 
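/*
 * get_empty_page() maps a fresh private page at the given address; if
 * no memory is available the process gets an oom() message and BAD_PAGE
 * is mapped instead.
 */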
 695 static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
 696 {
 697         unsigned long tmp;
 698 
 699         if (!(tmp = get_free_page(GFP_KERNEL))) {
 700                 oom(tsk);
 701                 tmp = BAD_PAGE;
 702         }
 703         if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
 704                 free_page(tmp);
 705 }
 706 
 707 /*
 708  * try_to_share() checks the page at address "address" in the task "p",
 709  * to see if it exists, and if it is clean. If so, share it with the current
 710  * task.
 711  *
 712  * NOTE! This assumes we have checked that p != current, and that they
 713  * share the same inode and can generally otherwise be shared.
 714  */
 715 static int try_to_share(unsigned long to_address, struct vm_area_struct * to_area,
 716         unsigned long from_address, struct vm_area_struct * from_area,
 717         unsigned long newpage)
 718 {
 719         unsigned long from;
 720         unsigned long to;
 721         unsigned long from_page;
 722         unsigned long to_page;
 723 
 724         from_page = (unsigned long)PAGE_DIR_OFFSET(from_area->vm_task->tss.cr3,from_address);
 725         to_page = (unsigned long)PAGE_DIR_OFFSET(to_area->vm_task->tss.cr3,to_address);
 726 /* is there a page-directory at from? */
 727         from = *(unsigned long *) from_page;
 728         if (!(from & PAGE_PRESENT))
 729                 return 0;
 730         from &= PAGE_MASK;
 731         from_page = from + PAGE_PTR(from_address);
 732         from = *(unsigned long *) from_page;
 733 /* is the page present? */
 734         if (!(from & PAGE_PRESENT))
 735                 return 0;
 736 /* if it is private, it must be clean to be shared */
 737         if ((from_area->vm_page_prot & PAGE_COW) && (from & PAGE_DIRTY))
 738                 return 0;
 739 /* is the page reasonable at all? */
 740         if (from >= high_memory)
 741                 return 0;
 742         if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
 743                 return 0;
 744 /* is the destination ok? */
 745         to = *(unsigned long *) to_page;
 746         if (!(to & PAGE_PRESENT))
 747                 return 0;
 748         to &= PAGE_MASK;
 749         to_page = to + PAGE_PTR(to_address);
 750         if (*(unsigned long *) to_page)
 751                 return 0;
 752 /* do we copy? */
 753         if (newpage) {
 754                 if (in_swap_cache(from)) { /* implies PAGE_DIRTY */
 755                         if (from_area->vm_page_prot & PAGE_COW)
 756                                 return 0;
 757                 }
 758                 copy_page((from & PAGE_MASK), newpage);
 759                 *(unsigned long *) to_page = newpage | to_area->vm_page_prot;
 760                 return 1;
 761         }
 762 /* do a final swap-cache test before sharing them.. */
 763         if (in_swap_cache(from)) {
 764                 if (from_area->vm_page_prot & PAGE_COW)
 765                         return 0;
 766                 from |= PAGE_DIRTY;
 767                 *(unsigned long *) from_page = from;
 768                 delete_from_swap_cache(from);
 769                 invalidate();
 770         }
 771         mem_map[MAP_NR(from)]++;
 772 /* fill in the 'to' field, checking for COW-stuff */
 773         to = (from & (PAGE_MASK | PAGE_DIRTY)) | to_area->vm_page_prot;
 774         if (to & PAGE_COW)
 775                 to &= ~PAGE_RW;
 776         *(unsigned long *) to_page = to;
 777 /* Check if we need to do anything at all to the 'from' field */
 778         if (!(from & PAGE_RW))
 779                 return 1;
 780         if (!(from_area->vm_page_prot & PAGE_COW))
 781                 return 1;
 782 /* ok, need to mark it read-only, so invalidate any possible old TB entry */
 783         from &= ~PAGE_RW;
 784         *(unsigned long *) from_page = from;
 785         invalidate();
 786         return 1;
 787 }
 788 
 789 /*
 790  * share_page() tries to find a process that could share a page with
 791  * the current one.
 792  *
 793  * We first check if it is at all feasible by checking inode->i_count.
 794  * It should be >1 if there are other tasks sharing this inode.
 795  */
 796 static int share_page(struct vm_area_struct * area, unsigned long address,
 797         unsigned long error_code, unsigned long newpage)
 798 {
 799         struct inode * inode;
 800         struct task_struct ** p;
 801         unsigned long offset;
 802         unsigned long from_address;
 803         unsigned long give_page;
 804 
 805         if (!area || !(inode = area->vm_inode) || inode->i_count < 2)
 806                 return 0;
 807         /* do we need to copy or can we just share? */
 808         give_page = 0;
 809         if ((area->vm_page_prot & PAGE_COW) && (error_code & PAGE_RW)) {
 810                 if (!newpage)
 811                         return 0;
 812                 give_page = newpage;
 813         }
 814         offset = address - area->vm_start + area->vm_offset;
 815         for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
 816                 struct vm_area_struct * mpnt;
 817                 if (!*p)
 818                         continue;
 819                 if (area->vm_task == *p)
 820                         continue;
 821                 /* Now see if there is something in the VMM that
 822                    we can share pages with */
 823                 for (mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) {
 824                         /* must be same inode */
 825                         if (mpnt->vm_inode != inode)
 826                                 continue;
 827                         /* offsets must be mutually page-aligned */
 828                         if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK)
 829                                 continue;
 830                         /* the other area must actually cover the wanted page.. */
 831                         from_address = offset + mpnt->vm_start - mpnt->vm_offset;
 832                         if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end)
 833                                 continue;
 834                         /* .. NOW we can actually try to use the same physical page */
 835                         if (!try_to_share(address, area, from_address, mpnt, give_page))
 836                                 continue;
 837                         /* free newpage if we never used it.. */
 838                         if (give_page || !newpage)
 839                                 return 1;
 840                         free_page(newpage);
 841                         return 1;
 842                 }
 843         }
 844         return 0;
 845 }
 846 
 847 /*
 848  * fill in an empty page-table if none exists.
 849  */
 850 static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
 851 {
 852         unsigned long page;
 853         unsigned long *p;
 854 
 855         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 856         if (PAGE_PRESENT & *p)
 857                 return *p;
 858         if (*p) {
 859                 printk("get_empty_pgtable: bad page-directory entry \n");
 860                 *p = 0;
 861         }
 862         page = get_free_page(GFP_KERNEL);
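        /* get_free_page() may have slept - re-check the directory entry */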
 863         p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
 864         if (PAGE_PRESENT & *p) {
 865                 free_page(page);
 866                 return *p;
 867         }
 868         if (*p) {
 869                 printk("get_empty_pgtable: bad page-directory entry \n");
 870                 *p = 0;
 871         }
 872         if (page) {
 873                 *p = page | PAGE_TABLE;
 874                 return *p;
 875         }
 876         oom(current);
 877         *p = BAD_PAGETABLE | PAGE_TABLE;
 878         return 0;
 879 }
 880 
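/*
 * do_swap_page() brings a swapped-out page back in, either through the
 * area's own swapin() operation or through the generic swap_in().  If
 * the page-table entry changed while we slept the page is freed again;
 * otherwise the entry is filled in, write-protecting shared COW pages.
 */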
 881 static inline void do_swap_page(struct vm_area_struct * vma,
 882         unsigned long address, unsigned long * pge, unsigned long entry)
 883 {
 884         unsigned long page;
 885 
 886         if (vma->vm_ops && vma->vm_ops->swapin)
 887                 page = vma->vm_ops->swapin(vma, entry);
 888         else
 889                 page = swap_in(entry);
 890         if (*pge != entry) {
 891                 free_page(page);
 892                 return;
 893         }
 894         page = page | vma->vm_page_prot;
 895         if (mem_map[MAP_NR(page)] > 1 && (page & PAGE_COW))
 896                 page &= ~PAGE_RW;
 897         ++vma->vm_task->mm->rss;
 898         ++vma->vm_task->mm->maj_flt;
 899         *pge = page;
 900         return;
 901 }
 902 
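/*
 * do_no_page() handles faults on not-present pages: swapped-out entries
 * are handed to do_swap_page(), anonymous areas simply get an empty
 * private page, and file-backed areas first try share_page() and then
 * fall back to the area's nopage() operation to read the page in.
 */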
 903 void do_no_page(struct vm_area_struct * vma, unsigned long address,
 904         unsigned long error_code)
 905 {
 906         unsigned long page, entry, prot;
 907 
 908         page = get_empty_pgtable(vma->vm_task,address);
 909         if (!page)
 910                 return;
 911         page &= PAGE_MASK;
 912         page += PAGE_PTR(address);
 913         entry = *(unsigned long *) page;
 914         if (entry & PAGE_PRESENT)
 915                 return;
 916         if (entry) {
 917                 do_swap_page(vma, address, (unsigned long *) page, entry);
 918                 return;
 919         }
 920         address &= PAGE_MASK;
 921 
 922         if (!vma->vm_ops || !vma->vm_ops->nopage) {
 923                 ++vma->vm_task->mm->rss;
 924                 ++vma->vm_task->mm->min_flt;
 925                 get_empty_page(vma->vm_task,address);
 926                 return;
 927         }
 928         page = get_free_page(GFP_KERNEL);
 929         if (share_page(vma, address, error_code, page)) {
 930                 ++vma->vm_task->mm->min_flt;
 931                 ++vma->vm_task->mm->rss;
 932                 return;
 933         }
 934         if (!page) {
 935                 oom(current);
 936                 put_page(vma->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
 937                 return;
 938         }
 939         ++vma->vm_task->mm->maj_flt;
 940         ++vma->vm_task->mm->rss;
 941         prot = vma->vm_page_prot;
 942         /*
 943          * The fourth argument is "no_share", which tells the low-level code
 944          * to copy, not share the page even if sharing is possible.  It's
 945          * essentially an early COW detection ("moo at 5 AM").
 946          */
 947         page = vma->vm_ops->nopage(vma, address, page, (error_code & PAGE_RW) && (prot & PAGE_COW));
 948         if (share_page(vma, address, error_code, 0)) {
 949                 free_page(page);
 950                 return;
 951         }
 952         /*
 953          * This silly early PAGE_DIRTY setting removes a race
 954          * due to the bad i386 page protection.
 955          */
 956         if (error_code & PAGE_RW) {
 957                 prot |= PAGE_DIRTY;     /* can't be COW-shared: see "no_share" above */
 958         } else if ((prot & PAGE_COW) && mem_map[MAP_NR(page)] > 1)
 959                 prot &= ~PAGE_RW;
 960         if (put_page(vma->vm_task, page, address, prot))
 961                 return;
 962         free_page(page);
 963         oom(current);
 964 }
 965 
 966 /*
 967  * This routine handles page faults.  It determines the address,
 968  * and the problem, and then passes it off to one of the appropriate
 969  * routines.
 970  */
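/*
 * The i386 page-fault error code happens to use the same bit layout as
 * the low page-table flags: bit 0 set means a protection violation on a
 * present page, bit 1 means a write access, bit 2 means user mode.
 * That is why it can be tested with PAGE_PRESENT, PAGE_RW and PAGE_USER
 * below and in do_no_page().
 */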
 971 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 972 {
 973         struct vm_area_struct * vma;
 974         unsigned long address;
 975         unsigned long page;
 976 
 977         /* get the address */
 978         __asm__("movl %%cr2,%0":"=r" (address));
 979         for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
 980                 if (!vma)
 981                         goto bad_area;
 982                 if (vma->vm_end > address)
 983                         break;
 984         }
 985         if (vma->vm_start <= address)
 986                 goto good_area;
 987         if (!(vma->vm_flags & VM_GROWSDOWN))
 988                 goto bad_area;
 989         if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
 990                 goto bad_area;
 991         vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
 992         vma->vm_start = (address & PAGE_MASK);
 993 /*
 994  * Ok, we have a good vm_area for this memory access, so
 995  * we can handle it..
 996  */
 997 good_area:
 998         if (regs->eflags & VM_MASK) {
 999                 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
1000                 if (bit < 32)
1001                         current->screen_bitmap |= 1 << bit;
1002         }
1003         if (!(vma->vm_page_prot & PAGE_USER))
1004                 goto bad_area;
1005         if (error_code & PAGE_PRESENT) {
1006                 if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
1007                         goto bad_area;
1008 #ifdef CONFIG_TEST_VERIFY_AREA
1009                 if (regs->cs == KERNEL_CS)
1010                         printk("WP fault at %08x\n", regs->eip);
1011 #endif
1012                 do_wp_page(vma, address, error_code);
1013                 return;
1014         }
1015         do_no_page(vma, address, error_code);
1016         return;
1017 
1018 /*
1019  * Something tried to access memory that isn't in our memory map..
1020  * Fix it, but check if it's kernel or user first..
1021  */
1022 bad_area:
1023         if (error_code & PAGE_USER) {
1024                 current->tss.cr2 = address;
1025                 current->tss.error_code = error_code;
1026                 current->tss.trap_no = 14;
1027                 send_sig(SIGSEGV, current, 1);
1028                 return;
1029         }
1030 /*
1031  * Oops. The kernel tried to access some bad page. We'll have to
1032  * terminate things with extreme prejudice.
1033  */
1034         if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
1035                 wp_works_ok = 1;
1036                 pg0[0] = PAGE_SHARED;
1037                 printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
1038                 return;
1039         }
1040         if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
1041                 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
1042                 pg0[0] = PAGE_SHARED;
1043         } else
1044                 printk(KERN_ALERT "Unable to handle kernel paging request");
1045         printk(" at virtual address %08lx\n",address);
1046         __asm__("movl %%cr3,%0" : "=r" (page));
1047         printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
1048                 current->tss.cr3, page);
1049         page = ((unsigned long *) page)[address >> 22];
1050         printk(KERN_ALERT "*pde = %08lx\n", page);
1051         if (page & PAGE_PRESENT) {
1052                 page &= PAGE_MASK;
1053                 address &= 0x003ff000;
1054                 page = ((unsigned long *) page)[address >> PAGE_SHIFT];
1055                 printk(KERN_ALERT "*pte = %08lx\n", page);
1056         }
1057         die_if_kernel("Oops", regs, error_code);
1058         do_exit(SIGKILL);
1059 }
1060 
1061 /*
1062  * BAD_PAGE is the page that is used for page faults when linux
1063  * is out-of-memory. Older versions of linux just did a
1064  * do_exit(), but using this instead means there is less risk
1065  * for a process dying in kernel mode, possibly leaving an inode
1066  * unused etc..
1067  *
1068  * BAD_PAGETABLE is the accompanying page-table: it is initialized
1069  * to point to BAD_PAGE entries.
1070  *
1071  * ZERO_PAGE is a special page that is used for zero-initialized
1072  * data and COW.
1073  */
1074 unsigned long __bad_pagetable(void)
1075 {
1076         extern char empty_bad_page_table[PAGE_SIZE];
1077 
1078         __asm__ __volatile__("cld ; rep ; stosl":
1079                 :"a" (BAD_PAGE + PAGE_TABLE),
1080                  "D" ((long) empty_bad_page_table),
1081                  "c" (PTRS_PER_PAGE)
1082                 :"di","cx");
1083         return (unsigned long) empty_bad_page_table;
1084 }
1085 
1086 unsigned long __bad_page(void)
1087 {
1088         extern char empty_bad_page[PAGE_SIZE];
1089 
1090         __asm__ __volatile__("cld ; rep ; stosl":
1091                 :"a" (0),
1092                  "D" ((long) empty_bad_page),
1093                  "c" (PTRS_PER_PAGE)
1094                 :"di","cx");
1095         return (unsigned long) empty_bad_page;
1096 }
1097 
1098 unsigned long __zero_page(void)
1099 {
1100         extern char empty_zero_page[PAGE_SIZE];
1101 
1102         __asm__ __volatile__("cld ; rep ; stosl":
1103                 :"a" (0),
1104                  "D" ((long) empty_zero_page),
1105                  "c" (PTRS_PER_PAGE)
1106                 :"di","cx");
1107         return (unsigned long) empty_zero_page;
1108 }
1109 
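/*
 * show_mem() prints a summary of memory usage: the free areas, free
 * swap, and a per-page count of reserved, free and shared pages.
 */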
1110 void show_mem(void)
1111 {
1112         int i,free = 0,total = 0,reserved = 0;
1113         int shared = 0;
1114 
1115         printk("Mem-info:\n");
1116         show_free_areas();
1117         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
1118         i = high_memory >> PAGE_SHIFT;
1119         while (i-- > 0) {
1120                 total++;
1121                 if (mem_map[i] & MAP_PAGE_RESERVED)
1122                         reserved++;
1123                 else if (!mem_map[i])
1124                         free++;
1125                 else
1126                         shared += mem_map[i]-1;
1127         }
1128         printk("%d pages of RAM\n",total);
1129         printk("%d free pages\n",free);
1130         printk("%d reserved pages\n",reserved);
1131         printk("%d pages shared\n",shared);
1132         show_buffers();
1133 #ifdef CONFIG_NET
1134         show_net_buffers();
1135 #endif
1136 }
1137 
1138 extern unsigned long free_area_init(unsigned long, unsigned long);
1139 
1140 /*
1141  * paging_init() sets up the page tables - note that the first 4MB are
1142  * already mapped by head.S.
1143  *
1144  * This routine also unmaps the page at virtual kernel address 0, so
1145  * that we can trap those pesky NULL-reference errors in the kernel.
1146  */
1147 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
1148 {
1149         unsigned long * pg_dir;
1150         unsigned long * pg_table;
1151         unsigned long tmp;
1152         unsigned long address;
1153 
1154 /*
1155  * Physical page 0 is special; it's not touched by Linux since BIOS
1156  * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
1157  * and write protected to detect null pointer references in the
1158  * kernel.
1159  */
1160 #if 0
1161         memset((void *) 0, 0, PAGE_SIZE);
1162 #endif
1163         start_mem = PAGE_ALIGN(start_mem);
1164         address = 0;
1165         pg_dir = swapper_pg_dir;
1166         while (address < end_mem) {
1167                 tmp = *(pg_dir + 768);          /* at virtual addr 0xC0000000 */
1168                 if (!tmp) {
1169                         tmp = start_mem | PAGE_TABLE;
1170                         *(pg_dir + 768) = tmp;
1171                         start_mem += PAGE_SIZE;
1172                 }
1173                 *pg_dir = tmp;                  /* also map it in at 0x0000000 for init */
1174                 pg_dir++;
1175                 pg_table = (unsigned long *) (tmp & PAGE_MASK);
1176                 for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
1177                         if (address < end_mem)
1178                                 *pg_table = address | PAGE_SHARED;
1179                         else
1180                                 *pg_table = 0;
1181                         address += PAGE_SIZE;
1182                 }
1183         }
1184         invalidate();
1185         return free_area_init(start_mem, end_mem);
1186 }
1187 
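/*
 * mem_init() marks the usable pages between start_low_mem/start_mem and
 * end_mem as free and gives them to the page allocator, counting kernel
 * code, reserved and data pages along the way.  It finally probes
 * whether the processor honours the WP bit in supervisor mode by
 * writing through a read-only mapping of page 0; if the write faults,
 * do_page_fault() recognises the probe and sets wp_works_ok.
 */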
1188 void mem_init(unsigned long start_low_mem,
1189               unsigned long start_mem, unsigned long end_mem)
1190 {
1191         int codepages = 0;
1192         int reservedpages = 0;
1193         int datapages = 0;
1194         unsigned long tmp;
1195         extern int etext;
1196 
1197         cli();
1198         end_mem &= PAGE_MASK;
1199         high_memory = end_mem;
1200 
1201         /* mark usable pages in the mem_map[] */
1202         start_low_mem = PAGE_ALIGN(start_low_mem);
1203         start_mem = PAGE_ALIGN(start_mem);
1204 
1205         /*
1206          * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
1207          * They seem to have done something stupid with the floppy
1208          * controller as well..
1209          */
1210         while (start_low_mem < 0x9f000) {
1211                 mem_map[MAP_NR(start_low_mem)] = 0;
1212                 start_low_mem += PAGE_SIZE;
1213         }
1214 
1215         while (start_mem < high_memory) {
1216                 mem_map[MAP_NR(start_mem)] = 0;
1217                 start_mem += PAGE_SIZE;
1218         }
1219 #ifdef CONFIG_SOUND
1220         sound_mem_init();
1221 #endif
1222         for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
1223                 if (mem_map[MAP_NR(tmp)]) {
1224                         if (tmp >= 0xA0000 && tmp < 0x100000)
1225                                 reservedpages++;
1226                         else if (tmp < (unsigned long) &etext)
1227                                 codepages++;
1228                         else
1229                                 datapages++;
1230                         continue;
1231                 }
1232                 mem_map[MAP_NR(tmp)] = 1;
1233                 free_page(tmp);
1234         }
1235         tmp = nr_free_pages << PAGE_SHIFT;
1236         printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
1237                 tmp >> 10,
1238                 high_memory >> 10,
1239                 codepages << (PAGE_SHIFT-10),
1240                 reservedpages << (PAGE_SHIFT-10),
1241                 datapages << (PAGE_SHIFT-10));
1242 /* test if the WP bit is honoured in supervisor mode */
1243         wp_works_ok = -1;
1244         pg0[0] = PAGE_READONLY;
1245         invalidate();
1246         __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
1247         pg0[0] = 0;
1248         invalidate();
1249         if (wp_works_ok < 0)
1250                 wp_works_ok = 0;
1251 #ifdef CONFIG_TEST_VERIFY_AREA
1252         wp_works_ok = 0;
1253 #endif
1254         return;
1255 }
1256 
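/*
 * si_meminfo() fills in the memory fields of the sysinfo structure:
 * total, shared, free and buffer memory, all converted to bytes.
 */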
1257 void si_meminfo(struct sysinfo *val)
1258 {
1259         int i;
1260 
1261         i = high_memory >> PAGE_SHIFT;
1262         val->totalram = 0;
1263         val->sharedram = 0;
1264         val->freeram = nr_free_pages << PAGE_SHIFT;
1265         val->bufferram = buffermem;
1266         while (i-- > 0)  {
1267                 if (mem_map[i] & MAP_PAGE_RESERVED)
1268                         continue;
1269                 val->totalram++;
1270                 if (!mem_map[i])
1271                         continue;
1272                 val->sharedram += mem_map[i]-1;
1273         }
1274         val->totalram <<= PAGE_SHIFT;
1275         val->sharedram <<= PAGE_SHIFT;
1276         return;
1277 }
1278 
1279 
1280 /*
1281  * This handles a generic mmap of a disk file.
1282  */
1283 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
1284         unsigned long page, int no_share)
1285 {
1286         struct inode * inode = area->vm_inode;
1287         unsigned int block;
1288         int nr[8];
1289         int i, *p;
1290 
1291         address &= PAGE_MASK;
1292         block = address - area->vm_start + area->vm_offset;
1293         block >>= inode->i_sb->s_blocksize_bits;
1294         i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
1295         p = nr;
1296         do {
1297                 *p = bmap(inode,block);
1298                 i--;
1299                 block++;
1300                 p++;
1301         } while (i > 0);
1302         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
1303 }
1304 
1305 struct vm_operations_struct file_mmap = {
1306         NULL,                   /* open */
1307         NULL,                   /* close */
1308         file_mmap_nopage,       /* nopage */
1309         NULL,                   /* wppage */
1310         NULL,                   /* share */
1311         NULL,                   /* unmap */
1312 };
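/*
 * A filesystem that is happy with this simple one-block-at-a-time
 * behaviour can point its mappings at file_mmap.  An illustrative
 * sketch of what an mmap() implementation might do (not taken from any
 * particular filesystem):
 *
 *	vma->vm_inode = inode;
 *	inode->i_count++;
 *	vma->vm_ops = &file_mmap;
 */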
