root/mm/memory.c


DEFINITIONS

This source file includes the following definitions.
  1. oom
  2. free_one_table
  3. clear_page_tables
  4. free_page_tables
  5. copy_page_tables
  6. unmap_page_range
  7. remap_page_range
  8. put_page
  9. put_dirty_page
  10. un_wp_page
  11. do_wp_page
  12. write_verify
  13. get_empty_page
  14. try_to_share
  15. share_page
  16. get_empty_pgtable
  17. do_no_page
  18. show_mem
  19. do_page_fault
  20. mem_init

/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

#include <asm/system.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

unsigned long high_memory = 0;

int nr_free_pages = 0;
unsigned long free_page_list = 0;
/*
 * The secondary free_page_list is used for malloc() and other things
 * that may need pages during interrupts etc. Normal get_free_page()
 * operations don't touch it, so it stays as a kind of "panic-list",
 * that can be accessed when all other mm tricks have failed.
 */
int nr_secondary_pages = 0;
unsigned long secondary_page_list = 0;

#define copy_page(from,to) \
__asm__("cld ; rep ; movsl"::"S" (from),"D" (to),"c" (1024):"cx","di","si")
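
/*
 * A sketch of what the copy_page() asm above does: "rep ; movsl" with
 * %ecx = 1024 copies 1024 longwords, i.e. one 4096-byte page, from the
 * page at 'from' to the page at 'to'.  In plain C (illustration only,
 * the kernel uses the asm version):
 *
 *	int i;
 *	for (i = 0 ; i < 1024 ; i++)
 *		((unsigned long *) to)[i] = ((unsigned long *) from)[i];
 */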

unsigned short * mem_map = NULL;

/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGSEGV.
 */
void oom(struct task_struct * task)
{
        printk("\nout of memory\n");
        task->sigaction[SIGSEGV-1].sa_handler = NULL;
        task->blocked &= ~(1<<(SIGSEGV-1));
        send_sig(SIGSEGV,task,1);
}

static void free_one_table(unsigned long * page_dir)
{
        int j;
        unsigned long pg_table = *page_dir;
        unsigned long * page_table;

        if (!pg_table)
                return;
        if (pg_table >= high_memory || !(pg_table & 1)) {
                printk("Bad page table: [%08x]=%08x\n",page_dir,pg_table);
                *page_dir = 0;
                return;
        }
        *page_dir = 0;
        if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
                return;
        page_table = (unsigned long *) (pg_table & 0xfffff000);
        for (j = 0 ; j < 1024 ; j++,page_table++) {
                unsigned long pg = *page_table;

                if (!pg)
                        continue;
                *page_table = 0;
                if (1 & pg)
                        free_page(0xfffff000 & pg);
                else
                        swap_free(pg >> 1);
        }
        free_page(0xfffff000 & pg_table);
}
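
/*
 * A note on the entry encoding relied on above (and throughout this
 * file): bit 0 of a page-table entry is the i386 "present" bit.  A
 * present entry keeps the physical page address in its top 20 bits,
 * so "0xfffff000 & pg" recovers the page to free; a non-present but
 * non-zero entry is a swap entry shifted left by one, so "pg >> 1"
 * recovers the swap-page number for swap_free().
 */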

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way. Note that
 * unlike 'free_page_tables()', this function still leaves a valid
 * page-table-tree in memory: it just removes the user pages. The two
 * functions are similar, but there is a fundamental difference.
 */
void clear_page_tables(struct task_struct * tsk)
{
        int i;
        unsigned long * page_dir;

        if (!tsk)
                return;
        if (tsk == task[0])
                panic("task[0] (swapper) doesn't support exec() yet\n");
        page_dir = (unsigned long *) tsk->tss.cr3;
        if (!page_dir) {
                printk("Trying to clear kernel page-directory: not good\n");
                return;
        }
        for (i = 0 ; i < 768 ; i++,page_dir++)
                free_one_table(page_dir);
        invalidate();
        return;
}

/*
 * This function frees up all page tables of a process when it exits.
 */
void free_page_tables(struct task_struct * tsk)
{
        int i;
        unsigned long pg_dir;
        unsigned long * page_dir;

        if (!tsk)
                return;
        if (tsk == task[0]) {
                printk("task[0] (swapper) killed: unable to recover\n");
                panic("Trying to free up swapper memory space");
        }
        pg_dir = tsk->tss.cr3;
        if (!pg_dir) {
                printk("Trying to free kernel page-directory: not good\n");
                return;
        }
        tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
        if (tsk == current)
                __asm__ __volatile__("movl %0,%%cr3"::"a" (tsk->tss.cr3));
        page_dir = (unsigned long *) pg_dir;
        for (i = 0 ; i < 1024 ; i++,page_dir++)
                free_one_table(page_dir);
        free_page(pg_dir);
        invalidate();
}
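
/*
 * Note the ordering above: the task is moved onto the kernel's
 * swapper_pg_dir (and %cr3 is reloaded if it is the current task)
 * before any table is freed, so the processor never runs on a page
 * directory that has already been released.
 */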

/*
 * copy_page_tables() just copies the whole process memory range:
 * note the special handling of RESERVED (ie kernel) pages, which
 * means that they are always shared by all processes.
 */
int copy_page_tables(struct task_struct * tsk)
{
        int i;
        unsigned long old_pg_dir, *old_page_dir;
        unsigned long new_pg_dir, *new_page_dir;

        old_pg_dir = current->tss.cr3;
        new_pg_dir = get_free_page(GFP_KERNEL);
        if (!new_pg_dir)
                return -ENOMEM;
        tsk->tss.cr3 = new_pg_dir;
        old_page_dir = (unsigned long *) old_pg_dir;
        new_page_dir = (unsigned long *) new_pg_dir;
        for (i = 0 ; i < 1024 ; i++,old_page_dir++,new_page_dir++) {
                int j;
                unsigned long old_pg_table, *old_page_table;
                unsigned long new_pg_table, *new_page_table;

                old_pg_table = *old_page_dir;
                if (!old_pg_table)
                        continue;
                if (old_pg_table >= high_memory || !(1 & old_pg_table)) {
                        printk("copy_page_tables: bad page table: "
                                "probable memory corruption");
                        *old_page_dir = 0;
                        continue;
                }
                if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
                        *new_page_dir = old_pg_table;
                        continue;
                }
                new_pg_table = get_free_page(GFP_KERNEL);
                if (!new_pg_table) {
                        free_page_tables(tsk);
                        return -ENOMEM;
                }
                *new_page_dir = new_pg_table | PAGE_ACCESSED | 7;
                old_page_table = (unsigned long *) (0xfffff000 & old_pg_table);
                new_page_table = (unsigned long *) (0xfffff000 & new_pg_table);
                for (j = 0 ; j < 1024 ; j++,old_page_table++,new_page_table++) {
                        unsigned long pg;
                        pg = *old_page_table;
                        if (!pg)
                                continue;
                        if (!(pg & PAGE_PRESENT)) {
                                swap_duplicate(pg>>1);
                                *new_page_table = pg;
                                continue;
                        }
                        pg &= ~2;
                        *new_page_table = pg;
                        if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
                                continue;
                        *old_page_table = pg;
                        mem_map[MAP_NR(pg)]++;
                }
        }
        invalidate();
        return 0;
}
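
/*
 * The "pg &= ~2" above clears bit 1, the i386 R/W bit, in both the
 * parent's and the child's entry: the page is then shared read-only,
 * and the first write by either side takes a protection fault into
 * do_wp_page(), which makes a private copy.  The same idea on a single
 * entry, as a sketch (share_entry() is hypothetical, not in this file):
 *
 *	static void share_entry(unsigned long * from, unsigned long * to)
 *	{
 *		unsigned long pg = *from & ~2;
 *		*from = *to = pg;
 *		mem_map[MAP_NR(pg)]++;
 *	}
 */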

/*
 * a more complete version of free_page_tables() which works with
 * page granularity.
 */
int unmap_page_range(unsigned long from, unsigned long size)
{
        unsigned long page, page_dir;
        unsigned long *page_table, *dir;
        unsigned long poff, pcnt, pc;

        if (from & 0xfff)
                panic("unmap_page_range called with wrong alignment");
        if (!from)
                panic("unmap_page_range trying to free swapper memory space");
        size = (size + 0xfff) >> PAGE_SHIFT;
        dir = (unsigned long *) (current->tss.cr3 + ((from >> 20) & 0xffc));
        poff = (from >> PAGE_SHIFT) & 0x3ff;
        if ((pcnt = 1024 - poff) > size)
                pcnt = size;

        for ( ; size > 0; ++dir, size -= pcnt,
             pcnt = (size > 1024 ? 1024 : size)) {
                if (!(page_dir = *dir)) {
                        poff = 0;
                        continue;
                }
                if (!(page_dir & 1)) {
                        printk("unmap_page_range: bad page directory.");
                        continue;
                }
                page_table = (unsigned long *)(0xfffff000 & page_dir);
                if (poff) {
                        page_table += poff;
                        poff = 0;
                }
                for (pc = pcnt; pc--; page_table++) {
                        if ((page = *page_table) != 0) {
                                --current->rss;
                                *page_table = 0;
                                if (1 & page)
                                        free_page(0xfffff000 & page);
                                else
                                        swap_free(page >> 1);
                        }
                }
                if (pcnt == 1024) {
                        free_page(0xfffff000 & page_dir);
                        *dir = 0;
                }
        }
        invalidate();
        return 0;
}
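
/*
 * A worked example of the "(from >> 20) & 0xffc" computation used
 * above and throughout this file: the top 10 bits of a linear address
 * select one of 1024 directory entries and each entry is 4 bytes, so
 * the byte offset into the directory is ((from >> 22) & 0x3ff) << 2,
 * which is exactly (from >> 20) & 0xffc.  For the (hypothetical)
 * address 0x08049000 this gives entry 0x20 at byte offset 0x80.
 */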

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 *
 * permiss is encoded as cxwr (copy,exec,write,read) where copy modifies
 * the behavior of write to be copy-on-write.
 *
 * due to current limitations, we actually have the following
 *              on              off
 * read:        yes             yes
 * write/copy:  yes/copy        copy/copy
 * exec:        yes             yes
 */
int remap_page_range(unsigned long from, unsigned long to, unsigned long size,
                 int permiss)
{
        unsigned long *page_table, *dir;
        unsigned long poff, pcnt;
        unsigned long page;

        if ((from & 0xfff) || (to & 0xfff))
                panic("remap_page_range called with wrong alignment");
        dir = (unsigned long *) (current->tss.cr3 + ((from >> 20) & 0xffc));
        size = (size + 0xfff) >> PAGE_SHIFT;
        poff = (from >> PAGE_SHIFT) & 0x3ff;
        if ((pcnt = 1024 - poff) > size)
                pcnt = size;

        while (size > 0) {
                if (!(1 & *dir)) {
                        if (!(page_table = (unsigned long *)get_free_page(GFP_KERNEL))) {
                                invalidate();
                                return -1;
                        }
                        *dir++ = ((unsigned long) page_table) | PAGE_ACCESSED | 7;
                }
                else
                        page_table = (unsigned long *)(0xfffff000 & *dir++);
                if (poff) {
                        page_table += poff;
                        poff = 0;
                }

                for (size -= pcnt; pcnt-- ;) {
                        int mask;

                        mask = 4;
                        if (permiss & 1)
                                mask |= 1;
                        if (permiss & 2) {
                                if (permiss & 8)
                                        mask |= 1;
                                else
                                        mask |= 3;
                        }
                        if (permiss & 4)
                                mask |= 1;

                        if ((page = *page_table) != 0) {
                                *page_table = 0;
                                --current->rss;
                                if (1 & page)
                                        free_page(0xfffff000 & page);
                                else
                                        swap_free(page >> 1);
                        }

                        /*
                         * I'm not sure of the second condition here: should
                         * we report failure?  The first condition should
                         * return an invalid access when the page is
                         * referenced. current assumptions cause it to be
                         * treated as demand allocation.
                         */
                        if (mask == 4 || to >= high_memory || !mem_map[MAP_NR(to)])
                                *page_table++ = 0;      /* not present */
                        else {
                                ++current->rss;
                                *page_table++ = (to | mask);
                                if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED))
                                        mem_map[MAP_NR(to)]++;
                        }
                        to += PAGE_SIZE;
                }
                pcnt = (size > 1024 ? 1024 : size);
        }
        invalidate();
        return 0;
}
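
/*
 * The mask computed in the inner loop above, spelled out (in the i386
 * page-table format bit 0 = present, bit 1 = writable, bit 2 = user;
 * the loop always starts from mask = 4):
 *
 *	permiss			resulting mask
 *	read (1)		5  (user, present)
 *	write (2)		7  (user, present, writable)
 *	write+copy (2|8)	5  (user, present: copy-on-write)
 *	exec (4)		5  (user, present)
 *
 * i.e. a write mapping is made directly writable only when the copy
 * bit is off; with copy on, the write bit is withheld so that the
 * first write faults and gets a private page.
 */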

/*
 * This function puts a page in memory at the wanted address.
 * It returns the physical address of the page that was put there, or 0
 * if we're out of memory (either when trying to access the page table
 * or the page itself).
 */
static unsigned long put_page(struct task_struct * tsk,unsigned long page,unsigned long address)
{
        unsigned long tmp, *page_table;

/* NOTE !!! This uses the fact that _pg_dir=0 */

        if (page >= high_memory) {
                printk("put_page: trying to put page %p at %p\n",page,address);
                return 0;
        }
        tmp = mem_map[MAP_NR(page)];
        if (!(tmp & MAP_PAGE_RESERVED) && (tmp != 1)) {
                printk("put_page: mem_map disagrees with %p at %p\n",page,address);
                return 0;
        }
        page_table = (unsigned long *) (tsk->tss.cr3 + ((address>>20) & 0xffc));
        if ((*page_table)&1)
                page_table = (unsigned long *) (0xfffff000 & *page_table);
        else {
                tmp = get_free_page(GFP_KERNEL);
                if (!tmp) {
                        oom(tsk);
                        tmp = BAD_PAGETABLE;
                }
                *page_table = tmp | PAGE_ACCESSED | 7;
                return 0;
        }
        page_table += (address >> PAGE_SHIFT) & 0x3ff;
        if (*page_table) {
                printk("put_page: page already exists\n");
                *page_table = 0;
                invalidate();
        }
        *page_table = page | PAGE_ACCESSED | 7;
/* no need for invalidate */
        return page;
}

/*
 * The previous function doesn't work very well if you also want to mark
 * the page dirty: exec.c wants this, as it has earlier changed the page,
 * and we want the dirty-status to be correct (for VM). Thus the same
 * routine, but this time we mark it dirty too.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
        unsigned long tmp, *page_table;

/* NOTE !!! This uses the fact that _pg_dir=0 */

        if (page >= high_memory)
                printk("put_dirty_page: trying to put page %p at %p\n",page,address);
        if (mem_map[MAP_NR(page)] != 1)
                printk("mem_map disagrees with %p at %p\n",page,address);
        page_table = (unsigned long *) (tsk->tss.cr3 + ((address>>20) & 0xffc));
        if ((*page_table)&1)
                page_table = (unsigned long *) (0xfffff000 & *page_table);
        else {
                if (!(tmp=get_free_page(GFP_KERNEL)))
                        return 0;
                *page_table = tmp|7;
                page_table = (unsigned long *) tmp;
        }
        page_table += (address >> PAGE_SHIFT) & 0x3ff;
        if (*page_table) {
                printk("put_dirty_page: page already exists\n");
                *page_table = 0;
                invalidate();
        }
        *page_table = page | (PAGE_DIRTY | PAGE_ACCESSED | 7);
/* no need for invalidate */
        return page;
}

static void un_wp_page(unsigned long * table_entry, struct task_struct * task)
{
        unsigned long old_page;
        unsigned long new_page = 0;
        unsigned long dirty;

repeat:
        old_page = *table_entry;
        if (!(old_page & 1)) {
                if (new_page)
                        free_page(new_page);
                return;
        }
        dirty = old_page & PAGE_DIRTY;
        old_page &= 0xfffff000;
        if (old_page >= high_memory) {
                if (new_page)
                        free_page(new_page);
                printk("bad page address\n\r");
                send_sig(SIGSEGV, task, 1);
                *table_entry = BAD_PAGE | 7;
                return;
        }
        if (mem_map[MAP_NR(old_page)]==1) {
                *table_entry |= 2;
                invalidate();
                if (new_page)
                        free_page(new_page);
                return;
        }
        if (!new_page && (new_page=get_free_page(GFP_KERNEL)))
                goto repeat;
        if (new_page)
                copy_page(old_page,new_page);
        else {
                new_page = BAD_PAGE;
                oom(task);
        }
        *table_entry = new_page | dirty | PAGE_ACCESSED | 7;
        free_page(old_page);
        invalidate();
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * If it's in code space we exit with a segment error.
 */
void do_wp_page(unsigned long error_code, unsigned long address,
        struct task_struct * tsk, unsigned long user_esp)
{
        unsigned long pde, pte, page;

        pde = tsk->tss.cr3 + ((address>>20) & 0xffc);
        pte = *(unsigned long *) pde;
        if ((pte & 3) != 3) {
                printk("do_wp_page: bogus page-table at address %08x (%08x)\n",address,pte);
                *(unsigned long *) pde = BAD_PAGETABLE | 7;
                send_sig(SIGSEGV, tsk, 1);
                return;
        }
        pte &= 0xfffff000;
        pte += (address>>10) & 0xffc;
        page = *(unsigned long *) pte;
        if ((page & 3) != 1) {
                printk("do_wp_page: bogus page at address %08x (%08x)\n",address,page);
                *(unsigned long *) pte = BAD_PAGE | 7;
                send_sig(SIGSEGV, tsk, 1);
                return;
        }
        tsk->min_flt++;
        un_wp_page((unsigned long *) pte, tsk);
}
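
/*
 * The two lookups above walk the two-level i386 page table by hand.
 * For the (hypothetical) faulting address 0x08049123:
 *
 *	directory offset = (0x08049123 >> 20) & 0xffc = 0x080	(entry 0x20)
 *	table offset	 = (0x08049123 >> 10) & 0xffc = 0x124	(entry 0x49)
 *
 * and the low 12 bits (0x123) are the offset inside the 4kB page.
 */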

void write_verify(unsigned long address)
{
        unsigned long page;

        page = *(unsigned long *) (current->tss.cr3 + ((address>>20) & 0xffc));
        if (!(page & PAGE_PRESENT))
                return;
        page &= 0xfffff000;
        page += ((address>>10) & 0xffc);
        if ((3 & *(unsigned long *) page) == 1)  /* non-writeable, present */
                un_wp_page((unsigned long *) page, current);
        return;
}
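
/*
 * write_verify() exists because the 386 ignores the page-table
 * write-protect bit when running in kernel mode: a kernel-mode write
 * into user space would silently scribble on a shared page instead of
 * faulting, so the copy-on-write has to be forced by hand before such
 * writes.
 */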

static void get_empty_page(struct task_struct * tsk, unsigned long address)
{
        unsigned long tmp;

        tmp = get_free_page(GFP_KERNEL);
        if (!tmp) {
                oom(tsk);
                tmp = BAD_PAGE;
        }
        if (!put_page(tsk,tmp,address))
                free_page(tmp);
}

/*
 * try_to_share() checks the page at address "address" in the task "p",
 * to see if it exists, and if it is clean. If so, share it with the current
 * task.
 *
 * NOTE! This assumes we have checked that p != current, and that they
 * share the same executable or library.
 */
static int try_to_share(unsigned long address, struct task_struct * tsk,
        struct task_struct * p)
{
        unsigned long from;
        unsigned long to;
        unsigned long from_page;
        unsigned long to_page;
        unsigned long phys_addr;

        from_page = p->tss.cr3 + ((address>>20) & 0xffc);
        to_page = tsk->tss.cr3 + ((address>>20) & 0xffc);
/* is there a page-directory at from? */
        from = *(unsigned long *) from_page;
        if (!(from & 1))
                return 0;
        from &= 0xfffff000;
        from_page = from + ((address>>10) & 0xffc);
        phys_addr = *(unsigned long *) from_page;
/* is the page clean and present? */
        if ((phys_addr & 0x41) != 0x01)
                return 0;
        phys_addr &= 0xfffff000;
        if (phys_addr >= high_memory)
                return 0;
        if (mem_map[MAP_NR(phys_addr)] & MAP_PAGE_RESERVED)
                return 0;
        to = *(unsigned long *) to_page;
        if (!(to & 1)) {
                to = get_free_page(GFP_KERNEL);
                if (!to)
                        return 0;
                *(unsigned long *) to_page = to | PAGE_ACCESSED | 7;
        }
        to &= 0xfffff000;
        to_page = to + ((address>>10) & 0xffc);
        if (1 & *(unsigned long *) to_page)
                panic("try_to_share: to_page already exists");
/* share them: write-protect */
        *(unsigned long *) from_page &= ~2;
        *(unsigned long *) to_page = *(unsigned long *) from_page;
        invalidate();
        phys_addr >>= PAGE_SHIFT;
        mem_map[phys_addr]++;
        return 1;
}
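
/*
 * The "(phys_addr & 0x41) != 0x01" test above checks two i386 bits at
 * once: bit 0 (0x01) is "present" and bit 6 (0x40) is "dirty".  Only
 * a page that is present AND clean - masked value exactly 0x01 - may
 * be shared; a dirty page already differs from what is on disk.
 */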

/*
 * share_page() tries to find a process that could share a page with
 * the current one. Address is the address of the wanted page relative
 * to the current data space.
 *
 * We first check if it is at all feasible by checking executable->i_count.
 * It should be >1 if there are other tasks sharing this inode.
 */
static int share_page(struct task_struct * tsk, struct inode * inode, unsigned long address)
{
        struct task_struct ** p;
        int i;

        if (!inode || inode->i_count < 2)
                return 0;
        for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
                if (!*p)
                        continue;
                if (tsk == *p)
                        continue;
                if (inode != (*p)->executable) {
                        for (i=0; i < (*p)->numlibraries; i++)
                                if (inode == (*p)->libraries[i].library)
                                        break;
                        if (i >= (*p)->numlibraries)
                                continue;
                }
                if (try_to_share(address,tsk,*p))
                        return 1;
        }
        return 0;
}

/*
 * fill in an empty page-table if none exists
 */
static unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
{
        unsigned long page = 0;
        unsigned long *p;
repeat:
        p = (unsigned long *) (tsk->tss.cr3 + ((address >> 20) & 0xffc));
        if (1 & *p) {
                free_page(page);
                return *p;
        }
        if (*p) {
                printk("get_empty_pgtable: bad page-directory entry\n");
                *p = 0;
        }
        if (page) {
                *p = page | PAGE_ACCESSED | 7;
                return *p;
        }
        if ((page = get_free_page(GFP_KERNEL)) != 0)
                goto repeat;
        oom(current);
        *p = BAD_PAGETABLE | 7;
        return 0;
}
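
/*
 * Note the allocate-then-recheck pattern above: get_free_page() may
 * sleep, and somebody else may install a page table in this slot in
 * the meantime.  So after a successful allocation the code jumps back
 * to "repeat" and re-examines the directory entry, freeing the new
 * page again if it turned out not to be needed.
 */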

void do_no_page(unsigned long error_code, unsigned long address,
        struct task_struct *tsk, unsigned long user_esp)
{
        int nr[4];
        unsigned long tmp;
        unsigned long page;
        unsigned int block,i;
        struct inode * inode;

        page = get_empty_pgtable(tsk,address);
        if (!page)
                return;
        page &= 0xfffff000;
        page += (address >> 10) & 0xffc;
        tmp = *(unsigned long *) page;
        if (tmp & 1) {
                printk("bogus do_no_page\n");
                return;
        }
        ++tsk->rss;
        if (tmp) {
                ++tsk->maj_flt;
                swap_in((unsigned long *) page);
                return;
        }
        address &= 0xfffff000;
        inode = NULL;
        block = 0;
        if (address < tsk->end_data) {
                inode = tsk->executable;
                block = 1 + address / BLOCK_SIZE;
        } else {
                i = tsk->numlibraries;
                while (i-- > 0) {
                        if (address < tsk->libraries[i].start)
                                continue;
                        block = address - tsk->libraries[i].start;
                        if (block >= tsk->libraries[i].length + tsk->libraries[i].bss)
                                continue;
                        inode = tsk->libraries[i].library;
                        if (block < tsk->libraries[i].length)
                                block = 1 + block / BLOCK_SIZE;
                        else
                                block = 0;
                        break;
                }
        }
        if (!inode) {
                ++tsk->min_flt;
                get_empty_page(tsk,address);
                if (tsk != current)
                        return;
                if (address < tsk->brk)
                        return;
                if (address+8192 >= (user_esp & 0xfffff000))
                        return;
                send_sig(SIGSEGV,tsk,1);
                return;
        }
        if (share_page(tsk,inode,address)) {
                ++tsk->min_flt;
                return;
        }
        ++tsk->maj_flt;
        page = get_free_page(GFP_KERNEL);
        if (!page) {
                oom(current);
                put_page(tsk,BAD_PAGE,address);
                return;
        }
        if (block) {
                for (i=0 ; i<4 ; block++,i++)
                        nr[i] = bmap(inode,block);
                bread_page(page,inode->i_dev,nr);
        }
        i = address + PAGE_SIZE - tsk->end_data;
        if (i > PAGE_SIZE-1)
                i = 0;
        tmp = page + PAGE_SIZE;
        while (i--) {
                tmp--;
                *(char *)tmp = 0;
        }
        if (put_page(tsk,page,address))
                return;
        free_page(page);
        oom(current);
}
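
/*
 * On the block arithmetic above: block numbers are 1-based, 0 meaning
 * "nothing to read" (for bss pages), and the "1 +" skips over the 1kB
 * executable header at the start of the file.  Since BLOCK_SIZE is
 * 1024 and a page is 4096 bytes, bread_page() is handed four
 * consecutive block numbers per page fault.
 */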

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        printk("Free pages:      %6d\n",nr_free_pages);
        printk("Secondary pages: %6d\n",nr_secondary_pages);
        printk("Buffer heads:    %6d\n",nr_buffer_heads);
        printk("Buffer blocks:   %6d\n",nr_buffers);
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void do_page_fault(unsigned long *esp, unsigned long error_code)
{
        unsigned long address;
        unsigned long user_esp = 0;

        /* get the address */
        __asm__("movl %%cr2,%0":"=r" (address));
        if (address >= TASK_SIZE) {
                printk("Unable to handle kernel paging request at address %08x\n",address);
                do_exit(SIGSEGV);
        }
        if (esp[2] & VM_MASK) {
                unsigned int bit;

                bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        current->screen_bitmap |= 1 << bit;
        } else
                if ((0xffff & esp[1]) == 0xf)
                        user_esp = esp[3];
        if (!(error_code & 1))
                do_no_page(error_code, address, current, user_esp);
        else
                do_wp_page(error_code, address, current, user_esp);
}
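
/*
 * The i386 page-fault error code tested above: bit 0 is clear when
 * the page was simply not present (handled by do_no_page) and set on
 * a protection violation on a present page (handled by do_wp_page).
 * Bit 1 distinguishes writes from reads and bit 2 user mode from
 * kernel mode, though only bit 0 is consulted here.
 */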

void mem_init(unsigned long start_low_mem,
              unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        unsigned short * p;

        cli();
        end_mem &= 0xfffff000;
        high_memory = end_mem;
        start_mem += 0x0000000f;
        start_mem &= 0xfffffff0;
        tmp = MAP_NR(end_mem);
        mem_map = (unsigned short *) start_mem;
        p = mem_map + tmp;
        start_mem = (unsigned long) p;
        while (p > mem_map)
                *--p = MAP_PAGE_RESERVED;
        start_low_mem += 0x00000fff;
        start_low_mem &= 0xfffff000;
        start_mem += 0x00000fff;
        start_mem &= 0xfffff000;
        while (start_low_mem < 0xA0000) {
                mem_map[MAP_NR(start_low_mem)] = 0;
                start_low_mem += 4096;
        }
        while (start_mem < end_mem) {
                mem_map[MAP_NR(start_mem)] = 0;
                start_mem += 4096;
        }
        free_page_list = 0;
        nr_free_pages = 0;
        for (tmp = 0 ; tmp < end_mem ; tmp += 4096) {
                if (mem_map[MAP_NR(tmp)]) {
                        if (tmp < 0xA0000)
                                codepages++;
                        else if (tmp < 0x100000)
                                reservedpages++;
                        else
                                datapages++;
                        continue;
                }
                *(unsigned long *) tmp = free_page_list;
                free_page_list = tmp;
                nr_free_pages++;
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %dk/%dk available (%dk kernel, %dk reserved, %dk data)\n",
                tmp >> 10,
                end_mem >> 10,
                codepages << 2,
                reservedpages << 2,
                datapages << 2);
        return;
}
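
/*
 * mem_init() threads the free pages into a singly linked list that
 * needs no storage of its own: the first longword of each free page
 * holds the address of the next one.  A sketch of the same intrusive
 * idea (push()/pop() are hypothetical, for illustration only):
 *
 *	unsigned long head = 0;
 *
 *	void push(unsigned long page)
 *	{
 *		*(unsigned long *) page = head;
 *		head = page;
 *	}
 *
 *	unsigned long pop(void)
 *	{
 *		unsigned long page = head;
 *		if (page)
 *			head = *(unsigned long *) page;
 *		return page;
 *	}
 */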
