root/arch/ppc/mm/init.c

DEFINITIONS

This source file includes the following definitions.
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo
  8. MMU_free_item
  9. MMU_get_item
  10. MMU_init
  11. MMU_get_page
  12. MMU_map_page
  13. MMU_hash_page
  14. invalidate
  15. cache_mode

/*
 *  arch/ppc/mm/init.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Ported to PPC by Gary Thomas
 */


#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/pgtable.h>

/* Made this a static array since the Alpha and Intel ones aren't dynamic either.
   Thomas made it a dynamic array and had to add lots of stuff to other parts
   of Linux to make sure the pages were contiguous and such.  The static array
   seems much easier.
   Making it 8K for now; will change later.
      -- Cort
   */
pgd_t swapper_pg_dir[1024];
/*pgd_t *swapper_pg_dir;*/

pte *MMU_get_page(void);


#if 0
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
#endif

#ifdef CONFIG_DESKSTATION_TYNE
extern void deskstation_tyne_dma_init(void);
#endif
#ifdef CONFIG_SCSI
extern void scsi_mem_init(unsigned long);
#endif
#ifdef CONFIG_SOUND
extern void sound_mem_init(void);
#endif
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        panic("__bad_pagetable");
#if 0
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"r" (pte_val(BAD_PAGE)),
                 "0" ((long) empty_bad_page_table),
                 "1" (PTRS_PER_PAGE));

        return (pte_t *) empty_bad_page_table;
#endif
}

pte_t __bad_page(void)
{
        panic("__bad_page");
#if 0
        extern char empty_bad_page[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"0" ((long) empty_bad_page),
                 "1" (PTRS_PER_PAGE));

        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
#endif
}

unsigned long __zero_page(void)
{
#if 0
        panic("__zero_page");
#else
        extern char empty_zero_page[PAGE_SIZE];
        bzero(empty_zero_page, PAGE_SIZE);
        return (unsigned long) empty_zero_page;
#endif
}

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{

#if 0
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
                if (pgd_none(pg_dir[0])) {
                        pgd_set(pg_dir, (pte_t *) start_mem);
                        start_mem += PAGE_SIZE;
                }
                /*
                 * also map it in at 0x00000000 for init
                 */
                pg_table = (pte_t *) pgd_page(pg_dir[0]);
                pgd_set(pg_dir, pg_table);
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                *pg_table = mk_pte(address, PAGE_SHARED);
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
#if KERNELBASE == KSEG0
        cacheflush();
#endif
        invalidate();
#endif
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

#if 0
        printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)].reserved = 0;
                start_mem += PAGE_SIZE;
        }
#ifdef CONFIG_DESKSTATION_TYNE
        deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
        scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
        {
                if (mem_map[MAP_NR(tmp)].reserved)
                {
                        /*
                         * We don't have any reserved pages on the
                         * MIPS systems supported until now
                         */
                        if (0)
                        {
                                reservedpages++;
                        } else if (tmp < (unsigned long) &etext)
                        {
                                codepages++;
                        } else
                        {
                                datapages++;
                        }
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                ((int)high_memory - (int)KERNELBASE) >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
        invalidate();
        return;
}

void si_meminfo(struct sysinfo *val)
{
#if 0
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
#endif
}

/* Kernel MMU setup & lowest level hardware support */

/* Hardwired MMU segments */

/* Segment 0x8XXXXXXX, 0xCXXXXXXX always mapped (for I/O) */
/* Segment 0x9XXXXXXX mapped during init */

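/*
 * A note on the BAT encoding below, as read from this code (not from
 * chip documentation): the upper word of a BAT holds the block
 * effective page index (bepi) and the lower word the block real page
 * number (brpn); both are the top 15 bits of an address, hence the
 * >>17 shifts.  BL_256M is the block-length mask for a 256MB block,
 * vs/vp validate the mapping in supervisor/problem (user) state, and
 * w/i/m/g are the usual WIMG storage-control bits (write-through,
 * cache-inhibited, memory-coherent, guarded).  The I/O segments are
 * mapped cache-inhibited and guarded.
 */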
BAT BAT0 =
   {
        {
                0x80000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x80000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT1 =
   {
        {
                0xC0000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0xC0000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT2 =
   {
/* map kernel with bats 0 = yes */
#if 1
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#else
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#endif
   };
BAT BAT3 =
   {
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT TMP_BAT2 =
   { /* 0x9XXXXXXX -> 0x0XXXXXXX */
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };

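/*
 * Software state for the hashed page table.  Hash, Hash_size and
 * Hash_mask describe the table itself and appear to be set up during
 * find_end_of_memory(); _SDR1 is the image written to the hardware
 * SDR1 register.  The two cache flags select copy-back vs.
 * write-through policy and can be changed at boot via cache_mode()
 * at the bottom of this file.
 */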
unsigned long _SDR1;            /* Hardware SDR1 image */
PTE *Hash;
int Hash_size, Hash_mask;
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

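/*
 * Ring buffer of the most recent va/pa mappings, kept only for the
 * diagnostic dumps that MMU_hash_page() prints when the hash table
 * fills up.
 */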
#define NUM_MAPPINGS 8
struct
   {
        int va, pa, task;
   } last_mappings[NUM_MAPPINGS];
int next_mapping = 0;

/* Generic linked list */
struct item
   {
        struct item *next;
   };

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS    16
#define MAX_MMU_PAGES   8

static struct item _free_pages;
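/* Static pool of pages handed out by MMU_get_page(); one extra page is
   reserved so the pool can be rounded up to an MMU_PAGE_SIZE boundary
   in MMU_init(). */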
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];

/*
 * Routines to support generic linked lists.
 */

void
MMU_free_item(struct item *hdr, struct item *elem)
{
        if (hdr->next == (struct item *)NULL)
        { /* First item in list */
                elem->next = (struct item *)NULL;
        } else
        {
                elem->next = hdr->next;
        }
        hdr->next = elem;
}

struct item *
MMU_get_item(struct item *hdr)
{
        struct item *item;
        if ((item = hdr->next) != (struct item *)NULL)
        {
                hdr->next = item->next;
        }
        return (item);
}

/*
 * This code is called to create a minimal mapped environment.
 * It is called with the MMU on, but with only a BAT register
 * set up to cover the code/data.  After this routine runs,
 * the BAT mapping is withdrawn and all mappings must be complete.
 */

extern char _start[], _end[];

void MMU_init(void)
{
        int i, p;
        SEGREG *segs;
/*      _printk("MMU init - started\n");*/
        find_end_of_memory();
/*      _printk("  Start at 0x%08X, End at 0x%08X, Hash at 0x%08X\n", _start, _end, Hash);*/
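        /*
         * SDR1 tells the hardware where the hash table lives: the
         * high-order bits hold the physical origin of the table and
         * the low-order bits the hash mask.  Hash is a KERNELBASE
         * virtual pointer, so masking with 0x00FFFFFF converts it to
         * a physical address (assuming the table sits in the first
         * 16MB of RAM, as the mappings below also assume).
         */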
        _SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
        p = (int)mmu_pages;
        p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
        _free_pages.next = (struct item *)NULL;
        for (i = 0;  i < MAX_MMU_PAGES;  i++)
        {
                MMU_free_item(&_free_pages, (struct item *)p);
                p += MMU_PAGE_SIZE;
        }
        /* Force initial page tables */
        /*swapper_pg_dir = (pgd_t *)MMU_get_page();*/
        init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

        /* Segment registers */
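        /*
         * Each of the sixteen 256MB segments gets supervisor key 0
         * and problem-state (user) key 1; for the init task the VSID
         * is simply the segment number.
         */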
        segs = (SEGREG *)init_task.tss.segs;
        for (i = 0;  i < 16;  i++)
        {
                segs[i].ks = 0;
                segs[i].kp = 1;
                segs[i].vsid = i;
        }
        /* Map kernel TEXT+DATA+BSS */
#if 0
        for (i = (int)_start;  i <= (int)_end;  i += MMU_PAGE_SIZE)
#else
        /* Other parts of the kernel expect ALL RAM to be mapped */
        for (i = (int)_start;  i <= (int)Hash;  i += MMU_PAGE_SIZE)
#endif
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
        /* Map hardware HASH table */
        for (i = (int)Hash;  i < (int)Hash+Hash_size;  i += MMU_PAGE_SIZE)
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
/*      _printk("MMU init - done!\n");*/
}

pte *
MMU_get_page(void)
{
        pte *pg;
        if ((pg = (pte *)MMU_get_item(&_free_pages)))
        {
                bzero((char *)pg, MMU_PAGE_SIZE);
        }
/*      _printk("MMU Allocate Page at %08X\n", pg);*/
        return(pg);
}

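/*
 * MMU_map_page() enters a va->pa translation into the software page
 * tables.  These form a classic two-level tree: the top 10 bits of the
 * virtual address index the first-level map, the middle 10 bits index
 * a second-level table, and the low 12 bits are the byte offset within
 * the page (the index arithmetic below implies PD_SHIFT=22, PT_SHIFT=12
 * and 4KB pages).  The hardware never walks this tree; MMU_hash_page()
 * below copies each entry into the hash table the hardware does use.
 */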
void MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
        pte *pd, *pg;
        if (va < (unsigned long)0x90000000)
                _printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
        if ((pte **)tss->pg_tables == (pte **)NULL)
        { /* Allocate upper level page map */
                tss->pg_tables = (unsigned long *)MMU_get_page();
                if ((pte **)tss->pg_tables == (pte **)NULL)
                {
                        _panic("Out of MMU pages (PD)\n");
                }
        }
        /* Use upper 10 bits of VA to index the first level map */
        pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
        if (pd == (pte *)NULL)
        { /* Need to allocate second-level table */
                pd = (pte *)MMU_get_page();
                if (pd == (pte *)NULL)
                {
                        _panic("Out of MMU pages (PG)\n");
                }
                ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
        }
        /* Use middle 10 bits of VA to index the second-level map */
        pg = &pd[(va>>PT_SHIFT)&PT_MASK];
        *(long *)pg = 0;  /* Clear out entry */
        pg->page_num = pa>>PG_SHIFT;
        pg->flags = flags;
        MMU_hash_page(tss, va, pg);
}

/*
 * Insert (create) a hardware page table entry.
 */
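/*
 * How the lookup works, as read from this code: the hash table is an
 * array of 8-entry buckets (PTEGs).  The primary bucket index is
 * (page index within the segment) XOR VSID, masked to the table size;
 * the secondary bucket is the ones-complement of that hash.  An entry
 * matches if its VSID, hash-function bit (h) and abbreviated page
 * index (api, the top 6 bits of the page index) all match.  Calling
 * this with pg == NULL evicts the entry for va and returns its
 * referenced/changed bits as _PAGE_ACCESSED/_PAGE_DIRTY.
 */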
int MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
        int hash, page_index, segment, i, h = 0, _h, api, vsid, perms;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;
        extern char _etext;

/*      printk("hashing tss = %x va = %x pg = %x\n", tss, va, pg);*/
/* TEMP */
        last_mappings[next_mapping].va = va;
        last_mappings[next_mapping].pa = pg?*(int *)pg:0;
        last_mappings[next_mapping].task = (int)current;
        if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
/* TEMP */
        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                /* Save slot addresses in case we have to purge */
                if (_h)
                {
                        slot1 = _pte;
                } else
                {
                        slot0 = _pte;
                }
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
        if (slot == (PTE *)NULL)
        {
                if (pg == (pte *)NULL)
                {
                        return (0);
                }
                if (empty == (PTE *)NULL)
                { /* Table is totally full! */
                        printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
                        printk("Slot0:\n");
                        _pte = slot0;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        printk("Slot1:\n");
                        _pte = slot1;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        cnpause();
                        printk("Last mappings:\n");
                        for (i = 0;  i < NUM_MAPPINGS;  i++)
                        {
                                printk("  VA: %08x, PA: %08X, TASK: %08X\n",
                                        last_mappings[next_mapping].va,
                                        last_mappings[next_mapping].pa,
                                        last_mappings[next_mapping].task);
                                if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
                        }
                        cnpause();
                        _panic("Hash table full!\n");
                }
                slot = empty;
        }
found_it:
#if 0
        _printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
        _tlbie(va); /* Clear TLB */
        if (pg)
        { /* Fill in table */
                slot->v = 1;
                slot->vsid = vsid;
                slot->h = h;
                slot->api = api;
                if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
                {
                        slot->rpn = pg->page_num - (KERNELBASE>>12);
                } else
                {
                        slot->rpn = pg->page_num;
                }
                slot->r = 0;
                slot->c = 0;
                slot->i = 0;
                slot->g = 0;
                if (cache_is_copyback)
                {
                        if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
                        { /* All User & Kernel TEXT pages are copy-back */
                                slot->w = 0;
                                slot->m = 1;
                        } else
                        { /* Kernel DATA pages are write-thru */
                                slot->w = 1;
                                slot->m = 0;
                        }
                } else
                {
                        slot->w = 1;
                        slot->m = 0;
                }
                if (pg->flags & _PAGE_USER)
                {
                        if (pg->flags & _PAGE_RW)
                        { /* Read/write page */
                                perms = PP_RWRW;
                        } else
                        { /* Read only page */
                                perms = PP_RWRX;
                        }
                } else
                { /* Kernel pages */
                        perms = PP_RWXX;
                }
#ifdef SHOW_FAULTS
                if (va < KERNELBASE)
                        _printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
#endif
                slot->pp = perms;
                return (0);
        } else
        { /* Pull entry from tables */
                int flags = 0;
                if (slot->r) flags |= _PAGE_ACCESSED;
                if (slot->c) flags |= _PAGE_DIRTY;
                slot->v = 0;
#ifdef SHOW_FAULTS
                _printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
                return (flags);
        }
}

/*
 * Invalidate the MMU [hardware] tables (for current task?)
 */
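/*
 * This walks the user half of the current task's page tables and pulls
 * each present page out of the hash table via MMU_hash_page(..., 0),
 * folding the hardware referenced/changed bits back into the software
 * pte flags.
 */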
void
invalidate(void)
{
  int i, j, flags;
  unsigned long address;
  pgd_t *pgd;
  pte_t *_pte;
#if 0
  _tlbia();  /* Flush TLB entries */
#endif
  pgd = pgd_offset(current->mm, 0);
  if (!pgd) return;  /* No map? */
  address = 0;
  for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
  {
    if (*(long *)pgd)
    {
      /* I know there are only two levels, but the macros don't know that */
      _pte = pte_offset(pmd_offset(pgd,0),0);
      if (_pte)
      {
        for (j = 0;  j < PTRS_PER_PTE;  j++)
        {
          if (pte_present(*_pte))
          {
            flags = MMU_hash_page(&current->tss, address, 0);
            ((pte *)_pte)->flags |= flags;
          }
          _pte++;
          address += PAGE_SIZE;
        }
      } else
      {
        address += PAGE_SIZE*PTRS_PER_PTE;
      }
    } else
    {
      address += PAGE_SIZE*PTRS_PER_PTE;
    }
    pgd++;
  }
}

void
cache_mode(char *str, int *ints)
{
        cache_is_copyback = ints[0];
}

