arch/ppc/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo
  8. MMU_free_item
  9. MMU_get_item
  10. MMU_init
  11. MMU_get_page
  12. MMU_map_page
  13. MMU_hash_page
  14. invalidate
  15. cache_mode

/*
 *  arch/ppc/mm/init.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Ported to PPC by Gary Thomas
 */


#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/pgtable.h>


/* Made this a static array, since the alpha and intel versions aren't dynamic.
   Thomas made it a dynamic array and had to add lots of stuff to other parts
   of linux to make sure the pages were contiguous and such.  The static array
   seems much easier.
   Making it 8k for now; will change later.
      -- Cort
   */
pgd_t swapper_pg_dir[1024];
/*pgd_t *swapper_pg_dir;*/

pte *MMU_get_page(void);


#if 0
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
#endif

#ifdef CONFIG_DESKSTATION_TYNE
extern void deskstation_tyne_dma_init(void);
#endif
#ifdef CONFIG_SOUND
extern void sound_mem_init(void);
#endif
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
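/*
 * A minimal sketch of how the generic mm code is meant to use these
 * (illustrative, not a quote from that code): when an allocation fails
 * during a page fault, BAD_PAGE can be mapped instead of killing the
 * process outright:
 *
 *      unsigned long page = __get_free_page(GFP_KERNEL);
 *      pte_t pte = page ? mk_pte(page, PAGE_SHARED) : BAD_PAGE;
 *
 * On this port the first two helpers below simply panic().
 */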
pte_t * __bad_pagetable(void)
{
        panic("__bad_pagetable");
#if 0
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"r" (pte_val(BAD_PAGE)),
                 "0" ((long) empty_bad_page_table),
                 "1" (PTRS_PER_PAGE));

        return (pte_t *) empty_bad_page_table;
#endif
}

pte_t __bad_page(void)
{
        panic("__bad_page");
#if 0
        extern char empty_bad_page[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"0" ((long) empty_bad_page),
                 "1" (PTRS_PER_PAGE));

        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
#endif
}

unsigned long __zero_page(void)
{
#if 0
        panic("__zero_page");
#else
        extern char empty_zero_page[PAGE_SIZE];
        bzero(empty_zero_page, PAGE_SIZE);
        return (unsigned long) empty_zero_page;
#endif
}

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{

#if 0
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
                if (pgd_none(pg_dir[0])) {
                        pgd_set(pg_dir, (pte_t *) start_mem);
                        start_mem += PAGE_SIZE;
                }
                /*
                 * also map it in at 0x00000000 for init
                 */
                pg_table = (pte_t *) pgd_page(pg_dir[0]);
                pgd_set(pg_dir, pg_table);
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                *pg_table = mk_pte(address, PAGE_SHARED);
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
#if KERNELBASE == KSEG0
        cacheflush();
#endif
        invalidate();
#endif
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

#if 0
printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)].reserved = 0;
                start_mem += PAGE_SIZE;
        }
#ifdef CONFIG_DESKSTATION_TYNE
        deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
        {
                if (mem_map[MAP_NR(tmp)].reserved)
                {
                        /*
                         * We don't have any reserved pages on the
                         * MIPS systems supported until now
                         */
                        if (0)
                        {
                                reservedpages++;
                        } else if (tmp < (unsigned long) &etext)
                        {
                                codepages++;
                        } else
                        {
                                datapages++;
                        }
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                ((int)high_memory - (int)KERNELBASE) >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
        invalidate();
        return;
}

void si_meminfo(struct sysinfo *val)
{
#if 0
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
#endif
}

/* Kernel MMU setup & lowest level hardware support */

/* Hardwired MMU segments */

/* Segment 0x8XXXXXXX, 0xCXXXXXXX always mapped (for I/O) */
/* Segment 0x9XXXXXXX mapped during init */

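/*
 * Layout notes for the BAT (Block Address Translation) images below,
 * per the 32-bit PowerPC architecture (a summary, not taken from this
 * file): the upper BAT word holds BEPI (the top 15 bits of the
 * effective address, hence the >>17), the block length mask, and the
 * Vs/Vp supervisor/user valid bits; the lower word holds BRPN (the
 * physical block number), the WIMG storage attributes, and the PP
 * protection bits.  The I/O BATs are mapped cache-inhibited (i = 1)
 * and guarded (g = 1).
 */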
BAT BAT0 =
   {
        {
                0x80000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x80000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT1 =
   {
        {
                0xC0000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0xC0000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT2 =
   {
/* Map the kernel with BATs?  Set the #if below to 0 for yes */
#if 1
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#else
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#endif
   };
BAT BAT3 =
   {
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT TMP_BAT2 =
   { /* 0x9XXXXXXX -> 0x0XXXXXXX */
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };

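/*
 * A note on _SDR1 (an architectural summary, not from this file): on
 * 32-bit PowerPC the SDR1 register holds the physical base address of
 * the hash table in its high-order bits and the hash table size mask
 * in its low-order bits, which is why MMU_init below builds it as
 * (physical address of Hash) | Hash_mask.
 */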
unsigned long _SDR1;            /* Hardware SDR1 image */
PTE *Hash;
int Hash_size, Hash_mask;
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

#define NUM_MAPPINGS 8
struct
   {
        int va, pa, task;
   } last_mappings[NUM_MAPPINGS];
int next_mapping = 0;

/* Generic linked list */
struct item
   {
        struct item *next;
   };

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS    16
#define MAX_MMU_PAGES   8

static struct item _free_pages;
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];

/*
 * Routines to support generic linked lists.
 */

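/*
 * A minimal usage sketch (illustrative only): the header is a dummy
 * struct item whose next field points at the first element, so a free
 * MMU page can be pushed and popped like this:
 *
 *      MMU_free_item(&_free_pages, (struct item *)page_addr);
 *      page = (char *)MMU_get_item(&_free_pages);    NULL when empty
 */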
void MMU_free_item(struct item *hdr, struct item *elem)
{
        /* Push elem on the front of the list headed by hdr; this also
           covers the empty-list case, since hdr->next is then NULL */
        elem->next = hdr->next;
        hdr->next = elem;
}

struct item *
MMU_get_item(struct item *hdr)
{
        struct item *item;

        /* Pop the first item, if any; NULL means the list is empty */
        if ((item = hdr->next) != (struct item *)NULL)
        {
                hdr->next = item->next;
        }
        return (item);
}

/*
 * This code is called to create a minimal mapped environment.
 * It is called with the MMU on, but with only a BAT register
 * set up to cover the code/data.  After this routine runs,
 * the BAT mapping is withdrawn and all mappings must be complete.
 */

extern char _start[], _end[];

void MMU_init(void)
{
        int i, p;
        SEGREG *segs;
/*      _printk("MMU init - started\n");*/
        find_end_of_memory();
/*      _printk("  Start at 0x%08X, End at 0x%08X, Hash at 0x%08X\n", _start, _end, Hash);*/
        /* SDR1 = physical address of the hash table | hash table mask */
        _SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
        /* Carve the static mmu_pages[] buffer into page-aligned MMU pages */
        p = (int)mmu_pages;
        p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
        _free_pages.next = (struct item *)NULL;
        for (i = 0;  i < MAX_MMU_PAGES;  i++)
        {
                MMU_free_item(&_free_pages, (struct item *)p);
                p += MMU_PAGE_SIZE;
        }
        /* Force initial page tables */
        /*swapper_pg_dir = (pgd_t *)MMU_get_page();*/
        init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

        /* Segment registers: one VSID per 256MB segment */
        segs = (SEGREG *)init_task.tss.segs;
        for (i = 0;  i < 16;  i++)
        {
                segs[i].ks = 0;
                segs[i].kp = 1;
                segs[i].vsid = i;
        }
        /* Map kernel TEXT+DATA+BSS */
#if 0
        for (i = (int)_start;  i <= (int)_end;  i += MMU_PAGE_SIZE)
#else
        /* Other parts of the kernel expect ALL RAM to be mapped */
        for (i = (int)_start;  i <= (int)Hash;  i += MMU_PAGE_SIZE)
#endif
        {
                /* VA 0x90XXXXXX maps 1:1 onto PA 0x00XXXXXX */
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
        /* Map hardware HASH table */
        for (i = (int)Hash;  i < (int)Hash+Hash_size;  i += MMU_PAGE_SIZE)
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
/*      _printk("MMU init - done!\n");*/
}

pte *
MMU_get_page(void)
{
        pte *pg;

        if ((pg = (pte *)MMU_get_item(&_free_pages)))
        {
                bzero((char *)pg, MMU_PAGE_SIZE);
        }
/*      _printk("MMU Allocate Page at %08X\n", pg);*/
        return(pg);
}

void MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
        pte *pd, *pg;

        /* Debug: trace every mapping outside the kernel's own region */
        if (va < (unsigned long)0x90000000)
                _printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
        if (tss->pg_tables == NULL)
        { /* Allocate upper level page map */
                tss->pg_tables = (unsigned long *)MMU_get_page();
                if (tss->pg_tables == NULL)
                {
                        _panic("Out of MMU pages (PD)\n");
                }
        }
        /* Use upper 10 bits of VA to index the first level map */
        pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
        if (pd == (pte *)NULL)
        { /* Need to allocate second-level table */
                pd = (pte *)MMU_get_page();
                if (pd == (pte *)NULL)
                {
                        _panic("Out of MMU pages (PG)\n");
                }
                ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
        }
        /* Use middle 10 bits of VA to index the second-level map */
        pg = &pd[(va>>PT_SHIFT)&PT_MASK];
        *(long *)pg = 0;  /* Clear out entry */
        pg->page_num = pa>>PG_SHIFT;
        pg->flags = flags;
        MMU_hash_page(tss, va, pg);
}
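
/*
 * Illustrative index split for the walk above, assuming PD_SHIFT == 22,
 * PT_SHIFT == 12 and 10-bit PD_MASK/PT_MASK (the numbers are made up):
 *
 *      va = 0x9012A000
 *      first-level index  = (va >> 22) & 0x3FF = 0x240
 *      second-level index = (va >> 12) & 0x3FF = 0x12A
 */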

/*
 * Insert (create) a hardware page table entry.  Called with pg == NULL
 * to pull an entry out instead; in that case the hardware-collected
 * referenced (R) and changed (C) bits are returned to the caller as
 * _PAGE_ACCESSED/_PAGE_DIRTY flags.
 */
int MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;
        extern char _etext;

/*      printk("hashing tss = %x va = %x pg = %x\n", tss, va, pg);*/
/* TEMP: record the last few mappings for post-mortem debugging */
        last_mappings[next_mapping].va = va;
        last_mappings[next_mapping].pa = pg?*(int *)pg:0;
        last_mappings[next_mapping].task = (int)current;
        if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
/* TEMP */
        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;         /* Abbreviated page index */
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
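
        /*
         * Sketch of the search below (PowerPC hashed page table): the
         * primary hash is the XOR of the VSID with the page index; the
         * secondary hash is its ones-complement.  Each hash selects a
         * group of eight PTEs.  A worked example with made-up numbers,
         * assuming vsid = 3:
         *
         *      va         = 0x30017000
         *      page_index = (va & 0x0FFFF000) >> 12 = 0x17
         *      api        = page_index >> 10        = 0
         *      hash0      = page_index ^ vsid       = 0x14
         *      hash1      = ~hash0 (masked)
         */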
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                /* Save slot addresses in case we have to purge */
                if (_h)
                {
                        slot1 = _pte;
                } else
                {
                        slot0 = _pte;
                }
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
        if (slot == (PTE *)NULL)
        {
                if (pg == (pte *)NULL)
                {
                        return (0);
                }
                if (empty == (PTE *)NULL)
                { /* Table is totally full! */
                        printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
                        printk("Slot0:\n");
                        _pte = slot0;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        printk("Slot1:\n");
                        _pte = slot1;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        cnpause();
                        printk("Last mappings:\n");
                        for (i = 0;  i < NUM_MAPPINGS;  i++)
                        {
                                printk("  VA: %08x, PA: %08X, TASK: %08X\n",
                                        last_mappings[next_mapping].va,
                                        last_mappings[next_mapping].pa,
                                        last_mappings[next_mapping].task);
                                if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
                        }
                        cnpause();
                        _panic("Hash table full!\n");
                }
                slot = empty;
        }
found_it:
#if 0
_printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
        _tlbie(va); /* Clear TLB */
        if (pg)
        { /* Fill in table */
                slot->v = 1;
                slot->vsid = vsid;
                slot->h = h;
                slot->api = api;
                if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
                {
                        slot->rpn = pg->page_num - (KERNELBASE>>12);
                } else
                {
                        slot->rpn = pg->page_num;
                }
                slot->r = 0;
                slot->c = 0;
                slot->i = 0;
                slot->g = 0;
                if (cache_is_copyback)
                {
                        if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
                        { /* All User & Kernel TEXT pages are copy-back */
                                slot->w = 0;
                                slot->m = 1;
                        } else
                        { /* Kernel DATA pages are write-thru */
                                slot->w = 1;
                                slot->m = 0;
                        }
                } else
                { /* Cache is write-thru for everything */
                        slot->w = 1;
                        slot->m = 0;
                }
                if (pg->flags & _PAGE_USER)
                {
                        if (pg->flags & _PAGE_RW)
                        { /* Read/write page */
                                perms = PP_RWRW;
                        } else
                        { /* Read only page */
                                perms = PP_RWRX;
                        }
                } else
                { /* Kernel pages */
                        perms = PP_RWXX;
                }
#ifdef SHOW_FAULTS
                if (va < KERNELBASE)
                        _printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
#endif
                slot->pp = perms;
                return (0);
        } else
        { /* Pull entry from tables */
                int flags = 0;
                if (slot->r) flags |= _PAGE_ACCESSED;
                if (slot->c) flags |= _PAGE_DIRTY;
                slot->v = 0;
#ifdef SHOW_FAULTS
                _printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
                return (flags);
        }
}


/*
 * Invalidate the MMU [hardware] tables for the current task: walk the
 * user-space page tables below KERNELBASE and pull each present entry
 * from the hash table, folding the hardware R/C bits back into the
 * Linux page tables.
 */
void
invalidate(void)
{
  int i, j, flags;
  unsigned long address;
  pgd_t *pgd;
  pte_t *_pte;

#if 0
  _tlbia();  /* Flush TLB entries */
#endif
  pgd = pgd_offset(current->mm, 0);
  if (!pgd) return;  /* No map? */
  address = 0;
  for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
  {
    if (*(long *)pgd)
    {
      /* I know there are only two levels, but the macros don't */
      _pte = pte_offset(pmd_offset(pgd,0),0);
      if (_pte)
      {
        for (j = 0;  j < PTRS_PER_PTE;  j++)
        {
          if (pte_present(*_pte))
          {
            /* Evict the hash table entry and recover its R/C bits */
            flags = MMU_hash_page(&current->tss, address, 0);
            ((pte *)_pte)->flags |= flags;
          }
          _pte++;
          address += PAGE_SIZE;
        }
      } else
      {
        address += PAGE_SIZE*PTRS_PER_PTE;
      }
    } else
    {
      address += PAGE_SIZE*PTRS_PER_PTE;
    }
    pgd++;
  }
}


void
cache_mode(char *str, int *ints)
{
        /* Kernel setup convention: ints[0] holds the argument count,
           so the first value is in ints[1] */
        cache_is_copyback = ints[1];
}


