arch/ppc/mm/init.c


DEFINITIONS

This source file includes the following definitions.
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo
  8. MMU_free_item
  9. MMU_get_item
  10. MMU_init
  11. MMU_get_page
  12. MMU_map_page
  13. MMU_hash_page
  14. invalidate
  15. cache_mode

/*
 *  arch/ppc/mm/init.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Ported to PPC by Gary Thomas
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/pgtable.h>


/* made this a static array since alpha and intel aren't.
   thomas made it a dynamic array and had to add lots of stuff to other parts
   of linux to make sure the pages were contiguous and such.  the static array
   seems much easier.
   making it 8k for now.  will change later.
      -- Cort
 */
pgd_t swapper_pg_dir[1024*8];
/*pgd_t *swapper_pg_dir;*/

pte *MMU_get_page(void);
void MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags);
int MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg);


#if 0
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
#endif

#ifdef CONFIG_DESKSTATION_TYNE
extern void deskstation_tyne_dma_init(void);
#endif
#ifdef CONFIG_SCSI
extern void scsi_mem_init(unsigned long);
#endif
#ifdef CONFIG_SOUND
extern void sound_mem_init(void);
#endif
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        panic("__bad_pagetable");
#if 0
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"r" (pte_val(BAD_PAGE)),
                 "0" ((long) empty_bad_page_table),
                 "1" (PTRS_PER_PAGE));

        return (pte_t *) empty_bad_page_table;
#endif
}

pte_t __bad_page(void)
{
        panic("__bad_page");
#if 0
        extern char empty_bad_page[PAGE_SIZE];
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,1\n\t"
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"0" ((long) empty_bad_page),
                 "1" (PTRS_PER_PAGE));

        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
#endif
}

unsigned long __zero_page(void)
{
#if 0
        panic("__zero_page");
#else
        extern char empty_zero_page[PAGE_SIZE];
        bzero(empty_zero_page, PAGE_SIZE);
        return (unsigned long) empty_zero_page;
#endif
}

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
#if 0
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
                if (pgd_none(pg_dir[0])) {
                        pgd_set(pg_dir, (pte_t *) start_mem);
                        start_mem += PAGE_SIZE;
                }
                /*
                 * also map it in at 0x00000000 for init
                 */
                pg_table = (pte_t *) pgd_page(pg_dir[0]);
                pgd_set(pg_dir, pg_table);
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                *pg_table = mk_pte(address, PAGE_SHARED);
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
#if KERNELBASE == KSEG0
        cacheflush();
#endif
        invalidate();
#endif
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

#if 0
        printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)].reserved = 0;
                start_mem += PAGE_SIZE;
        }
#ifdef CONFIG_DESKSTATION_TYNE
        deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
        scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
        {
                if (mem_map[MAP_NR(tmp)].reserved)
                {
                        /*
                         * We don't have any reserved pages on the
                         * MIPS systems supported until now
                         */
                        if (0)
                        {
                                reservedpages++;
                        } else if (tmp < (unsigned long) &etext)
                        {
                                codepages++;
                        } else
                        {
                                datapages++;
                        }
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                ((int)high_memory - (int)KERNELBASE) >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
        invalidate();
        return;
}

void si_meminfo(struct sysinfo *val)
{
#if 0
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
#endif
}

/* Kernel MMU setup & lowest level hardware support */

/* Hardwired MMU segments */

/* Segment 0x8XXXXXXX, 0xCXXXXXXX always mapped (for I/O) */
/* Segment 0x9XXXXXXX mapped during init */

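/*
 * Each BAT (Block Address Translation) register pair maps one large,
 * contiguous region without consuming hash table entries.  The upper
 * half holds the effective address (bepi), block size mask (bl) and
 * supervisor/user valid bits (vs/vp); the lower half holds the real
 * address (brpn), the WIMG storage attributes and the protection
 * bits (pp).  The >>17 shifts keep only the upper 15 address bits,
 * since BAT blocks are aligned to at least 128K.
 */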
BAT BAT0 =
   {
        {
                0x80000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x80000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT1 =
   {
        {
                0xC0000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0xC0000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                1,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT BAT2 =
   {
#if 1
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#else
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
#endif
   };
BAT BAT3 =
   {
        {
                0x00000000>>17,         /* bepi */
                BL_256M,                /* bl */
                0,                      /* vs */
                0,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                1,                      /* i (cache disabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };
BAT TMP_BAT2 =
   { /* 0x9XXXXXXX -> 0x0XXXXXXX */
        {
                0x90000000>>17,         /* bepi */
                BL_256M,                /* bl */
                1,                      /* vs */
                1,                      /* vp */
        },
        {
                0x00000000>>17,         /* brpn */
                1,                      /* w */
                0,                      /* i (cache enabled) */
                0,                      /* m */
                0,                      /* g */
                BPP_RW                  /* pp */
        }
   };

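/*
 * SDR1 tells the MMU where the hash table lives: the upper bits hold
 * the physical base address of the table (HTABORG) and the low-order
 * bits hold the mask (HTABMASK) that decides how many hash bits are
 * used to index it.  MMU_init() builds this image from Hash and
 * Hash_mask (the & 0x00FFFFFF strips the kernel virtual offset to get
 * a physical address); it is loaded into the hardware register later.
 */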
unsigned long _SDR1;            /* Hardware SDR1 image */
PTE *Hash;
int Hash_size, Hash_mask;
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

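/*
 * Debug aid: a small ring buffer recording the most recent virtual to
 * physical mappings, dumped by MMU_hash_page() when the hash table
 * overflows.
 */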
#define NUM_MAPPINGS 8
struct
   {
        int va, pa, task;
   } last_mappings[NUM_MAPPINGS];
int next_mapping = 0;

/* Generic linked list */
struct item
   {
        struct item *next;
   };

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS    16
#define MAX_MMU_PAGES   8

static struct item _free_pages;
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];

/*
 * Routines to support generic linked lists.
 */

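/*
 * The free page pool is a simple LIFO list threaded through the first
 * word of each free page: MMU_free_item() pushes an element onto the
 * front of the list and MMU_get_item() pops one off.
 */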
void
MMU_free_item(struct item *hdr, struct item *elem)
{
        /* Push 'elem' onto the front of the list; hdr->next is NULL
           for an empty list, so no special case is needed. */
        elem->next = hdr->next;
        hdr->next = elem;
}

struct item *
MMU_get_item(struct item *hdr)
{
        struct item *item;
        if ((item = hdr->next) != (struct item *)NULL)
        {
                hdr->next = item->next;
        }
        return (item);
}

/*
 * This code is called to create a minimal mapped environment.
 * It is called with the MMU on, but with only a BAT register
 * set up to cover the code/data.  After this routine runs,
 * the BAT mapping is withdrawn and all mappings must be complete.
 */

extern char _start[], _end[];

void MMU_init(void)
{
        int i, p;
        SEGREG *segs;
        _printk("MMU init - started\n");
        find_end_of_memory();
        _printk("  Start at 0x%08X, End at 0x%08X, Hash at 0x%08X\n", _start, _end, Hash);
        _SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
        p = (int)mmu_pages;
        p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
        _free_pages.next = (struct item *)NULL;
        for (i = 0;  i < MAX_MMU_PAGES;  i++)
        {
                MMU_free_item(&_free_pages, (struct item *)p);
                p += MMU_PAGE_SIZE;
        }
        /* Force initial page tables */
        /*swapper_pg_dir = (pgd_t *)MMU_get_page();*/
        init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

        /* Segment registers */
        segs = (SEGREG *)init_task.tss.segs;
        for (i = 0;  i < 16;  i++)
        {
                segs[i].ks = 0;
                segs[i].kp = 1;
                segs[i].vsid = i;
        }
        /* Map kernel TEXT+DATA+BSS */
#if 0
        for (i = (int)_start;  i <= (int)_end;  i += MMU_PAGE_SIZE)
#else
        /* Other parts of the kernel expect ALL RAM to be mapped */
        for (i = (int)_start;  i <= (int)Hash;  i += MMU_PAGE_SIZE)
#endif
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
        /* Map hardware HASH table */
        for (i = (int)Hash;  i < (int)Hash+Hash_size;  i += MMU_PAGE_SIZE)
        {
                MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
        }
        _printk("MMU init - done!\n");
}

pte *
MMU_get_page(void)
{
        pte *pg;
        if ((pg = (pte *)MMU_get_item(&_free_pages)))
        {
                bzero((char *)pg, MMU_PAGE_SIZE);
        }
        _printk("MMU Allocate Page at %08X\n", pg);
        return(pg);
}

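/*
 * Enter a mapping in the two-level software page table: the top 10
 * bits of the virtual address index the page directory, the middle 10
 * bits index the second-level page table (either level is allocated
 * on demand), and the low 12 bits are the offset within the page.
 * The new entry is then pushed into the hardware hash table as well.
 */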
void
MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
        pte *pd, *pg;
        if (va < (unsigned long)0x90000000)
                _printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
        if ((pte **)tss->pg_tables == (pte **)NULL)
        { /* Allocate upper level page map */
                (pte **)tss->pg_tables = (pte **)MMU_get_page();
                if ((pte **)tss->pg_tables == (pte **)NULL)
                {
                        _panic("Out of MMU pages (PD)\n");
                }
        }
        /* Use upper 10 bits of VA to index the first level map */
        pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
        if (pd == (pte *)NULL)
        { /* Need to allocate second-level table */
                pd = (pte *)MMU_get_page();
                if (pd == (pte *)NULL)
                {
                        _panic("Out of MMU pages (PG)\n");
                }
                ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
        }
        /* Use middle 10 bits of VA to index the second-level map */
        pg = &pd[(va>>PT_SHIFT)&PT_MASK];
        *(long *)pg = 0;  /* Clear out entry */
        pg->page_num = pa>>PG_SHIFT;
        pg->flags = flags;
        MMU_hash_page(tss, va, pg);
}

/*
 * Insert(create) a hardware page table entry
 */
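/*
 * The hash table is probed twice: the primary hash is
 * (page index XOR vsid) and the secondary hash is its ones-complement,
 * each selecting a group of eight PTEs.  For example, with
 * va = 0x30654000 (segment 3, hence vsid = 3 under the 1:1 segment
 * setup done in MMU_init), page_index = 0x0654 and api = 0x01; the
 * primary hash is 0x0654 ^ 3 = 0x0657, which is masked by Hash_mask
 * and multiplied by eight to index the bucket.  A matching entry is
 * reused, otherwise the first empty slot found in either bucket is
 * claimed.  Called with pg == NULL, the routine instead removes the
 * entry for va and returns its accumulated R/C state.
 */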
int
MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
        int hash, page_index, segment, i, h, _h, api, vsid, perms;
        PTE *_pte, *empty, *slot;
        PTE *slot0, *slot1;
        extern char _etext;

/*      printk("hashing tss = %x va = %x pg = %x\n", tss, va, pg);*/
        /* TEMP: record this mapping in the debug ring buffer */
        last_mappings[next_mapping].va = va;
        last_mappings[next_mapping].pa = pg?*(int *)pg:0;
        last_mappings[next_mapping].task = (int)current;
        if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;

        page_index = ((int)va & 0x0FFFF000) >> 12;
        segment = (unsigned int)va >> 28;
        api = page_index >> 10;
        vsid = ((SEGREG *)tss->segs)[segment].vsid;
        empty = slot = (PTE *)NULL;
        for (_h = 0;  _h < 2;  _h++)
        {
                hash = page_index ^ vsid;
                if (_h)
                {
                        hash = ~hash;  /* Secondary hash uses ones-complement */
                }
                hash &= 0x3FF | (Hash_mask << 10);
                hash *= 8;  /* Eight entries / hash bucket */
                _pte = &Hash[hash];
                /* Save slot addresses in case we have to purge */
                if (_h)
                {
                        slot1 = _pte;
                } else
                {
                        slot0 = _pte;
                }
                for (i = 0;  i < 8;  i++, _pte++)
                {
                        if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
                        { /* Found it! */
                                h = _h;
                                slot = _pte;
                                goto found_it;
                        }
                        if ((empty == (PTE *)NULL) && !_pte->v)
                        {
                                h = _h;
                                empty = _pte;
                        }
                }
        }
        if (slot == (PTE *)NULL)
        {
                if (pg == (pte *)NULL)
                {
                        return (0);
                }
                if (empty == (PTE *)NULL)
                { /* Table is totally full! */
                        printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
                        printk("Slot0:\n");
                        _pte = slot0;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        printk("Slot1:\n");
                        _pte = slot1;
                        for (i = 0;  i < 8;  i++, _pte++)
                        {
                                printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
                        }
                        cnpause();
                        printk("Last mappings:\n");
                        for (i = 0;  i < NUM_MAPPINGS;  i++)
                        {
                                printk("  VA: %08x, PA: %08X, TASK: %08X\n",
                                        last_mappings[next_mapping].va,
                                        last_mappings[next_mapping].pa,
                                        last_mappings[next_mapping].task);
                                if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
                        }
                        cnpause();
                        _panic("Hash table full!\n");
                }
                slot = empty;
        }
found_it:
#if 0
        _printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
        _tlbie(va); /* Clear TLB */
        if (pg)
        { /* Fill in table */
                slot->v = 1;
                slot->vsid = vsid;
                slot->h = h;
                slot->api = api;
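                /*
                 * Kernel virtual addresses sit at KERNELBASE; strip
                 * that offset so the hash table entry holds the real
                 * (physical) page number.
                 */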
                if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
                {
                        slot->rpn = pg->page_num - (KERNELBASE>>12);
                } else
                {
                        slot->rpn = pg->page_num;
                }
                slot->r = 0;
                slot->c = 0;
                slot->i = 0;
                slot->g = 0;
                if (cache_is_copyback)
                {
                        if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
                        { /* All User & Kernel TEXT pages are copy-back */
                                slot->w = 0;
                                slot->m = 1;
                        } else
                        { /* Kernel DATA pages are write-thru */
                                slot->w = 1;
                                slot->m = 0;
                        }
                } else
                {
                        slot->w = 1;
                        slot->m = 0;
                }
                if (pg->flags & _PAGE_USER)
                {
                        if (pg->flags & _PAGE_RW)
                        { /* Read/write page */
                                perms = PP_RWRW;
                        } else
                        { /* Read only page */
                                perms = PP_RWRX;
                        }
                } else
                { /* Kernel pages */
                        perms = PP_RWXX;
                }
#ifdef SHOW_FAULTS
                if (va < KERNELBASE)
                        _printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
#endif
                slot->pp = perms;
                return (0);
        } else
        { /* Pull entry from tables */
                int flags = 0;
                if (slot->r) flags |= _PAGE_ACCESSED;
                if (slot->c) flags |= _PAGE_DIRTY;
                slot->v = 0;
#ifdef SHOW_FAULTS
                _printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
                return (flags);
        }
}

/*
 * Invalidate the MMU [hardware] tables (for current task?)
 */
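/*
 * Walks the current task's user mappings (below KERNELBASE) and pulls
 * each present page out of the hash table via MMU_hash_page(tss, va, 0),
 * folding the hardware referenced/changed bits back into the Linux
 * pte flags so that accessed/dirty state is not lost.
 */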
void
invalidate(void)
{
  int i, j, flags;
  unsigned long address;
  pgd_t *pgd;
  pte_t *_pte;
#if 0
  _tlbia();  /* Flush TLB entries */
#endif
  pgd = pgd_offset(current->mm, 0);
  if (!pgd) return;  /* No map? */
  address = 0;
  for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
  {
    if (*(long *)pgd)
    {
      /* I know there are only two levels, but the macros don't */
      _pte = pte_offset(pmd_offset(pgd,0),0);
      if (_pte)
      {
        for (j = 0;  j < PTRS_PER_PTE;  j++)
        {
          if (pte_present(*_pte))
          {
            flags = MMU_hash_page(&current->tss, address, 0);
            ((pte *)_pte)->flags |= flags;
          }
          _pte++;
          address += PAGE_SIZE;
        }
      } else
      {
        address += PAGE_SIZE*PTRS_PER_PTE;
      }
    } else
    {
      address += PAGE_SIZE*PTRS_PER_PTE;
    }
    pgd++;
  }
}

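/*
 * Boot-time hook selecting copy-back vs write-through data caching.
 * Note: under the traditional Linux setup-function convention ints[0]
 * holds the argument count and ints[1] the first value, so the
 * indexing here deserves a second look if this ever gets wired to a
 * command-line option.
 */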
void
cache_mode(char *str, int *ints)
{
        cache_is_copyback = ints[0];
}
