root/arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_swap
  2. gensrmmu_read_physical
  3. msparc_read_physical
  4. gensrmmu_write_physical
  5. msparc_write_physical
  6. srmmu_pmd_align
  7. srmmu_pgdir_align
  8. srmmu_vmalloc_start
  9. srmmu_pgd_page
  10. srmmu_pmd_page
  11. srmmu_pte_page
  12. srmmu_pte_none
  13. srmmu_pte_present
  14. srmmu_pte_inuse
  15. srmmu_pte_clear
  16. srmmu_pte_reuse
  17. srmmu_pmd_none
  18. srmmu_pmd_bad
  19. srmmu_pmd_present
  20. srmmu_pmd_inuse
  21. srmmu_pmd_clear
  22. srmmu_pmd_reuse
  23. srmmu_pgd_none
  24. srmmu_pgd_bad
  25. srmmu_pgd_present
  26. srmmu_pgd_inuse
  27. srmmu_pgd_clear
  28. srmmu_pgd_reuse
  29. srmmu_pte_write
  30. srmmu_pte_dirty
  31. srmmu_pte_young
  32. srmmu_pte_wrprotect
  33. srmmu_pte_mkclean
  34. srmmu_pte_mkold
  35. srmmu_pte_mkwrite
  36. srmmu_pte_mkdirty
  37. srmmu_pte_mkyoung
  38. srmmu_mk_pte
  39. srmmu_mk_pte_io
  40. srmmu_ctxd_set
  41. srmmu_pgd_set
  42. srmmu_pmd_set
  43. srmmu_pte_modify
  44. srmmu_pgd_offset
  45. srmmu_pmd_offset
  46. srmmu_pte_offset
  47. srmmu_update_rootmmu_dir
  48. srmmu_pte_free_kernel
  49. srmmu_pte_alloc_kernel
  50. srmmu_pmd_free_kernel
  51. srmmu_pmd_alloc_kernel
  52. srmmu_pte_free
  53. srmmu_pte_alloc
  54. srmmu_pmd_free
  55. srmmu_pmd_alloc
  56. srmmu_pgd_free
  57. srmmu_pgd_alloc
  58. tsunami_invalidate_all
  59. tsunami_invalidate_mm
  60. tsunami_invalidate_range
  61. tsunami_invalidate_page
  62. swift_invalidate_all
  63. swift_invalidate_mm
  64. swift_invalidate_range
  65. swift_invalidate_page
  66. viking_invalidate_all
  67. viking_invalidate_mm
  68. viking_invalidate_range
  69. viking_invalidate_page
  70. cypress_invalidate_all
  71. cypress_invalidate_mm
  72. cypress_invalidate_range
  73. cypress_invalidate_page
  74. hypersparc_invalidate_all
  75. hypersparc_invalidate_mm
  76. hypersparc_invalidate_range
  77. hypersparc_invalidate_page
  78. srmmu_set_pte
  79. srmmu_quick_kernel_fault
  80. alloc_context
  81. srmmu_switch_to_context
  82. srmmu_mapioaddr
  83. srmmu_lockarea
  84. srmmu_unlockarea
  85. srmmu_map_dvma_pages_for_iommu
  86. srmmu_uncache_iommu_page_table
  87. iommu_init
  88. srmmu_get_scsi_buffer
  89. srmmu_release_scsi_buffer
  90. srmmu_alloc_task_struct
  91. srmmu_alloc_kernel_stack
  92. srmmu_free_task_struct
  93. srmmu_free_kernel_stack
  94. srmmu_init_alloc
  95. srmmu_allocate_ptable_skeleton
  96. srmmu_inherit_prom_mappings
  97. srmmu_map_dvma_pages_for_cpu
  98. srmmu_map_kernel
  99. srmmu_paging_init
  100. srmmu_test_wp
  101. srmmu_mmu_info
  102. srmmu_update_mmu_cache
  103. srmmu_exit_hook
  104. srmmu_flush_hook
  105. srmmu_is_bad
  106. init_hypersparc
  107. init_cypress_common
  108. init_cypress_604
  109. init_cypress_605
  110. init_swift
  111. init_tsunami
  112. init_viking
  113. get_srmmu_type
  114. patch_window_trap_handlers
  115. ld_mmu_srmmu

   1 /* $Id: srmmu.c,v 1.34 1996/03/01 07:16:23 davem Exp $
   2  * srmmu.c:  SRMMU specific routines for memory management.
   3  *
   4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   5  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
   6  */
   7 
   8 #include <linux/kernel.h>
   9 
  10 #include <asm/page.h>
  11 #include <asm/pgtable.h>
  12 #include <asm/io.h>
  13 #include <asm/kdebug.h>
  14 #include <asm/vaddrs.h>
  15 #include <asm/traps.h>
  16 #include <asm/mp.h>
  17 #include <asm/mbus.h>
  18 #include <asm/cache.h>
  19 #include <asm/oplib.h>
  20 #include <asm/sbus.h>
  21 #include <asm/iommu.h>
  22 
  23 /* Now the cpu specific definitions. */
  24 #include <asm/viking.h>
  25 #include <asm/ross.h>
  26 #include <asm/tsunami.h>
  27 #include <asm/swift.h>
  28 
  29 enum mbus_module srmmu_modtype;
  30 unsigned int hwbug_bitmask;
  31 
  32 int hyper_cache_size;
  33 
  34 ctxd_t *srmmu_context_table;
  35 
  36 /* In general all page table modifications should use the V8 atomic
   37  * swap instruction.  This ensures the mmu and the cpu are in sync
  38  * with respect to ref/mod bits in the page tables.
  39  */
  40 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
  41 {
  42         __asm__ __volatile__("swap [%1], %0\n\t" :
  43                              "=&r" (value), "=&r" (addr) :
  44                              "0" (value), "1" (addr));
  45         return value;
  46 }
  47 
  48 /* Functions really use this, not srmmu_swap directly. */
  49 #define srmmu_set_entry(ptr, newentry) \
  50         srmmu_swap((unsigned long *) (ptr), (newentry))
  51 
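/* Hedged illustration, not part of the original file: because the swap
 * above returns the previous entry, a caller can install a new pte and
 * still observe any ref/mod bits the mmu set concurrently, instead of
 * silently losing them to a plain store.  The helper name below is
 * hypothetical.
 */
static inline unsigned long srmmu_example_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        /* The old entry word comes back with any hardware-set bits intact. */
        return srmmu_set_entry(ptep, pte_val(pteval));
}
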
   52 /* We still don't use these at all; perhaps we don't need them
  53  * at all.
  54  */
  55 unsigned long (*srmmu_read_physical)(unsigned long paddr);
  56 void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
  57 
  58 static unsigned long gensrmmu_read_physical(unsigned long paddr)
  59 {
  60         unsigned long word;
  61 
  62         __asm__ __volatile__("lda [%1] %2, %0\n\t" :
  63                              "=r" (word) :
  64                              "r" (paddr), "i" (ASI_M_BYPASS) :
  65                              "memory");
  66         return word;
  67 }
  68 
  69 static unsigned long msparc_read_physical(unsigned long paddr)
  70 {
  71         unsigned long word, flags;
  72 
  73         save_flags(flags); cli();
  74         __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
  75                              "or  %%g1, %4, %%g2\n\t"
  76                              "sta %%g2, [%%g0] %3\n\t"
  77                              "lda [%1] %2, %0\n\t"
  78                              "sta %%g1, [%%g0] %3\n\t" :
  79                              "=r" (word) :
  80                              "r" (paddr), "i" (ASI_M_BYPASS),
  81                              "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
  82                              "g1", "g2", "memory");
  83         restore_flags(flags);
  84         return word;
  85 }
  86 
  87 static void gensrmmu_write_physical(unsigned long paddr, unsigned long word)
  88 {
  89         __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
  90                              "r" (word), "r" (paddr), "i" (ASI_M_BYPASS) :
  91                              "memory");
  92 }
  93 
  94 static void msparc_write_physical(unsigned long paddr, unsigned long word)
  95 {
  96         unsigned long flags;
  97 
  98         save_flags(flags); cli();
  99         __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
 100                              "or  %%g1, %4, %%g2\n\t"
 101                              "sta %%g2, [%%g0] %3\n\t"
 102                              "sta %0, [%1] %2\n\t"
 103                              "sta %%g1, [%%g0] %3\n\t" : :
 104                              "r" (word), "r" (paddr), "i" (ASI_M_BYPASS),
 105                              "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
 106                              "g1", "g2", "memory");
 107         restore_flags(flags);
 108 }
 109 
 110 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
 111 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
 112 
 113 static unsigned long srmmu_vmalloc_start(void)
 114 {
 115         return SRMMU_VMALLOC_START;
 116 }
 117 
 118 static unsigned long srmmu_pgd_page(pgd_t pgd)
 119 { return PAGE_OFFSET + ((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
 120 
 121 static unsigned long srmmu_pmd_page(pmd_t pmd)
 122 { return PAGE_OFFSET + ((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
 123 
 124 static unsigned long srmmu_pte_page(pte_t pte)
 125 { return PAGE_OFFSET + ((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
 126 
 127 static int srmmu_pte_none(pte_t pte)          { return !pte_val(pte); }
 128 static int srmmu_pte_present(pte_t pte)
 129 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
 130 
 131 static int srmmu_pte_inuse(pte_t *ptep)
 132 { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
 133 
 134 static void srmmu_pte_clear(pte_t *ptep)      { pte_val(*ptep) = 0; }
 135 static void srmmu_pte_reuse(pte_t *ptep)
 136 {
 137         if(!mem_map[MAP_NR(ptep)].reserved)
 138                 mem_map[MAP_NR(ptep)].count++;
 139 }
 140 
 141 static int srmmu_pmd_none(pmd_t pmd)          { return !pmd_val(pmd); }
 142 static int srmmu_pmd_bad(pmd_t pmd)
 143 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 144 
 145 static int srmmu_pmd_present(pmd_t pmd)
 146 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 147 
 148 static int srmmu_pmd_inuse(pmd_t *pmdp)
 149 { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
 150 
 151 static void srmmu_pmd_clear(pmd_t *pmdp)      { pmd_val(*pmdp) = 0; }
 152 static void srmmu_pmd_reuse(pmd_t * pmdp)
 153 {
 154         if (!mem_map[MAP_NR(pmdp)].reserved)
 155                 mem_map[MAP_NR(pmdp)].count++;
 156 }
 157 
 158 static int srmmu_pgd_none(pgd_t pgd)          { return !pgd_val(pgd); }
 159 static int srmmu_pgd_bad(pgd_t pgd)
 160 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 161 
 162 static int srmmu_pgd_present(pgd_t pgd)
 163 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 164 
 165 static int srmmu_pgd_inuse(pgd_t *pgdp)       { return mem_map[MAP_NR(pgdp)].reserved; }
 166 static void srmmu_pgd_clear(pgd_t * pgdp)     { pgd_val(*pgdp) = 0; }
 167 static void srmmu_pgd_reuse(pgd_t *pgdp)
 168 {
 169         if (!mem_map[MAP_NR(pgdp)].reserved)
 170                 mem_map[MAP_NR(pgdp)].count++;
 171 }
 172 
 173 static int srmmu_pte_write(pte_t pte)         { return pte_val(pte) & SRMMU_WRITE; }
 174 static int srmmu_pte_dirty(pte_t pte)         { return pte_val(pte) & SRMMU_DIRTY; }
 175 static int srmmu_pte_young(pte_t pte)         { return pte_val(pte) & SRMMU_REF; }
 176 
 177 static pte_t srmmu_pte_wrprotect(pte_t pte)   { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
 178 static pte_t srmmu_pte_mkclean(pte_t pte)     { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
 179 static pte_t srmmu_pte_mkold(pte_t pte)       { pte_val(pte) &= ~SRMMU_REF; return pte; }
 180 static pte_t srmmu_pte_mkwrite(pte_t pte)     { pte_val(pte) |= SRMMU_WRITE; return pte; }
 181 static pte_t srmmu_pte_mkdirty(pte_t pte)     { pte_val(pte) |= SRMMU_DIRTY; return pte; }
 182 static pte_t srmmu_pte_mkyoung(pte_t pte)     { pte_val(pte) |= SRMMU_REF; return pte; }
 183 
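/* Hedged sketch, not from the original source: the modifier helpers
 * above are meant to be chained, e.g. marking a pte both referenced and
 * dirty after a successful write fault and writing it back through the
 * atomic entry setter.  The helper name is hypothetical.
 */
static inline void srmmu_example_mark_written(pte_t *ptep)
{
        pte_t pte = srmmu_pte_mkdirty(srmmu_pte_mkyoung(*ptep));

        srmmu_set_entry(ptep, pte_val(pte));
}
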
 184 /*
 185  * Conversion functions: convert a page and protection to a page entry,
 186  * and a page entry and page directory to the page they refer to.
 187  */
 188 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 189 { pte_t pte; pte_val(pte) = ((page - PAGE_OFFSET) >> 4) | pgprot_val(pgprot); return pte; }
 190 
 191 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot)
 192 { pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
 193 
 194 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 195 { srmmu_set_entry(ctxp, (SRMMU_ET_PTD | ((((unsigned long) pgdp) - PAGE_OFFSET) >> 4))); }
 196 
 197 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 198 { srmmu_set_entry(pgdp, (SRMMU_ET_PTD | ((((unsigned long) pmdp) - PAGE_OFFSET) >> 4))); }
 199 
 200 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 201 { srmmu_set_entry(pmdp, (SRMMU_ET_PTD | ((((unsigned long) ptep) - PAGE_OFFSET) >> 4))); }
 202 
 203 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 204 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
 205 
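/* Hedged illustration, not in the original file: the >> 4 encoding used
 * above means an srmmu table entry holds the physical address shifted
 * right by four bits, leaving the low byte for the type, access and
 * cacheable bits.  A round trip through mk_pte and pte_page therefore
 * recovers the page we started from.  The helper name is hypothetical.
 */
static inline int srmmu_example_pte_roundtrip(unsigned long page)
{
        pte_t pte = srmmu_mk_pte(page, SRMMU_PAGE_KERNEL);

        return (srmmu_pte_page(pte) == (page & PAGE_MASK));
}
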
 206 /* to find an entry in a top-level page table... */
 207 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 208 {
 209         return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 210 }
 211 
 212 /* Find an entry in the second-level page table.. */
 213 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 214 {
 215         return (pmd_t *) pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 216 }
 217 
 218 /* Find an entry in the third-level page table.. */ 
 219 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
 220 {
 221         return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 222 }
 223 
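/* Hedged sketch, not part of the original source: the three offset
 * helpers above compose into a full software walk of the three level
 * srmmu page tables.  The helper name is hypothetical; it returns NULL
 * when an intermediate level has not been set up yet.
 */
static inline pte_t *srmmu_example_walk(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgdp = srmmu_pgd_offset(mm, address);
        pmd_t *pmdp;

        if(srmmu_pgd_none(*pgdp))
                return NULL;
        pmdp = srmmu_pmd_offset(pgdp, address);
        if(srmmu_pmd_none(*pmdp))
                return NULL;
        return srmmu_pte_offset(pmdp, address);
}
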
 224 /* This must update the context table entry for this process. */
 225 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) 
 226 {
 227         if(tsk->mm->context != NO_CONTEXT)
 228                 srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
 229 }
 230 
 231 /*
 232  * Allocate and free page tables. The xxx_kernel() versions are
 233  * used to allocate a kernel page table - this turns on ASN bits
 234  * if any, and marks the page tables reserved.
 235  */
 236 static void srmmu_pte_free_kernel(pte_t *pte)
 237 {
 238         mem_map[MAP_NR(pte)].reserved = 0;
 239         free_page((unsigned long) pte);
 240 }
 241 
 242 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 243 {
 244         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 245         if(srmmu_pmd_none(*pmd)) {
 246                 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
 247                 if(srmmu_pmd_none(*pmd)) {
 248                         if(page) {
 249                                 srmmu_pmd_set(pmd, page);
 250                                 mem_map[MAP_NR(page)].reserved = 1;
 251                                 return page + address;
 252                         }
 253                         srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 254                         return NULL;
 255                 }
 256                 free_page((unsigned long) page);
 257         }
 258         if(srmmu_pmd_bad(*pmd)) {
 259                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 260                 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 261                 return NULL;
 262         }
 263         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 264 }
 265 
 266 static void srmmu_pmd_free_kernel(pmd_t *pmd)
 267 {
 268         mem_map[MAP_NR(pmd)].reserved = 0;
 269         free_page((unsigned long) pmd);
 270 }
 271 
 272 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 273 {
 274         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 275         if(srmmu_pgd_none(*pgd)) {
 276                 pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
 277                 if(srmmu_pgd_none(*pgd)) {
 278                         if(page) {
 279                                 srmmu_pgd_set(pgd, page);
 280                                 mem_map[MAP_NR(page)].reserved = 1;
 281                                 return page + address;
 282                         }
 283                         srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 284                         return NULL;
 285                 }
 286                 free_page((unsigned long) page);
 287         }
 288         if(srmmu_pgd_bad(*pgd)) {
 289                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 290                 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 291                 return NULL;
 292         }
 293         return (pmd_t *) pgd_page(*pgd) + address;
 294 }
 295 
 296 static void srmmu_pte_free(pte_t *pte)
 297 {
 298         free_page((unsigned long) pte);
 299 }
 300 
 301 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 302 {
 303         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 304         if(srmmu_pmd_none(*pmd)) {
 305                 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
 306                 if(srmmu_pmd_none(*pmd)) {
 307                         if(page) {
 308                                 srmmu_pmd_set(pmd, page);
 309                                 return page + address;
 310                         }
 311                         srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 312                         return NULL;
 313                 }
 314                 free_page((unsigned long) page);
 315         }
 316         if(srmmu_pmd_bad(*pmd)) {
 317                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 318                 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 319                 return NULL;
 320         }
 321         return (pte_t *) pmd_page(*pmd) + address;
 322 }
 323 
 324 /* Real three-level page tables on SRMMU. */
 325 static void srmmu_pmd_free(pmd_t * pmd)
 326 {
 327         free_page((unsigned long) pmd);
 328 }
 329 
 330 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 331 {
 332         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 333         if(srmmu_pgd_none(*pgd)) {
 334                 pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
 335                 if(srmmu_pgd_none(*pgd)) {
 336                         if(page) {
 337                                 srmmu_pgd_set(pgd, page);
 338                                 return page + address;
 339                         }
 340                         srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 341                         return NULL;
 342                 }
 343                 free_page((unsigned long) page);
 344         }
 345         if(srmmu_pgd_bad(*pgd)) {
 346                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 347                 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 348                 return NULL;
 349         }
 350         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 351 }
 352 
 353 static void srmmu_pgd_free(pgd_t *pgd)
 354 {
 355         free_page((unsigned long) pgd);
 356 }
 357 
 358 static pgd_t *srmmu_pgd_alloc(void)
 359 {
 360         return (pgd_t *) get_free_page(GFP_KERNEL);
 361 }
 362 
  363 /* Tsunami invalidates.  Its page level tlb invalidation is not very
  364  * useful at all; you must be in the context that page exists in to
 365  * get a match.  It might be worthwhile to try someday though...
 366  */
 367 /* static */ inline void tsunami_invalidate_all(void)
 368 {
 369         tsunami_invalidate_icache();
 370         tsunami_invalidate_dcache();
 371         srmmu_flush_whole_tlb();
 372 }
 373 static void tsunami_invalidate_mm(struct mm_struct *mm)
 374 {
 375         tsunami_invalidate_all();
 376 }
 377 
 378 static void tsunami_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 379 {
 380         tsunami_invalidate_all();
 381 }
 382 
 383 /* XXX do page level tlb flushes at some point XXX */
 384 static void tsunami_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
 385 {
 386         tsunami_invalidate_all();
 387 }
 388 
 389 /* Swift invalidates.  It has the recommended SRMMU specification flushing
 390  * facilities, so we can do things in a more fine grained fashion than we
 391  * could on the tsunami.  Let's watch out for HARDWARE BUGS...
 392  */
 393 static inline void swift_invalidate_all(void)
 394 {
 395         unsigned long addr = 0;
 396 
 397         /* Invalidate all cache tags */
 398         for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16) {
 399                 swift_inv_insn_tag(addr); /* whiz- */
 400                 swift_inv_data_tag(addr); /* bang */
 401         }
 402         srmmu_flush_whole_tlb();
 403 }
 404 
 405 static void swift_invalidate_mm(struct mm_struct *mm)
 406 {
 407         unsigned long flags;
 408         int cc, ncc = mm->context;
 409 
 410         if(ncc == NO_CONTEXT)
 411                 return;
 412 
 413         /* have context will travel... */
 414         save_flags(flags); cli();
 415         cc = srmmu_get_context();
 416         if(cc != ncc)
 417                 srmmu_set_context(ncc);
 418 
 419         swift_flush_context(); /* POOF! */
 420         srmmu_flush_tlb_ctx(); /* POW! */
 421 
 422         if(cc != ncc)
 423                 srmmu_set_context(cc);
 424         restore_flags(flags);
 425 }
 426 
 427 static void swift_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 428 {
 429         unsigned long flags, addr;
 430         int cc, ncc = mm->context;
 431 
 432         if(ncc == NO_CONTEXT)
 433                 return;
 434 
 435         save_flags(flags); cli();
 436         cc = srmmu_get_context();
 437         if(cc != ncc)
 438                 srmmu_set_context(ncc);
 439 
 440         /* XXX Inefficient, we don't do the best we can... XXX */
 441         addr = start & SRMMU_PGDIR_MASK;
 442         while(addr < end) {
 443                 swift_flush_region(addr);
 444                 srmmu_flush_tlb_region(addr);
 445                 addr += SRMMU_PGDIR_SIZE;
 446         }
 447 
 448         if(cc != ncc)
 449                 srmmu_set_context(cc);
 450         restore_flags(flags);
 451 }
 452 
 453 static void swift_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
 454 {
 455         unsigned long flags;
 456         int cc, ncc = vmp->vm_mm->context;
 457 
 458         if(ncc == NO_CONTEXT)
 459                 return;
 460 
 461         save_flags(flags); cli();
 462         cc = srmmu_get_context();
 463         if(cc != ncc)
 464                 srmmu_set_context(ncc);
 465 
 466         swift_flush_page(page);
 467         srmmu_flush_tlb_page(page);
 468 
 469         if(cc != ncc)
 470                 srmmu_set_context(cc);
 471         restore_flags(flags);
 472 }
 473 
 474 /* The following are all MBUS based SRMMU modules, and therefore could
 475  * be found in a multiprocessor configuration.
 476  */
 477 
 478 /* Viking invalidates.  For Sun's mainline MBUS processor it is pretty much
 479  * a crappy mmu.  The on-chip I&D caches only have full flushes, no fine
 480  * grained cache invalidations.  It only has these "flash clear" things
 481  * just like the MicroSparcI.  Added to this many revs of the chip are
  483  * teeming with hardware buggery.
 483  *
 484  * XXX need to handle SMP broadcast invalidations! XXX
 485  */
 486 static inline void viking_invalidate_all(void)
 487 {
 488         viking_flush_icache();
 489         viking_flush_dcache();
 490         srmmu_flush_whole_tlb();
 491 }
 492 static void viking_invalidate_mm(struct mm_struct *mm)
 493 {
 494         unsigned long flags;
 495         int cc, ncc = mm->context;
 496 
 497         if(ncc == NO_CONTEXT)
 498                 return;
 499 
 500         save_flags(flags); cli();
 501         cc = srmmu_get_context();
 502         if(cc != ncc)
 503                 srmmu_set_context(ncc);
 504 
 505         viking_flush_icache();
 506         viking_flush_dcache();
 507         srmmu_flush_tlb_ctx();
 508 
 509         if(cc != ncc)
 510                 srmmu_set_context(cc);
 511         restore_flags(flags);
 512 }
 513 
 514 static void viking_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 515 {
 516         unsigned long flags, addr;
 517         int cc, ncc = mm->context;
 518 
 519         if(ncc == NO_CONTEXT)
 520                 return;
 521 
 522         save_flags(flags); cli();
 523         cc = srmmu_get_context();
 524         if(cc != ncc)
 525                 srmmu_set_context(ncc);
 526 
 527         /* XXX Inefficient, we don't do the best we can... XXX */
 528         viking_flush_icache();
 529         viking_flush_dcache();
 530         addr = start & SRMMU_PGDIR_MASK;
 531         while(addr < end) {
 532                 srmmu_flush_tlb_region(addr);
 533                 addr += SRMMU_PGDIR_SIZE;
 534         }
 535 
 536         if(cc != ncc)
 537                 srmmu_set_context(cc);
 538         restore_flags(flags);
 539 }
 540 static void viking_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
 541 {
 542         unsigned long flags;
 543         int cc, ncc = vmp->vm_mm->context;
 544 
 545         if(ncc == NO_CONTEXT)
 546                 return;
 547 
 548         save_flags(flags); cli();
 549         cc = srmmu_get_context();
 550         if(cc != ncc)
 551                 srmmu_set_context(ncc);
 552 
 553         viking_flush_icache();
 554         viking_flush_dcache();
 555         srmmu_flush_tlb_page(page);
 556 
 557         if(cc != ncc)
 558                 srmmu_set_context(cc);
 559         restore_flags(flags);
 560 }
 561 
 562 /* Cypress invalidates. */
 563 static inline void cypress_invalidate_all(void)
 564 {
 565         srmmu_flush_whole_tlb();
 566 }
 567 static void cypress_invalidate_mm(struct mm_struct *mm)
 568 {
 569         unsigned long flags;
 570         int cc, ncc = mm->context;
 571 
 572         if(ncc == NO_CONTEXT)
 573                 return;
 574 
 575         /* have context will travel... */
 576         save_flags(flags); cli();
 577         cc = srmmu_get_context();
 578         if(cc != ncc)
 579                 srmmu_set_context(ncc);
 580 
 581         cypress_flush_context(); /* POOF! */
 582         srmmu_flush_whole_tlb(); /* POW! */
 583 
 584         if(cc != ncc)
 585                 srmmu_set_context(cc);
 586         restore_flags(flags);
 587 }
 588 static void cypress_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 589 {
 590         unsigned long flags, addr;
 591         int cc, ncc = mm->context;
 592 
 593         if(ncc == NO_CONTEXT)
 594                 return;
 595 
 596         save_flags(flags); cli();
 597         cc = srmmu_get_context();
 598         if(cc != ncc)
 599                 srmmu_set_context(ncc);
 600 
 601         /* XXX Inefficient, we don't do the best we can... XXX */
 602         addr = start & SRMMU_PGDIR_MASK;
 603         while(addr < end) {
 604                 cypress_flush_region(addr);
 605                 addr += SRMMU_PGDIR_SIZE;
 606         }
 607         srmmu_flush_whole_tlb();
 608 
 609         if(cc != ncc)
 610                 srmmu_set_context(cc);
 611         restore_flags(flags);
 612 }
 613 
 614 static void cypress_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
 615 {
 616         unsigned long flags;
 617         int cc, ncc = vmp->vm_mm->context;
 618 
 619         if(ncc == NO_CONTEXT)
 620                 return;
 621 
 622         save_flags(flags); cli();
 623         cc = srmmu_get_context();
 624         if(cc != ncc)
 625                 srmmu_set_context(ncc);
 626 
 627         swift_flush_page(page);
 628         srmmu_flush_whole_tlb();
 629 
 630         if(cc != ncc)
 631                 srmmu_set_context(cc);
 632         restore_flags(flags);
 633 }
 634 
 635 /* Hypersparc invalidates. */
 636 static inline void hypersparc_invalidate_all(void)
 637 {
 638 
 639         hyper_flush_whole_icache();
 640         srmmu_flush_whole_tlb();
 641 }
 642 
 643 static void hypersparc_invalidate_mm(struct mm_struct *mm)
 644 {
 645 
 646 }
 647 
 648 static void hypersparc_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 649 {
 650 
 651 }
 652 
 653 static void hypersparc_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
 654 {
 655 
 656 }
 657 
 658 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
 659 {
 660         srmmu_set_entry(ptep, pte_val(pteval));
 661 }
 662 
 663 static void srmmu_quick_kernel_fault(unsigned long address)
 664 {
 665         printk("SRMMU: quick_kernel_fault called for %08lx\n", address);
 666         panic("Srmmu bolixed...");
 667 }
 668 
 669 static inline void alloc_context(struct mm_struct *mm)
 670 {
 671         struct ctx_list *ctxp;
 672 
 673         ctxp = ctx_free.next;
 674         if(ctxp != &ctx_free) {
 675                 remove_from_ctx_list(ctxp);
 676                 add_to_used_ctxlist(ctxp);
 677                 mm->context = ctxp->ctx_number;
 678                 ctxp->ctx_mm = mm;
 679                 return;
 680         }
 681         ctxp = ctx_used.next;
 682         if(ctxp->ctx_mm == current->mm)
 683                 ctxp = ctxp->next;
 684         if(ctxp == &ctx_used)
 685                 panic("out of mmu contexts");
 686         remove_from_ctx_list(ctxp);
 687         add_to_used_ctxlist(ctxp);
 688         ctxp->ctx_mm->context = NO_CONTEXT;
 689         ctxp->ctx_mm = mm;
 690         mm->context = ctxp->ctx_number;
 691 }
 692 
 693 static void srmmu_switch_to_context(struct task_struct *tsk)
 694 {
 695         /* Kernel threads can execute in any context and so can tasks
 696          * sleeping in the middle of exiting. If this task has already
  697          * been allocated a piece of the mmu real estate, just jump to
 698          * it.
 699          */
 700         if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
 701            (tsk->flags & PF_EXITING))
 702                 return;
 703         if(tsk->mm->context == NO_CONTEXT) {
 704                 alloc_context(tsk->mm);
 705                 srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
 706         }
 707         srmmu_set_context(tsk->mm->context);
 708 }
 709 
 710 /* Low level IO area allocation on the SRMMU. */
 711 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
 712 {
 713         pgd_t *pgdp;
 714         pmd_t *pmdp;
 715         pte_t *ptep;
 716         unsigned long tmp;
 717 
 718         physaddr &= PAGE_MASK;
 719         pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
 720         pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 721         ptep = srmmu_pte_offset(pmdp, virt_addr);
 722         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 723 
  724         /* I need to test whether this is consistent over all
 725          * sun4m's.  The bus_type represents the upper 4 bits of
 726          * 36-bit physical address on the I/O space lines...
 727          */
 728         tmp |= (bus_type << 28);
 729         if(rdonly)
 730                 tmp |= SRMMU_PRIV_RDONLY;
 731         else
 732                 tmp |= SRMMU_PRIV;
 733         srmmu_set_entry(ptep, tmp);
 734         invalidate_all();
 735 }
 736 
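/* Hedged usage sketch, not from the original file: mapping one page of
 * device registers with the routine above.  The physical address and
 * I/O space number are made up for illustration; bus_type supplies the
 * upper four bits of the 36-bit sun4m physical address as described in
 * the comment inside srmmu_mapioaddr.
 */
static inline void srmmu_example_map_device(void)
{
        /* Map physical page 0xf.00200000 read/write for the kernel at
         * IOBASE_VADDR (the virtual address is also just an example).
         */
        srmmu_mapioaddr(0x00200000, IOBASE_VADDR, 0xf, 0);
}
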
 737 static char *srmmu_lockarea(char *vaddr, unsigned long len)
 738 {
 739         return vaddr;
 740 }
 741 
 742 static void srmmu_unlockarea(char *vaddr, unsigned long len)
 743 {
 744 }
 745 
 746 /* IOMMU things go here. */
 747 
 748 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 749 static unsigned long first_dvma_page, last_dvma_page;
 750 
 751 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
 752 {
 753         unsigned long first = first_dvma_page;
 754         unsigned long last = last_dvma_page;
 755         iopte_t *iopte;
 756 
 757         iopte = iommu->page_table;
 758         iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
 759         while(first <= last) {
 760                 iopte_val(*iopte++) = ((((first - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
 761                                        (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
 762                 first += PAGE_SIZE;
 763         }
 764 }
 765 
 766 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
 767 {
 768         pgd_t *pgdp;
 769         pmd_t *pmdp;
 770         pte_t *ptep;
 771         unsigned long end = start + size;
 772 
 773         while(start < end) {
 774                 pgdp = srmmu_pgd_offset(init_task.mm, start);
 775                 pmdp = srmmu_pmd_offset(pgdp, start);
 776                 ptep = srmmu_pte_offset(pmdp, start);
 777                 pte_val(*ptep) &= ~SRMMU_CACHE;
 778                 start += PAGE_SIZE;
 779         }
 780 }
 781 
 782 unsigned long iommu_init(int iommund, unsigned long memory_start,
 783                          unsigned long memory_end, struct linux_sbus *sbus)
 784 {
 785         int impl, vers, ptsize;
 786         unsigned long tmp;
 787         struct iommu_struct *iommu;
 788         struct linux_prom_registers iommu_promregs[PROMREG_MAX];
 789 
 790         memory_start = LONG_ALIGN(memory_start);
 791         iommu = (struct iommu_struct *) memory_start;
 792         memory_start += sizeof(struct iommu_struct);
 793         prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
 794         iommu->regs = (struct iommu_regs *)
 795                 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
 796                                "IOMMU registers", iommu_promregs[0].which_io, 0x0);
 797         if(!iommu->regs)
 798                 panic("Cannot map IOMMU registers.");
 799         impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
 800         vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
 801         tmp = iommu->regs->control;
 802         tmp &= ~(IOMMU_CTRL_RNGE);
 803         tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
 804         iommu->regs->control = tmp;
 805         iommu_invalidate(iommu->regs);
 806         iommu->start = 0xfc000000;
 807         iommu->end = 0xffffffff;
 808 
 809         /* Allocate IOMMU page table */
 810         ptsize = iommu->end - iommu->start + 1;
 811         ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
 812 
 813         /* Stupid alignment constraints give me a headache. */
 814         memory_start = PAGE_ALIGN(memory_start);
 815         memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
 816         iommu->page_table = (iopte_t *) memory_start;
 817         memory_start += ptsize;
 818 
 819         /* Initialize new table. */
 820         memset(iommu->page_table, 0, ptsize);
 821         srmmu_map_dvma_pages_for_iommu(iommu);
 822         iommu->regs->base = (((unsigned long) iommu->page_table) - PAGE_OFFSET) >> 4;
 823         srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
 824         iommu_invalidate(iommu->regs);
 825         invalidate_all();
 826 
 827         sbus->iommu = iommu;
 828         printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
 829                impl, vers, iommu->page_table, ptsize);
 830         return memory_start;
 831 }
 832 
 833 
 834 static char *srmmu_get_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
 835 {
 836         struct iommu_struct *iommu = sbus->iommu;
 837         unsigned long page = (unsigned long) vaddr;
 838         unsigned long start, end, offset;
 839         iopte_t *iopte;
 840 
 841         if(len > PAGE_SIZE)
 842                 panic("Can only handle page sized iommu mappings.");
 843         offset = page & ~PAGE_MASK;
 844         page &= PAGE_MASK;
 845 
 846         start = iommu->start;
 847         end = KADB_DEBUGGER_BEGVM; /* Don't step on kadb/prom. */
 848         iopte = iommu->page_table;
 849         while(start < end) {
 850                 if(!(iopte_val(*iopte) & IOPTE_VALID))
 851                         break;
 852                 iopte++;
 853                 start += PAGE_SIZE;
 854         }
 855         if(start == KADB_DEBUGGER_BEGVM)
 856                 panic("Could not find free iommu entry in get_scsi_buffer.");
 857 
 858         vaddr = (char *) (start | offset);
 859         iopte_val(*iopte) = ((((page - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
 860                 (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
 861         iommu_invalidate(iommu->regs);
 862         invalidate_all();
 863 
 864         return vaddr;
 865 }
 866 
 867 static void srmmu_release_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
 868 {
 869         struct iommu_struct *iommu = sbus->iommu;
 870         unsigned long page = (unsigned long) vaddr;
 871         iopte_t *iopte;
 872 
 873         if(len > PAGE_SIZE)
 874                 panic("Can only handle page sized IOMMU mappings.");
 875         page &= PAGE_MASK;
 876         iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
 877         iopte_val(*iopte) = 0;
 878         iommu_invalidate(iommu->regs);
 879         invalidate_all();
 880 }
 881 
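/* Hedged sketch, not part of the original source: a driver style round
 * trip through the pair above for a single page sized DMA transfer.
 * The helper name and its arguments are placeholders for illustration.
 */
static char *srmmu_example_scsi_roundtrip(char *buf, unsigned long len,
                                          struct linux_sbus *sbus)
{
        /* Get a DVMA alias the device can address... */
        char *dvma = srmmu_get_scsi_buffer(buf, len, sbus);

        /* ...the device would DMA to or from dvma here... */

        /* ...then tear the mapping down once the transfer completes. */
        srmmu_release_scsi_buffer(dvma, len, sbus);
        return dvma;
}
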
 882 /* On the SRMMU we do not have the problems with limited tlb entries
 883  * for mapping kernel pages, so we just take things from the free page
 884  * pool.  As a side effect we are putting a little too much pressure
 885  * on the gfp() subsystem and we don't catch stack overflow like we
 886  * did on the sun4c with virtual kstack mappings.  This setup also
 887  * makes the logic of the iommu mapping code a lot easier as we can
 888  * transparently handle mappings on the kernel stack without any
  889  * of the special code we needed on the sun4c.
 890  */
 891 struct task_struct *srmmu_alloc_task_struct(void)
 892 {
 893         unsigned long page;
 894 
 895         page = get_free_page(GFP_KERNEL);
 896         if(!page)
 897                 return (struct task_struct *) 0;
 898         return (struct task_struct *) page;
 899 }
 900 
 901 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
 902 {
 903         unsigned long pages;
 904 
 905         pages = __get_free_pages(GFP_KERNEL, 1, 0);
 906         if(!pages)
 907                 return 0;
 908         memset((void *) pages, 0, (PAGE_SIZE << 1));
 909         return pages;
 910 }
 911 
 912 static void srmmu_free_task_struct(struct task_struct *tsk)
 913 {
 914         free_page((unsigned long) tsk);
 915 }
 916 
 917 static void srmmu_free_kernel_stack(unsigned long stack)
 918 {
 919         free_pages(stack, 1);
 920 }
 921 
 922 static unsigned long mempool;
 923 
 924 /* Allocate a block of RAM which is aligned to its size.
 925  * This procedure can be used until the call to mem_init().
 926  */
 927 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned size)
 928 {
 929         register unsigned mask = size - 1;
 930         register unsigned long ret;
 931 
 932         if(size==0) return 0x0;
 933         if(size & mask) {
 934                 prom_printf("panic: srmmu_init_alloc botch\n");
 935                 prom_halt();
 936         }
 937         ret = (*kbrk + mask) & ~mask;
 938         *kbrk = ret + size;
 939         memset((void*) ret, 0, size);
 940         return (void*) ret;
 941 }
 942 
 943 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
 944 {
 945         pgd_t *pgdp;
 946         pmd_t *pmdp;
 947         pte_t *ptep;
 948 
 949         while(start < end) {
 950                 pgdp = srmmu_pgd_offset(init_task.mm, start);
 951                 if(srmmu_pgd_none(*pgdp)) {
 952                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
 953                         srmmu_pgd_set(pgdp, pmdp);
 954                 }
 955                 pmdp = srmmu_pmd_offset(pgdp, start);
 956                 if(srmmu_pmd_none(*pmdp)) {
 957                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
 958                         srmmu_pmd_set(pmdp, ptep);
 959                 }
 960                 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
 961         }
 962 }
 963 
 964 /* This is much cleaner than poking around physical address space
 965  * looking at the prom's page table directly which is what most
 966  * other OS's do.  Yuck... this is much better.
 967  */
 968 static inline void srmmu_inherit_prom_mappings(void)
 969 {
 970         pgd_t *pgdp;
 971         pmd_t *pmdp;
 972         pte_t *ptep;
 973         unsigned long start, end;
 974         unsigned long prompte;
 975 
 976         start = KADB_DEBUGGER_BEGVM;
 977         end = LINUX_OPPROM_ENDVM;
 978         while(start < end) {
 979                 /* Something going wrong here on some ss5's... */
 980                 prompte = srmmu_hwprobe(start);
 981 
 982                 if((prompte & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
 983                         pgdp = srmmu_pgd_offset(init_task.mm, start);
 984                         if(srmmu_pgd_none(*pgdp)) {
 985                                 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
 986                                 srmmu_pgd_set(pgdp, pmdp);
 987                         }
 988                         pmdp = srmmu_pmd_offset(pgdp, start);
 989                         if(srmmu_pmd_none(*pmdp)) {
 990                                 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
 991                                 srmmu_pmd_set(pmdp, ptep);
 992                         }
 993                         ptep = srmmu_pte_offset(pmdp, start);
 994                         pte_val(*ptep) = prompte;
 995                 }
 996                 start += PAGE_SIZE;
 997         }
 998 }
 999 
1000 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1001 {
1002         unsigned long start;
1003         pgprot_t dvma_prot;
1004         pgd_t *pgdp;
1005         pmd_t *pmdp;
1006         pte_t *ptep;
1007 
1008         start = DVMA_VADDR;
1009         dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1010         while(first <= last) {
1011                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1012                 pmdp = srmmu_pmd_offset(pgdp, start);
1013                 ptep = srmmu_pte_offset(pmdp, start);
1014 
1015                 /* Map with cacheable bit clear. */
1016                 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1017 
1018                 first += PAGE_SIZE;
1019                 start += PAGE_SIZE;
1020         }
1021 }
1022 
1023 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1024 {
1025         pgd_t *pgdp;
1026         pmd_t *pmdp;
1027         pte_t *ptep;
1028 
1029         end = (PAGE_ALIGN(end) + PAGE_SIZE);
1030         while(start < end) {
1031                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1032                 if(srmmu_pgd_none(*pgdp)) {
1033                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1034                         srmmu_pgd_set(pgdp, pmdp);
1035                 }
1036                 pmdp = srmmu_pmd_offset(pgdp, start);
1037                 if(srmmu_pmd_none(*pmdp)) {
1038                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1039                         srmmu_pmd_set(pmdp, ptep);
1040                 }
1041                 ptep = srmmu_pte_offset(pmdp, start);
1042                 *ptep = srmmu_mk_pte(start, SRMMU_PAGE_KERNEL);
1043                 start += PAGE_SIZE;
1044         }
1045 }
1046 
1047 /* Paging initialization on the Sparc Reference MMU. */
1048 extern unsigned long free_area_init(unsigned long, unsigned long);
1049 extern unsigned long sparc_context_init(unsigned long, int);
1050 
1051 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1052 {
1053         int i, cpunode;
1054         char node_str[128];
1055 
1056         /* Find the number of contexts on the srmmu. */
1057         cpunode = prom_getchild(prom_root_node);
1058         num_contexts = 0;
1059         while((cpunode = prom_getsibling(cpunode)) != 0) {
1060                 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1061                 if(!strcmp(node_str, "cpu")) {
1062                         num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1063                         break;
1064                 }
1065         }
1066         if(!num_contexts) {
1067                 prom_printf("Something wrong, cant find cpu node in paging_init.\n");
1068                 prom_halt();
1069         }
1070                 
1071         prom_printf("Number of MMU contexts %d\n", num_contexts);
1072         mempool = start_mem;
1073         memset(swapper_pg_dir, 0, PAGE_SIZE);
1074         srmmu_map_kernel(KERNBASE, end_mem);
1075         srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1076         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1077         mempool = PAGE_ALIGN(mempool);
1078         first_dvma_page = mempool;
1079         last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1080         mempool = last_dvma_page + PAGE_SIZE;
1081         srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1082 
1083         srmmu_inherit_prom_mappings();
1084         srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1085         for(i = 0; i < num_contexts; i++)
1086                 srmmu_ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1087 
1088         prom_printf("Taking over MMU from PROM.\n");
1089         srmmu_flush_whole_tlb();
1090         srmmu_set_ctable_ptr(((unsigned)srmmu_context_table) - PAGE_OFFSET);
1091         srmmu_flush_whole_tlb();
1092 
1093         start_mem = PAGE_ALIGN(mempool);
1094         start_mem = sparc_context_init(start_mem, num_contexts);
1095         start_mem = free_area_init(start_mem, end_mem);
1096 
1097         prom_printf("survived...\n");
1098         return PAGE_ALIGN(start_mem);
1099 }
1100 
1101 /* Test the WP bit on the Sparc Reference MMU. */
1102 void srmmu_test_wp(void)
1103 {
1104         pgd_t *pgdp;
1105         
1106         wp_works_ok = -1;
 1107         /* We mapped page zero as a read-only page in paging_init().
1108          * So fire up the test, then invalidate the pgd for page zero.
1109          * It is no longer needed.
1110          */
1111 
1112         /* Let it rip... */
1113         __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
1114         if (wp_works_ok < 0)
1115                 wp_works_ok = 0;
1116 
1117         pgdp = srmmu_pgd_offset(init_task.mm, 0x0);
1118         pgd_val(*pgdp) = 0x0;
1119 }
1120 
1121 static char *srmmu_mmu_info(void)
1122 {
1123         return "";
1124 }
1125 
1126 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1127 {
1128 }
1129 
1130 static void srmmu_exit_hook(void)
1131 {
1132         struct ctx_list *ctx_old;
1133         struct mm_struct *mm = current->mm;
1134 
1135         if(mm->context != NO_CONTEXT) {
1136                 srmmu_ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1137                 ctx_old = ctx_list_pool + mm->context;
1138                 remove_from_ctx_list(ctx_old);
1139                 add_to_free_ctxlist(ctx_old);
1140                 mm->context = NO_CONTEXT;
1141         }
1142 }
1143 
1144 static void
1145 srmmu_flush_hook(void)
1146 {
1147         if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1148                 alloc_context(current->mm);
1149                 srmmu_ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1150                 srmmu_set_context(current->mm->context);
1151         }
1152 }
1153 
1154 /* Init various srmmu chip types. */
1155 void srmmu_is_bad(void)
1156 {
1157         prom_printf("Could not determine SRMMU chip type.\n");
1158         prom_halt();
1159 }
1160 
1161 void init_hypersparc(void)
1162 {
1163         unsigned long mreg = srmmu_get_mmureg();
1164 
1165         prom_printf("HyperSparc MMU detected.\n");
1166         if(mreg & HYPERSPARC_CSIZE)
1167                 hyper_cache_size = (256 * 1024);
1168         else
1169                 hyper_cache_size = (128 * 1024);
1170 
1171         srmmu_modtype = HyperSparc;
1172         hwbug_bitmask |= HWBUG_VACFLUSH_BITROT;
1173 
1174         hyper_flush_whole_icache();
1175         hyper_flush_all_combined();
1176 
1177         /* Keep things sane for now, cache in write-through mode. */
1178         mreg &= ~(HYPERSPARC_CWENABLE | HYPERSPARC_CMODE | HYPERSPARC_WBENABLE);
1179         mreg |= HYPERSPARC_CENABLE;
1180         srmmu_set_mmureg(mreg);
1181         put_ross_icr(get_ross_icr() | 0x3);
1182         invalidate_all = hypersparc_invalidate_all;
1183         invalidate_mm = hypersparc_invalidate_mm;
1184         invalidate_page = hypersparc_invalidate_page;
1185         invalidate_range = hypersparc_invalidate_range;
1186 }
1187 
1188 void init_cypress_common(void)
1189 {
1190         unsigned long mreg = srmmu_get_mmureg();
1191 
1192         mreg &= ~CYPRESS_CMODE;
1193         mreg |= CYPRESS_CENABLE;
1194         srmmu_set_mmureg(mreg);
1195         invalidate_all = cypress_invalidate_all;
1196         invalidate_mm = cypress_invalidate_mm;
1197         invalidate_page = cypress_invalidate_page;
1198         invalidate_range = cypress_invalidate_range;
1199 }
1200 
1201 void init_cypress_604(void)
1202 {
1203         prom_printf("Cypress 604(UP) MMU detected.\n");
1204         srmmu_modtype = Cypress;
1205         init_cypress_common();
1206 }
1207 
1208 void init_cypress_605(unsigned long mrev)
1209 {
1210         prom_printf("Cypress 605(MP) MMU detected.\n");
1211         if(mrev == 0xe) {
1212                 srmmu_modtype = Cypress_vE;
1213                 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1214         } else {
1215                 if(mrev == 0xd) {
1216                         srmmu_modtype = Cypress_vD;
1217                         hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1218                 } else {
1219                         srmmu_modtype = Cypress;
1220                 }
1221         }
1222         init_cypress_common();
1223 }
1224 
1225 #define SWIFT_REVISION_ADDR  0x10003000
1226 void init_swift(void)
1227 {
1228         unsigned long swift_rev, addr;
1229         unsigned long mreg = srmmu_get_mmureg();
1230 
1231         prom_printf("Swift MMU detected.\n");
1232         __asm__ __volatile__("lda [%1] %2, %0\n\t"
1233                              "srl %0, 0x18, %0\n\t" :
1234                              "=r" (swift_rev) :
1235                              "r" (SWIFT_REVISION_ADDR), "i" (0x20));
1236         switch(swift_rev) {
1237         case 0x11:
1238         case 0x20:
1239         case 0x23:
1240         case 0x30:
1241                 srmmu_modtype = Swift_lots_o_bugs;
1242                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1243                 /* Gee George, I wonder why Sun is so hush-hush about
1244                  * this hardware bug... really braindamaged stuff going
1245                  * on here.  However, I think we can find a way to avoid
1246                  * all of the workaround overhead under Linux.  Basically,
1247                  * any page fault can cause kernel pages to become user
1248                  * accessible (the mmu gets confused and clears some of
1249                  * the ACC bits in kernel ptes).  Aha, sounds pretty
1250                  * horrible eh?  But wait, after extensive testing it appears
1251                  * that if you use pgd_t level large kernel ptes (like the
1252                  * 4MB pages on the Pentium) the bug does not get tripped
1253                  * at all.  This avoids almost all of the major overhead.
1254                  * Welcome to a world where your vendor tells you to
1255                  * "apply this kernel patch" instead of "sorry for the
1256                  * broken hardware, send it back and we'll give you
1257                  * properly functioning parts."
1258                  */
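                      /* A rough sketch of that large-pte idea (purely
                       * illustrative, not a statement of what this file
                       * does): instead of filling in level-2/level-3 kernel
                       * page tables, drop a single pte directly into the
                       * level-1 (pgd) slot, e.g. something like
                       *
                       *	pgd_t *pgdp = srmmu_pgd_offset(&init_mm, vaddr);
                       *	srmmu_set_pte((pte_t *) pgdp,
                       *	              srmmu_mk_pte(paddr, SRMMU_PAGE_KERNEL));
                       *
                       * (vaddr/paddr being the kernel's virtual/physical
                       * base, not real variables here).  One such entry maps
                       * an entire pgdir-sized chunk, so there are no
                       * lower-level kernel ptes whose ACC bits the chip can
                       * corrupt.
                       */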
1259                 break;
1260         case 0x25:
1261         case 0x31:
1262                 srmmu_modtype = Swift_bad_c;
1263                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1264                 /* You see Sun allude to this hardware bug but never
1265                  * admit things directly, they'll say things like,
1266                  * "the Swift chip cache problems" or similar.
1267                  */
1268                 break;
1269         default:
1270                 srmmu_modtype = Swift_ok;
1271                 break;
1272         }
1273         /* Clear any crap from the cache or else... */
1274         for(addr = 0; addr < (PAGE_SIZE * 4); addr += 16) {
1275                 swift_inv_insn_tag(addr); /* whiz- */
1276                 swift_inv_data_tag(addr); /* bang */
1277         }
1278         mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
1279 
1280         /* The Swift branch folding logic is completely broken.  At
1281          * trap time, if things are just right, it can mistakenly
1282          * think that a trap is coming from kernel mode when in fact
1283          * it is coming from user mode (it misexecutes the branch in
1284          * the trap code).  So you see things like crashme completely
1285          * hosing your machine, which is unacceptable.  Turn
1286          * this crap off... nice job Fujitsu.
1287          */
1288         mreg &= ~(SWIFT_BF);
1289         srmmu_set_mmureg(mreg);
1290 
1291         invalidate_all = swift_invalidate_all;
1292         invalidate_mm = swift_invalidate_mm;
1293         invalidate_page = swift_invalidate_page;
1294         invalidate_range = swift_invalidate_range;
1295 
1296         /* Are you now convinced that the Swift is one of the
1297          * biggest VLSI abortions of all time?  Bravo Fujitsu!
1298          */
1299 }
1300 
1301 void init_tsunami(unsigned long mreg)
1302 {
1303         /* Tsunami's pretty sane; Sun and TI actually got it
1304          * somewhat right this time.  Fujitsu should have
1305          * taken some lessons from them.
1306          */
1307 
1308         prom_printf("Tsunami MMU detected.\n");
1309         srmmu_modtype = Tsunami;
1310         tsunami_invalidate_icache();
1311         tsunami_invalidate_dcache();
1312         mreg &= ~TSUNAMI_ITD;
1313         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1314         srmmu_set_mmureg(mreg);
1315         invalidate_all = tsunami_invalidate_all;
1316         invalidate_mm = tsunami_invalidate_mm;
1317         invalidate_page = tsunami_invalidate_page;
1318         invalidate_range = tsunami_invalidate_range;
1319 }
1320 
1321 void init_viking(unsigned long psr_vers, unsigned long mod_rev)
1322 {
1323         unsigned long mreg = srmmu_get_mmureg();
1324 
1325         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
1326 
1327         prom_printf("Viking MMU detected.\n");
1328         if(!psr_vers && !mod_rev) {
1329                 srmmu_modtype = Viking_12;
1330                 hwbug_bitmask |= (HWBUG_MODIFIED_BITROT | HWBUG_PC_BADFAULT_ADDR);
1331 
1332                 /* On a fault, the chip gets entirely confused.  It will
1333                  * do one of two things.  Either it will set the modified
1334                  * bit for a read-only page (!!!) or it will improperly
1335                  * report a fault when a dcti/loadstore sequence is the
1336                  * last two instructions on a page.  Oh baby...
1337                  */
1338         } else {
1339                 if(psr_vers) {
1340                         srmmu_modtype = Viking_2x;
1341                         hwbug_bitmask |= HWBUG_PC_BADFAULT_ADDR; /* see above */
1342                 } else {
1343                         if(mod_rev == 1) {
1344                                 srmmu_modtype = Viking_30;
1345                                 hwbug_bitmask |= HWBUG_PACINIT_BITROT;
1346 
1347                                 /* At boot time the physical cache
1348                                  * has cherry bombs in it, so you
1349                                  * have to scrape it by hand before
1350                                  * enabling it.  Nice CAD tools guys.
1351                                  */
1352                         } else {
1353                                 if(mod_rev < 8)
1354                                         srmmu_modtype = Viking_35;
1355                                 else
1356                                         srmmu_modtype = Viking_new;
1357                         }
1358                 }
1359         }
1360         /* XXX Dave, play with the MXCC you pinhead XXX */
1361         viking_flush_icache();
1362         viking_flush_dcache();
1363         mreg |= (VIKING_DCENABLE | VIKING_ICENABLE | VIKING_SBENABLE |
1364                  VIKING_TCENABLE | VIKING_DPENABLE);
1365         srmmu_set_mmureg(mreg);
1366         invalidate_all = viking_invalidate_all;
1367         invalidate_mm = viking_invalidate_mm;
1368         invalidate_page = viking_invalidate_page;
1369         invalidate_range = viking_invalidate_range;
1370 }
1371 
1372 /* Probe for the srmmu chip version. */
1373 static void get_srmmu_type(void)
1374 {
1375         unsigned long mreg, psr;
1376         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1377 
1378         srmmu_modtype = SRMMU_INVAL_MOD;
1379         hwbug_bitmask = 0;
1380 
1381         mreg = srmmu_get_mmureg(); psr = get_psr();
1382         mod_typ = (mreg & 0xf0000000) >> 28;
1383         mod_rev = (mreg & 0x0f000000) >> 24;
1384         psr_typ = (psr >> 28) & 0xf;
1385         psr_vers = (psr >> 24) & 0xf;
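             /* The mmu control register carries the module implementation
              * in bits 31:28 and the module revision in bits 27:24; the PSR
              * holds the CPU implementation/version in the same positions.
              * These four values are all we key off to pick a chip-specific
              * init routine below.
              */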
1386 
1387         /* First, check for HyperSparc or Cypress. */
1388         if(mod_typ == 1) {
1389                 switch(mod_rev) {
1390                 case 7:
1391                         /* UP or MP Hypersparc */
1392                         init_hypersparc();
1393                         break;
1394                 case 0:
1395                         /* Uniprocessor Cypress */
1396                         init_cypress_604();
1397                         break;
1398                 case 13:
1399                 case 14:
1400                 case 15:
1401                         /* MP Cypress mmu/cache-controller */
1402                         init_cypress_605(mod_rev);
1403                         break;
1404                 default:
1405                         srmmu_is_bad();
1406                         break;
1407                 }
1408                 return;
1409         }
1410 
1411         /* Next check for Fujitsu Swift. */
1412         if(psr_typ == 0 && psr_vers == 4) {
1413                 init_swift();
1414                 return;
1415         }
1416 
1417         /* Now the Viking family of srmmu. */
1418         if(psr_typ == 4 &&
1419            ((psr_vers == 0) ||
1420             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1421                 init_viking(psr_vers, mod_rev);
1422                 return;
1423         }
1424 
1425         /* Finally the Tsunami. */
1426         if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1427                 init_tsunami(mreg);
1428                 return;
1429         }
1430 
1431         /* Oh well */
1432         srmmu_is_bad();
1433 }
1434 
1435 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
1436         tsetup_mmu_patchme, rtrap_mmu_patchme;
1437 
1438 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
1439         tsetup_srmmu_stackchk, srmmu_rett_stackchk;
1440 
1441 extern unsigned long srmmu_fault;
1442 
1443 #define PATCH_BRANCH(insn, dest) do { \
1444                 iaddr = &(insn); \
1445                 daddr = &(dest); \
1446                 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
1447         } while(0)
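     /* PATCH_BRANCH overwrites the instruction word at 'insn' with an
      * unconditional branch to 'dest'.  SPARC_BRANCH presumably builds a
      * "ba" whose 22-bit displacement is the signed word distance
      * ((dest - insn) >> 2), so the patched slot simply jumps into the
      * srmmu-specific handler.
      */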
1448 
1449 static void patch_window_trap_handlers(void)
1450 {
1451         unsigned long *iaddr, *daddr;
1452         
1453         PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
1454         PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
1455         PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
1456         PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
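             /* Also patch the text-fault and data-fault trap table entries
              * (their third instruction word) to branch to the srmmu fault
              * handler.
              */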
1457         PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
1458         PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
1459 }
1460 
1461 /* Load up routines and constants for sun4m mmu */
1462 void ld_mmu_srmmu(void)
1463 {
1464         prom_printf("Loading srmmu MMU routines\n");
1465 
1466         /* First the constants */
1467         pmd_shift = SRMMU_PMD_SHIFT;
1468         pmd_size = SRMMU_PMD_SIZE;
1469         pmd_mask = SRMMU_PMD_MASK;
1470         pgdir_shift = SRMMU_PGDIR_SHIFT;
1471         pgdir_size = SRMMU_PGDIR_SIZE;
1472         pgdir_mask = SRMMU_PGDIR_MASK;
1473 
1474         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
1475         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
1476         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
1477 
1478         page_none = SRMMU_PAGE_NONE;
1479         page_shared = SRMMU_PAGE_SHARED;
1480         page_copy = SRMMU_PAGE_COPY;
1481         page_readonly = SRMMU_PAGE_RDONLY;
1482         page_kernel = SRMMU_PAGE_KERNEL;
1483         pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
1484             
1485         /* Functions */
1486         set_pte = srmmu_set_pte;
1487         switch_to_context = srmmu_switch_to_context;
1488         pmd_align = srmmu_pmd_align;
1489         pgdir_align = srmmu_pgdir_align;
1490         vmalloc_start = srmmu_vmalloc_start;
1491 
1492         pte_page = srmmu_pte_page;
1493         pmd_page = srmmu_pmd_page;
1494         pgd_page = srmmu_pgd_page;
1495 
1496         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
1497 
1498         pte_none = srmmu_pte_none;
1499         pte_present = srmmu_pte_present;
1500         pte_inuse = srmmu_pte_inuse;
1501         pte_clear = srmmu_pte_clear;
1502         pte_reuse = srmmu_pte_reuse;
1503 
1504         pmd_none = srmmu_pmd_none;
1505         pmd_bad = srmmu_pmd_bad;
1506         pmd_present = srmmu_pmd_present;
1507         pmd_inuse = srmmu_pmd_inuse;
1508         pmd_clear = srmmu_pmd_clear;
1509         pmd_reuse = srmmu_pmd_reuse;
1510 
1511         pgd_none = srmmu_pgd_none;
1512         pgd_bad = srmmu_pgd_bad;
1513         pgd_present = srmmu_pgd_present;
1514         pgd_inuse = srmmu_pgd_inuse;
1515         pgd_clear = srmmu_pgd_clear;
1516         pgd_reuse = srmmu_pgd_reuse;
1517 
1518         mk_pte = srmmu_mk_pte;
1519         pgd_set = srmmu_pgd_set;
1520         mk_pte_io = srmmu_mk_pte_io;
1521         pte_modify = srmmu_pte_modify;
1522         pgd_offset = srmmu_pgd_offset;
1523         pmd_offset = srmmu_pmd_offset;
1524         pte_offset = srmmu_pte_offset;
1525         pte_free_kernel = srmmu_pte_free_kernel;
1526         pmd_free_kernel = srmmu_pmd_free_kernel;
1527         pte_alloc_kernel = srmmu_pte_alloc_kernel;
1528         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
1529         pte_free = srmmu_pte_free;
1530         pte_alloc = srmmu_pte_alloc;
1531         pmd_free = srmmu_pmd_free;
1532         pmd_alloc = srmmu_pmd_alloc;
1533         pgd_free = srmmu_pgd_free;
1534         pgd_alloc = srmmu_pgd_alloc;
1535 
1536         pte_write = srmmu_pte_write;
1537         pte_dirty = srmmu_pte_dirty;
1538         pte_young = srmmu_pte_young;
1539         pte_wrprotect = srmmu_pte_wrprotect;
1540         pte_mkclean = srmmu_pte_mkclean;
1541         pte_mkold = srmmu_pte_mkold;
1542         pte_mkwrite = srmmu_pte_mkwrite;
1543         pte_mkdirty = srmmu_pte_mkdirty;
1544         pte_mkyoung = srmmu_pte_mkyoung;
1545         update_mmu_cache = srmmu_update_mmu_cache;
1546         mmu_exit_hook = srmmu_exit_hook;
1547         mmu_flush_hook = srmmu_flush_hook;
1548         mmu_lockarea = srmmu_lockarea;
1549         mmu_unlockarea = srmmu_unlockarea;
1550         mmu_get_scsi_buffer = srmmu_get_scsi_buffer;
1551         mmu_release_scsi_buffer = srmmu_release_scsi_buffer;
1552         mmu_info = srmmu_mmu_info;
1553 
1554         /* Task struct and kernel stack allocating/freeing. */
1555         alloc_kernel_stack = srmmu_alloc_kernel_stack;
1556         alloc_task_struct = srmmu_alloc_task_struct;
1557         free_kernel_stack = srmmu_free_kernel_stack;
1558         free_task_struct = srmmu_free_task_struct;
1559 
1560         quick_kernel_fault = srmmu_quick_kernel_fault;
1561 
1562         get_srmmu_type();
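             /* Choose between the two sets of physical memory access
              * helpers defined earlier in this file; which pair a given
              * chip wants is keyed off bit 0x800 of the mmu control
              * register (the exact meaning of that bit is model-specific).
              */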
1563         if(!(srmmu_get_mmureg() & 0x800)) {
1564                 srmmu_read_physical = msparc_read_physical;
1565                 srmmu_write_physical = msparc_write_physical;
1566         } else {
1567                 srmmu_read_physical = gensrmmu_read_physical;
1568                 srmmu_write_physical = gensrmmu_write_physical;
1569         }
1570         patch_window_trap_handlers();
1571 }
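     /* For reference, a minimal sketch (not code from this file; the call
      * sites are illustrative assumptions) of how the generic Sparc mm code
      * is expected to use the pointers installed above -- everything goes
      * through the indirection rather than the srmmu_* names directly:
      *
      *	pte_t pte = mk_pte(page, page_kernel);		(-> srmmu_mk_pte)
      *	pte = pte_mkdirty(pte_mkwrite(pte));		(-> srmmu_pte_mk*)
      *	set_pte(ptep, pte);				(-> srmmu_set_pte)
      *	invalidate_page(vma, address);			(-> chip flush routine)
      */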
