arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_v2p
  2. srmmu_p2v
  3. srmmu_swap
  4. srmmu_pmd_align
  5. srmmu_pgdir_align
  6. srmmu_vmalloc_start
  7. srmmu_pgd_page
  8. srmmu_pmd_page
  9. srmmu_pte_page
  10. srmmu_pte_none
  11. srmmu_pte_present
  12. srmmu_pte_clear
  13. srmmu_pmd_none
  14. srmmu_pmd_bad
  15. srmmu_pmd_present
  16. srmmu_pmd_clear
  17. srmmu_pgd_none
  18. srmmu_pgd_bad
  19. srmmu_pgd_present
  20. srmmu_pgd_clear
  21. srmmu_pte_write
  22. srmmu_pte_dirty
  23. srmmu_pte_young
  24. srmmu_pte_wrprotect
  25. srmmu_pte_mkclean
  26. srmmu_pte_mkold
  27. srmmu_pte_mkwrite
  28. srmmu_pte_mkdirty
  29. srmmu_pte_mkyoung
  30. srmmu_mk_pte
  31. srmmu_mk_pte_io
  32. srmmu_ctxd_set
  33. srmmu_pgd_set
  34. srmmu_pmd_set
  35. srmmu_pte_modify
  36. srmmu_pgd_offset
  37. srmmu_pmd_offset
  38. srmmu_pte_offset
  39. srmmu_update_rootmmu_dir
  40. srmmu_uncache_page
  41. srmmu_recache_page
  42. srmmu_getpage
  43. srmmu_putpage
  44. srmmu_pte_free_kernel
  45. srmmu_pte_alloc_kernel
  46. srmmu_pmd_free_kernel
  47. srmmu_pmd_alloc_kernel
  48. srmmu_pte_free
  49. srmmu_pte_alloc
  50. srmmu_pmd_free
  51. srmmu_pmd_alloc
  52. srmmu_pgd_free
  53. srmmu_pgd_alloc
  54. srmmu_set_pte
  55. srmmu_quick_kernel_fault
  56. alloc_context
  57. srmmu_switch_to_context
  58. srmmu_mapioaddr
  59. srmmu_lockarea
  60. srmmu_unlockarea
  61. srmmu_alloc_task_struct
  62. srmmu_alloc_kernel_stack
  63. srmmu_free_task_struct
  64. srmmu_free_kernel_stack
  65. tsunami_flush_cache_all
  66. tsunami_flush_cache_mm
  67. tsunami_flush_cache_range
  68. tsunami_flush_cache_page
  69. tsunami_flush_cache_page_to_uncache
  70. tsunami_flush_page_to_ram
  71. tsunami_flush_page_for_dma
  72. tsunami_flush_tlb_all
  73. tsunami_flush_tlb_mm
  74. tsunami_flush_tlb_range
  75. tsunami_flush_tlb_page
  76. tsunami_flush_tlb_page_for_cbit
  77. swift_flush_cache_all
  78. swift_flush_cache_mm
  79. swift_flush_cache_range
  80. swift_flush_cache_page
  81. swift_flush_page_to_ram
  82. swift_flush_page_for_dma
  83. swift_flush_cache_page_to_uncache
  84. swift_flush_tlb_all
  85. swift_flush_tlb_mm
  86. swift_flush_tlb_range
  87. swift_flush_tlb_page
  88. swift_flush_tlb_page_for_cbit
  89. viking_flush_cache_all
  90. viking_flush_cache_mm
  91. viking_flush_cache_range
  92. viking_flush_cache_page
  93. viking_flush_page_to_ram
  94. viking_flush_page_for_dma
  95. viking_mxcc_flush_page
  96. viking_no_mxcc_flush_page
  97. viking_flush_tlb_all
  98. viking_flush_tlb_mm
  99. viking_flush_tlb_range
  100. viking_flush_tlb_page
  101. viking_flush_tlb_page_for_cbit
  102. cypress_flush_tlb_all
  103. cypress_flush_tlb_mm
  104. cypress_flush_tlb_range
  105. cypress_flush_tlb_page
  106. hypersparc_flush_cache_all
  107. hypersparc_flush_cache_mm
  108. hypersparc_flush_cache_range
  109. hypersparc_flush_cache_page
  110. hypersparc_flush_page_to_ram
  111. hypersparc_flush_page_for_dma
  112. hypersparc_flush_cache_page_to_uncache
  113. hypersparc_flush_tlb_all
  114. hypersparc_flush_tlb_mm
  115. hypersparc_flush_tlb_range
  116. hypersparc_flush_tlb_page
  117. hypersparc_flush_tlb_page_for_cbit
  118. hypersparc_ctxd_set
  119. hypersparc_update_rootmmu_dir
  120. hypersparc_set_pte
  121. hypersparc_switch_to_context
  122. srmmu_map_dvma_pages_for_iommu
  123. srmmu_uncache_iommu_page_table
  124. iommu_init
  125. srmmu_get_scsi_one
  126. srmmu_get_scsi_sgl
  127. srmmu_release_scsi_one
  128. srmmu_release_scsi_sgl
  129. srmmu_early_paddr
  130. srmmu_early_pgd_set
  131. srmmu_early_pmd_set
  132. srmmu_early_pgd_page
  133. srmmu_early_pmd_page
  134. srmmu_early_pmd_offset
  135. srmmu_early_pte_offset
  136. srmmu_init_alloc
  137. srmmu_allocate_ptable_skeleton
  138. srmmu_inherit_prom_mappings
  139. srmmu_map_dvma_pages_for_cpu
  140. srmmu_map_kernel
  141. srmmu_paging_init
  142. srmmu_mmu_info
  143. srmmu_update_mmu_cache
  144. srmmu_exit_hook
  145. srmmu_flush_hook
  146. hypersparc_exit_hook
  147. hypersparc_flush_hook
  148. srmmu_is_bad
  149. poke_hypersparc
  150. init_hypersparc
  151. poke_cypress
  152. init_cypress_common
  153. init_cypress_604
  154. init_cypress_605
  155. poke_swift
  156. init_swift
  157. poke_tsunami
  158. init_tsunami
  159. poke_viking
  160. init_viking
  161. get_srmmu_type
  162. patch_window_trap_handlers
  163. smp_flush_page_for_dma
  164. smp_flush_cache_page_to_uncache
  165. smp_flush_tlb_page_for_cbit
  166. ld_mmu_srmmu

   1 /* $Id: srmmu.c,v 1.59 1996/04/21 10:32:21 davem Exp $
   2  * srmmu.c:  SRMMU specific routines for memory management.
   3  *
   4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   5  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
   6  * Copyright (C) 1996 Eddie C. Dost    (ecd@pool.informatik.rwth-aachen.de)
   7  */
   8 
   9 #include <linux/config.h>
  10 #include <linux/kernel.h>
  11 #include <linux/mm.h>
  12 
  13 #include <asm/page.h>
  14 #include <asm/pgtable.h>
  15 #include <asm/io.h>
  16 #include <asm/kdebug.h>
  17 #include <asm/vaddrs.h>
  18 #include <asm/traps.h>
  19 #include <asm/smp.h>
  20 #include <asm/mbus.h>
  21 #include <asm/cache.h>
  22 #include <asm/oplib.h>
  23 #include <asm/sbus.h>
  24 #include <asm/iommu.h>
  25 #include <asm/asi.h>
  26 #include <asm/msi.h>
  27 
  28 /* Now the cpu specific definitions. */
  29 #include <asm/viking.h>
  30 #include <asm/mxcc.h>
  31 #include <asm/ross.h>
  32 #include <asm/tsunami.h>
  33 #include <asm/swift.h>
  34 
  35 enum mbus_module srmmu_modtype;
  36 unsigned int hwbug_bitmask;
  37 int hyper_cache_size;
  38 int hyper_line_size;
  39 
  40 #ifdef __SMP__
  41 extern void smp_capture(void);
  42 extern void smp_release(void);
  43 #else
  44 #define smp_capture()
  45 #define smp_release()
  46 #endif /* !(__SMP__) */
  47 
  48 static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
  49 static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
  50 
  51 static void (*flush_page_for_dma)(unsigned long page);
  52 static void (*flush_cache_page_to_uncache)(unsigned long page);
  53 static void (*flush_tlb_page_for_cbit)(unsigned long page);
  54 #ifdef __SMP__
  55 static void (*local_flush_page_for_dma)(unsigned long page);
  56 static void (*local_flush_cache_page_to_uncache)(unsigned long page);
  57 static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
  58 #endif
  59 
  60 static struct srmmu_stats {
  61         int invall;
  62         int invpg;
  63         int invrnge;
  64         int invmm;
  65 } module_stats;
  66 
  67 static char *srmmu_name;
  68 
  69 ctxd_t *srmmu_ctx_table_phys;
  70 ctxd_t *srmmu_context_table;
  71 
  72 static struct srmmu_trans {
  73         unsigned long vbase;
  74         unsigned long pbase;
  75         int size;
  76 } srmmu_map[SPARC_PHYS_BANKS];
  77 
  78 static int can_cache_ptables = 0;
  79 static int viking_mxcc_present = 0;
  80 
  81 /* Physical memory can be _very_ non-contiguous on the sun4m, especially
  82  * the SS10/20 class machines and with the latest openprom revisions.
  83  * So we have to crunch the free page pool.
  84  */
  85 static inline unsigned long srmmu_v2p(unsigned long vaddr)
  86 {
  87         int i;
  88 
  89         for(i=0; srmmu_map[i].size != 0; i++) {
  90                 if(srmmu_map[i].vbase <= vaddr &&
  91                    (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
  92                         return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
  93         }
  94         return 0xffffffffUL;
  95 }
  96 
  97 static inline unsigned long srmmu_p2v(unsigned long paddr)
  98 {
  99         int i;
 100 
 101         for(i=0; srmmu_map[i].size != 0; i++) {
 102                 if(srmmu_map[i].pbase <= paddr &&
 103                    (srmmu_map[i].pbase + srmmu_map[i].size > paddr))
 104                         return (paddr - srmmu_map[i].pbase) + srmmu_map[i].vbase;
 105         }
 106         return 0xffffffffUL;
 107 }
 108 
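      /* A worked example, assuming a hypothetical single bank in srmmu_map of
       * vbase == KERNBASE, pbase == 0x00000000, size == 32MB:
       *
       *     srmmu_v2p(KERNBASE + 0x2000)  -->  0x2000
       *     srmmu_p2v(0x2000)             -->  KERNBASE + 0x2000
       *
       * An address that falls outside every bank yields 0xffffffff.
       */
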
 109 /* In general all page table modifications should use the V8 atomic
  110  * swap instruction.  This ensures the mmu and the cpu are in sync
 111  * with respect to ref/mod bits in the page tables.
 112  */
 113 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
 114 {
 115 #if CONFIG_AP1000
 116   /* the AP1000 has its memory on bus 8, not 0 like suns do */
 117   if (!(value&0xf0000000))
 118     value |= 0x80000000;
 119   if (value == 0x80000000) value = 0;
 120 #endif
 121         __asm__ __volatile__("swap [%2], %0\n\t" :
 122                              "=&r" (value) :
 123                              "0" (value), "r" (addr));
 124         return value;
 125 }
 126 
 127 /* Functions really use this, not srmmu_swap directly. */
 128 #define srmmu_set_entry(ptr, newentry) \
 129         srmmu_swap((unsigned long *) (ptr), (newentry))
 130 
 131 
 132 /* The very generic SRMMU page table operations. */
 133 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
 134 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
 135 
 136 static unsigned long srmmu_vmalloc_start(void)
 137 {
 138         return SRMMU_VMALLOC_START;
 139 }
 140 
 141 static unsigned long srmmu_pgd_page(pgd_t pgd)
 142 { return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
 143 
 144 static unsigned long srmmu_pmd_page(pmd_t pmd)
 145 { return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
 146 
 147 static unsigned long srmmu_pte_page(pte_t pte)
 148 { return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
 149 
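      /* Note on the "<< 4" above: an srmmu table entry holds the physical
       * address of the page (or of the next level table) shifted right by
       * four bits, so a 36-bit physical address fits above the type and
       * permission bits.  The accessors mask the entry, shift left by four
       * to recover the physical address, and then map it back to a kernel
       * virtual address with srmmu_p2v().
       */
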
 150 static int srmmu_pte_none(pte_t pte)          { return !pte_val(pte); }
 151 static int srmmu_pte_present(pte_t pte)
 152 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
 153 
 154 static void srmmu_pte_clear(pte_t *ptep)      { set_pte(ptep, __pte(0)); }
 155 
 156 static int srmmu_pmd_none(pmd_t pmd)          { return !pmd_val(pmd); }
 157 static int srmmu_pmd_bad(pmd_t pmd)
 158 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 159 
 160 static int srmmu_pmd_present(pmd_t pmd)
 161 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 162 
 163 static void srmmu_pmd_clear(pmd_t *pmdp)      { set_pte((pte_t *)pmdp, __pte(0)); }
 164 
 165 static int srmmu_pgd_none(pgd_t pgd)          { return !pgd_val(pgd); }
 166 static int srmmu_pgd_bad(pgd_t pgd)
 167 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 168 
 169 static int srmmu_pgd_present(pgd_t pgd)
 170 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 171 
 172 static void srmmu_pgd_clear(pgd_t * pgdp)     { set_pte((pte_t *)pgdp, __pte(0)); }
 173 
 174 static int srmmu_pte_write(pte_t pte)         { return pte_val(pte) & SRMMU_WRITE; }
 175 static int srmmu_pte_dirty(pte_t pte)         { return pte_val(pte) & SRMMU_DIRTY; }
 176 static int srmmu_pte_young(pte_t pte)         { return pte_val(pte) & SRMMU_REF; }
 177 
 178 static pte_t srmmu_pte_wrprotect(pte_t pte)   { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
 179 static pte_t srmmu_pte_mkclean(pte_t pte)     { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
 180 static pte_t srmmu_pte_mkold(pte_t pte)       { pte_val(pte) &= ~SRMMU_REF; return pte; }
 181 static pte_t srmmu_pte_mkwrite(pte_t pte)     { pte_val(pte) |= SRMMU_WRITE; return pte; }
 182 static pte_t srmmu_pte_mkdirty(pte_t pte)     { pte_val(pte) |= SRMMU_DIRTY; return pte; }
 183 static pte_t srmmu_pte_mkyoung(pte_t pte)     { pte_val(pte) |= SRMMU_REF; return pte; }
 184 
 185 /*
 186  * Conversion functions: convert a page and protection to a page entry,
 187  * and a page entry and page directory to the page they refer to.
 188  */
 189 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 190 { pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
 191 
 192 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 193 {
 194         pte_t pte;
 195         pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
 196         return pte;
 197 }
 198 
 199 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 200 { 
 201         srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
 202 }
 203 
 204 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 205 {
 206         srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
 207 }
 208 
 209 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 210 {
 211         srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
 212 }
 213 
 214 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 215 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
 216 
 217 /* to find an entry in a top-level page table... */
 218 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 219 {
 220         return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 221 }
 222 
 223 /* Find an entry in the second-level page table.. */
 224 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 225 {
 226         return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 227 }
 228 
 229 /* Find an entry in the third-level page table.. */ 
 230 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
 231 {
 232         return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 233 }
 234 
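      /* So a virtual address decomposes as 8/6/6/12 bits on the srmmu:
       * 256 pgd entries, 64 pmd entries, 64 pte entries, 4K pages.
       * For example, taking vaddr == 0xf0123456:
       *
       *     pgd index = (vaddr >> 24) & 0xff  = 0xf0
       *     pmd index = (vaddr >> 18) & 0x3f  = 0x04
       *     pte index = (vaddr >> 12) & 0x3f  = 0x23
       *     offset    =  vaddr & 0xfff        = 0x456
       */
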
 235 /* This must update the context table entry for this process. */
 236 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) 
 237 {
 238         if(tsk->mm->context != NO_CONTEXT)
 239                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
 240 }
 241 
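      /* srmmu_uncache_page()/srmmu_recache_page() walk the kernel page tables
       * by hand and clear (or set) the SRMMU_CACHE bit in the pte for one
       * page, flushing the cache and the stale tlb entry so the change in
       * cacheability takes effect immediately.
       */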
 242 static inline void srmmu_uncache_page(unsigned long addr)
 243 {
 244         pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
 245         pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
 246         pte_t *ptep = srmmu_pte_offset(pmdp, addr);
 247 
 248         flush_cache_page_to_uncache(addr);
 249         set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
 250         flush_tlb_page_for_cbit(addr);
 251 }
 252 
 253 static inline void srmmu_recache_page(unsigned long addr)
 254 {
 255         pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
 256         pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
 257         pte_t *ptep = srmmu_pte_offset(pmdp, addr);
 258 
 259         set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
 260         flush_tlb_page_for_cbit(addr);
 261 }
 262 
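      /* Page table pages come straight from the free page pool.  When page
       * tables may not be cached (can_cache_ptables == 0) a freshly allocated
       * page is made uncacheable before it is handed out and made cacheable
       * again when it is freed.
       */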
 263 static inline unsigned long srmmu_getpage(void)
 264 {
 265         unsigned long page = get_free_page(GFP_KERNEL);
 266 
 267         if (can_cache_ptables)
 268                 return page;
 269 
 270         if(page)
 271                 srmmu_uncache_page(page);
 272         return page;
 273 }
 274 
 275 static inline void srmmu_putpage(unsigned long page)
 276 {
 277         if (!can_cache_ptables)
 278                 srmmu_recache_page(page);
 279         free_page(page);
 280 }
 281 
 282 /* The easy versions. */
 283 #define NEW_PGD() (pgd_t *) srmmu_getpage()
 284 #define NEW_PMD() (pmd_t *) srmmu_getpage()
 285 #define NEW_PTE() (pte_t *) srmmu_getpage()
 286 #define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
 287 #define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
 288 #define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
 289 
 290 /*
 291  * Allocate and free page tables. The xxx_kernel() versions are
 292  * used to allocate a kernel page table - this turns on ASN bits
 293  * if any, and marks the page tables reserved.
 294  */
 295 static void srmmu_pte_free_kernel(pte_t *pte)
 296 {
 297         FREE_PTE(pte);
 298 }
 299 
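      /* The allocators below follow a check/allocate/re-check pattern:
       * NEW_PTE()/NEW_PMD() may sleep, so the upper-level entry is tested
       * again after the allocation and, if somebody else installed a table
       * in the meantime, the fresh page is simply given back.  On allocation
       * failure or on a corrupt upper-level entry they point the entry at
       * BAD_PAGETABLE and return NULL.
       */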
 300 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 301 {
 302         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 303         if(srmmu_pmd_none(*pmd)) {
 304                 pte_t *page = NEW_PTE();
 305                 if(srmmu_pmd_none(*pmd)) {
 306                         if(page) {
 307                                 pmd_set(pmd, page);
 308                                 return page + address;
 309                         }
 310                         pmd_set(pmd, BAD_PAGETABLE);
 311                         return NULL;
 312                 }
 313                 FREE_PTE(page);
 314         }
 315         if(srmmu_pmd_bad(*pmd)) {
 316                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 317                 pmd_set(pmd, BAD_PAGETABLE);
 318                 return NULL;
 319         }
 320         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 321 }
 322 
 323 static void srmmu_pmd_free_kernel(pmd_t *pmd)
 324 {
 325         FREE_PMD(pmd);
 326 }
 327 
 328 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 329 {
 330         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 331         if(srmmu_pgd_none(*pgd)) {
 332                 pmd_t *page = NEW_PMD();
 333                 if(srmmu_pgd_none(*pgd)) {
 334                         if(page) {
 335                                 pgd_set(pgd, page);
 336                                 return page + address;
 337                         }
 338                         pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 339                         return NULL;
 340                 }
 341                 FREE_PMD(page);
 342         }
 343         if(srmmu_pgd_bad(*pgd)) {
 344                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 345                 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 346                 return NULL;
 347         }
 348         return (pmd_t *) pgd_page(*pgd) + address;
 349 }
 350 
 351 static void srmmu_pte_free(pte_t *pte)
 352 {
 353         FREE_PTE(pte);
 354 }
 355 
 356 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 357 {
 358         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 359         if(srmmu_pmd_none(*pmd)) {
 360                 pte_t *page = NEW_PTE();
 361                 if(srmmu_pmd_none(*pmd)) {
 362                         if(page) {
 363                                 pmd_set(pmd, page);
 364                                 return page + address;
 365                         }
 366                         pmd_set(pmd, BAD_PAGETABLE);
 367                         return NULL;
 368                 }
 369                 FREE_PTE(page);
 370         }
 371         if(srmmu_pmd_bad(*pmd)) {
 372                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 373                 pmd_set(pmd, BAD_PAGETABLE);
 374                 return NULL;
 375         }
 376         return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
 377 }
 378 
 379 /* Real three-level page tables on SRMMU. */
 380 static void srmmu_pmd_free(pmd_t * pmd)
 381 {
 382         FREE_PMD(pmd);
 383 }
 384 
 385 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 386 {
 387         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 388         if(srmmu_pgd_none(*pgd)) {
 389                 pmd_t *page = NEW_PMD();
 390                 if(srmmu_pgd_none(*pgd)) {
 391                         if(page) {
 392                                 pgd_set(pgd, page);
 393                                 return page + address;
 394                         }
 395                         pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 396                         return NULL;
 397                 }
 398                 FREE_PMD(page);
 399         }
 400         if(srmmu_pgd_bad(*pgd)) {
 401                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 402                 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 403                 return NULL;
 404         }
 405         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 406 }
 407 
 408 static void srmmu_pgd_free(pgd_t *pgd)
 409 {
 410         FREE_PGD(pgd);
 411 }
 412 
 413 static pgd_t *srmmu_pgd_alloc(void)
 414 {
 415         return NEW_PGD();
 416 }
 417 
 418 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
 419 {
 420         srmmu_set_entry(ptep, pte_val(pteval));
 421 }
 422 
 423 static void srmmu_quick_kernel_fault(unsigned long address)
 424 {
 425         printk("Penguin faults at address %08lx\n", address);
 426         panic("Srmmu bolixed...");
 427 }
 428 
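      /* Hand out an mmu context: take one from the free list if possible,
       * otherwise steal an entry from the used list (skipping the one owned
       * by current->mm), flush the victim's cache and tlb state, and mark
       * the previous owner as NO_CONTEXT so it reallocates on its next
       * context switch.
       */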
 429 static inline void alloc_context(struct mm_struct *mm)
 430 {
 431         struct ctx_list *ctxp;
 432 
 433         ctxp = ctx_free.next;
 434         if(ctxp != &ctx_free) {
 435                 remove_from_ctx_list(ctxp);
 436                 add_to_used_ctxlist(ctxp);
 437                 mm->context = ctxp->ctx_number;
 438                 ctxp->ctx_mm = mm;
 439                 return;
 440         }
 441         ctxp = ctx_used.next;
 442         if(ctxp->ctx_mm == current->mm)
 443                 ctxp = ctxp->next;
 444         if(ctxp == &ctx_used)
 445                 panic("out of mmu contexts");
 446         flush_cache_mm(ctxp->ctx_mm);
 447         flush_tlb_mm(ctxp->ctx_mm);
 448         remove_from_ctx_list(ctxp);
 449         add_to_used_ctxlist(ctxp);
 450         ctxp->ctx_mm->context = NO_CONTEXT;
 451         ctxp->ctx_mm = mm;
 452         mm->context = ctxp->ctx_number;
 453 }
 454 
 455 static void srmmu_switch_to_context(struct task_struct *tsk)
 456 {
 457         /* Kernel threads can execute in any context and so can tasks
 458          * sleeping in the middle of exiting. If this task has already
  459          * been allocated a piece of the mmu real estate, just jump to
 460          * it.
 461          */
 462         if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
 463            (tsk->flags & PF_EXITING))
 464                 return;
 465         if(tsk->mm->context == NO_CONTEXT) {
 466                 alloc_context(tsk->mm);
 467                 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
 468         }
 469         srmmu_set_context(tsk->mm->context);
 470 }
 471 
 472 /* Low level IO area allocation on the SRMMU. */
 473 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
 474 {
 475         pgd_t *pgdp;
 476         pmd_t *pmdp;
 477         pte_t *ptep;
 478         unsigned long tmp;
 479 
 480         physaddr &= PAGE_MASK;
 481         pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
 482         pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 483         ptep = srmmu_pte_offset(pmdp, virt_addr);
 484         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 485 
  486         /* I need to test whether this is consistent over all
  487          * sun4m's.  The bus_type represents the upper 4 bits of
  488          * the 36-bit physical address on the I/O space lines...
 489          */
 490         tmp |= (bus_type << 28);
 491         if(rdonly)
 492                 tmp |= SRMMU_PRIV_RDONLY;
 493         else
 494                 tmp |= SRMMU_PRIV;
 495         flush_page_to_ram(virt_addr);
 496         srmmu_set_entry(ptep, tmp);
 497         flush_tlb_all();
 498 }
 499 
 500 static char *srmmu_lockarea(char *vaddr, unsigned long len)
 501 {
 502         return vaddr;
 503 }
 504 
 505 static void srmmu_unlockarea(char *vaddr, unsigned long len)
 506 {
 507 }
 508 
 509 /* On the SRMMU we do not have the problems with limited tlb entries
 510  * for mapping kernel pages, so we just take things from the free page
 511  * pool.  As a side effect we are putting a little too much pressure
 512  * on the gfp() subsystem.  This setup also makes the logic of the
 513  * iommu mapping code a lot easier as we can transparently handle
  514  * mappings on the kernel stack without any of the special code that
  515  * was needed on the sun4c.
 516  */
 517 struct task_struct *srmmu_alloc_task_struct(void)
 518 {
 519         unsigned long page;
 520 
 521         page = get_free_page(GFP_KERNEL);
 522         if(!page)
 523                 return (struct task_struct *) 0;
 524         return (struct task_struct *) page;
 525 }
 526 
 527 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
 528 {
 529         unsigned long pages;
 530 
 531         pages = __get_free_pages(GFP_KERNEL, 2, 0);
 532         if(!pages)
 533                 return 0;
 534         memset((void *) pages, 0, (PAGE_SIZE << 2));
 535         return pages;
 536 }
 537 
 538 static void srmmu_free_task_struct(struct task_struct *tsk)
 539 {
 540         free_page((unsigned long) tsk);
 541 }
 542 
 543 static void srmmu_free_kernel_stack(unsigned long stack)
 544 {
 545         free_pages(stack, 2);
 546 }
 547 
  548 /* Tsunami flushes.  Its page level tlb invalidation is not very
  549  * useful at all; you must be in the context that the page exists in to
 550  * get a match.
 551  */
 552 static void tsunami_flush_cache_all(void)
 553 {
 554         flush_user_windows();
 555         tsunami_flush_icache();
 556         tsunami_flush_dcache();
 557 }
 558 
 559 static void tsunami_flush_cache_mm(struct mm_struct *mm)
 560 {
 561 #ifndef __SMP__
 562         if(mm->context != NO_CONTEXT) {
 563 #endif
 564                 flush_user_windows();
 565                 tsunami_flush_icache();
 566                 tsunami_flush_dcache();
 567 #ifndef __SMP__
 568         }
 569 #endif
 570 }
 571 
 572 static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 573 {
 574 #ifndef __SMP__
 575         if(mm->context != NO_CONTEXT) {
 576 #endif
 577                 flush_user_windows();
 578                 tsunami_flush_icache();
 579                 tsunami_flush_dcache();
 580 #ifndef __SMP__
 581         }
 582 #endif
 583 }
 584 
 585 static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 586 {
 587 #ifndef __SMP__
 588         struct mm_struct *mm = vma->vm_mm;
 589         if(mm->context != NO_CONTEXT) {
 590 #endif
 591                 flush_user_windows();
 592                 tsunami_flush_icache();
 593                 tsunami_flush_dcache();
 594 #ifndef __SMP__
 595         }
 596 #endif
 597 }
 598 
 599 static void tsunami_flush_cache_page_to_uncache(unsigned long page)
 600 {
 601         tsunami_flush_dcache();
 602 }
 603 
 604 /* Tsunami does not have a Copy-back style virtual cache. */
 605 static void tsunami_flush_page_to_ram(unsigned long page)
 606 {
 607 }
 608 
 609 /* However, Tsunami is not IO coherent. */
 610 static void tsunami_flush_page_for_dma(unsigned long page)
 611 {
 612         tsunami_flush_dcache();
 613 }
 614 
 615 /* TLB flushes seem to upset the tsunami sometimes, I can't figure out
 616  * what the hell is going on.  All I see is a tlb flush (page or whole,
  617  * there is no consistent pattern) and then total local variable corruption
  618  * in the procedure that called us after return.  Usually triggerable
 619  * by "cool" programs like crashme and bonnie.  I played around a bit
 620  * and adding a bunch of forced nops seems to make the problems all
 621  * go away. (missed instruction fetches possibly? ugh...)
 622  */
 623 #define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
 624                            nop(); nop(); nop(); nop(); nop(); } while(0)
 625 
 626 static void tsunami_flush_tlb_all(void)
 627 {
 628         module_stats.invall++;
 629         srmmu_flush_whole_tlb();
 630         TSUNAMI_SUCKS;
 631 }
 632 
 633 static void tsunami_flush_tlb_mm(struct mm_struct *mm)
 634 {
 635         module_stats.invmm++;
 636 #ifndef __SMP__
 637         if(mm->context != NO_CONTEXT) {
 638 #endif
 639                 srmmu_flush_whole_tlb();
 640                 TSUNAMI_SUCKS;
 641 #ifndef __SMP__
 642         }
 643 #endif
 644 }
 645 
 646 static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 647 {
 648         module_stats.invrnge++;
 649 #ifndef __SMP__
 650         if(mm->context != NO_CONTEXT) {
 651 #endif
 652                 srmmu_flush_whole_tlb();
 653                 TSUNAMI_SUCKS;
 654 #ifndef __SMP__
 655         }
 656 #endif
 657 }
 658 
 659 static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 660 {
 661         int octx;
 662         struct mm_struct *mm = vma->vm_mm;
 663 
 664 #ifndef __SMP__
 665         if(mm->context != NO_CONTEXT) {
 666 #endif
 667                 octx = srmmu_get_context();
 668 
 669                 srmmu_set_context(mm->context);
 670                 srmmu_flush_tlb_page(page);
 671                 TSUNAMI_SUCKS;
 672                 srmmu_set_context(octx);
 673 #ifndef __SMP__
 674         }
 675 #endif
 676         module_stats.invpg++;
 677 }
 678 
 679 static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
 680 {
 681         srmmu_flush_tlb_page(page);
 682 }
 683 
 684 /* Swift flushes.  It has the recommended SRMMU specification flushing
  685  * facilities, so we can do things in a more fine-grained fashion than we
 686  * could on the tsunami.  Let's watch out for HARDWARE BUGS...
 687  */
 688 
 689 static void swift_flush_cache_all(void)
 690 {
 691         flush_user_windows();
 692         swift_idflash_clear();
 693 }
 694 
 695 static void swift_flush_cache_mm(struct mm_struct *mm)
 696 {
 697 #ifndef __SMP__
 698         if(mm->context != NO_CONTEXT) {
 699 #endif
 700                 flush_user_windows();
 701                 swift_idflash_clear();
 702 #ifndef __SMP__
 703         }
 704 #endif
 705 }
 706 
 707 static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 708 {
 709 #ifndef __SMP__
 710         if(mm->context != NO_CONTEXT) {
 711 #endif
 712                 flush_user_windows();
 713                 swift_idflash_clear();
 714 #ifndef __SMP__
 715         }
 716 #endif
 717 }
 718 
 719 static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 720 {
 721 #ifndef __SMP__
 722         struct mm_struct *mm = vma->vm_mm;
 723         if(mm->context != NO_CONTEXT) {
 724 #endif
 725                 flush_user_windows();
 726                 if(vma->vm_flags & VM_EXEC)
 727                         swift_flush_icache();
 728                 swift_flush_dcache();
 729 #ifndef __SMP__
 730         }
 731 #endif
 732 }
 733 
 734 /* Not copy-back on swift. */
 735 static void swift_flush_page_to_ram(unsigned long page)
 736 {
 737 }
 738 
 739 /* But not IO coherent either. */
 740 static void swift_flush_page_for_dma(unsigned long page)
 741 {
 742         swift_flush_dcache();
 743 }
 744 
 745 static void swift_flush_cache_page_to_uncache(unsigned long page)
 746 {
 747         swift_flush_dcache();
 748 }
 749 
 750 static void swift_flush_tlb_all(void)
 751 {
 752         module_stats.invall++;
 753         srmmu_flush_whole_tlb();
 754 }
 755 
 756 static void swift_flush_tlb_mm(struct mm_struct *mm)
 757 {
 758         module_stats.invmm++;
 759 #ifndef __SMP__
 760         if(mm->context != NO_CONTEXT)
 761 #endif
 762                 srmmu_flush_whole_tlb();
 763 }
 764 
 765 static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 766 {
 767         module_stats.invrnge++;
 768 #ifndef __SMP__
 769         if(mm->context != NO_CONTEXT)
 770 #endif
 771                 srmmu_flush_whole_tlb();
 772 }
 773 
 774 static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 775 {
 776 #ifndef __SMP__
 777         struct mm_struct *mm = vma->vm_mm;
 778         if(mm->context != NO_CONTEXT)
 779 #endif
 780                 srmmu_flush_whole_tlb();
 781         module_stats.invpg++;
 782 }
 783 
 784 static void swift_flush_tlb_page_for_cbit(unsigned long page)
 785 {
 786         srmmu_flush_whole_tlb();
 787 }
 788 
 789 /* The following are all MBUS based SRMMU modules, and therefore could
 790  * be found in a multiprocessor configuration.  On the whole, these
  791  * chips seem to be much more touchy about DVMA and page tables
 792  * with respect to cache coherency.
 793  */
 794 
 795 /* Viking flushes.  For Sun's mainline MBUS processor it is pretty much
 796  * a crappy mmu.  The on-chip I&D caches only have full flushes, no fine
 797  * grained cache invalidations.  It only has these "flash clear" things
  798  * just like the MicroSparcI.  Added to this, many revs of the chip are
  799  * teeming with hardware buggery.  Someday maybe we'll do direct
 800  * diagnostic tag accesses for page level flushes as those should
 801  * be painless and will increase performance due to the frequency of
 802  * page level flushes. This is a must to _really_ flush the caches,
 803  * crazy hardware ;-)
 804  */
 805 
 806 static void viking_flush_cache_all(void)
 807 {
 808         viking_flush_icache();
 809 }
 810 
 811 static void viking_flush_cache_mm(struct mm_struct *mm)
 812 {
 813 #ifndef __SMP__
 814         if(mm->context != NO_CONTEXT) {
 815 #endif
 816                 flush_user_windows();
 817                 viking_flush_icache();
 818 #ifndef __SMP__
 819         }
 820 #endif
 821 }
 822 
 823 static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 824 {
 825 #ifndef __SMP__
 826         if(mm->context != NO_CONTEXT) {
 827 #endif
 828                 flush_user_windows();
 829                 viking_flush_icache();
 830 #ifndef __SMP__
 831         }
 832 #endif
 833 }
 834 
 835 static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 836 {
 837 #ifndef __SMP__
 838         struct mm_struct *mm = vma->vm_mm;
 839         if(mm->context != NO_CONTEXT) {
 840 #endif
 841                 flush_user_windows();
 842                 if(vma->vm_flags & VM_EXEC)
 843                         viking_flush_icache();
 844 #ifndef __SMP__
 845         }
 846 #endif
 847 }
 848 
 849 /* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
 850 static void viking_flush_page_to_ram(unsigned long page)
 851 {
 852 }
 853 
 854 /* Viking is IO cache coherent. */
 855 static void viking_flush_page_for_dma(unsigned long page)
 856 {
 857 }
 858 
 859 static void viking_mxcc_flush_page(unsigned long page)
 860 {
 861         unsigned long ppage = srmmu_hwprobe(page);
 862         unsigned long paddr0, paddr1;
 863 
 864         if (!ppage)
 865                 return;
 866 
 867         paddr0 = (ppage >> 28) | 0x10;          /* Set cacheable bit. */
 868         paddr1 = (ppage << 4) & PAGE_MASK;
 869 
 870         /* Read the page's data through the stream registers,
 871          * and write it back to memory. This will issue
 872          * coherent write invalidates to all other caches, thus
 873          * should also be sufficient in an MP system.
 874          */
 875         __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
 876                               "or %%g0, %1, %%g3\n"
 877                               "1:\n\t"
 878                               "stda %%g2, [%2] %5\n\t"
 879                               "stda %%g2, [%3] %5\n\t"
 880                               "add %%g3, %4, %%g3\n\t"
 881                               "btst 0xfff, %%g3\n\t"
 882                               "bne 1b\n\t"
 883                               "nop\n\t" : :
 884                               "r" (paddr0), "r" (paddr1),
 885                               "r" (MXCC_SRCSTREAM),
 886                               "r" (MXCC_DESSTREAM),
 887                               "r" (MXCC_STREAM_SIZE),
 888                               "i" (ASI_M_MXCC) : "g2", "g3");
 889 
 890         /* This was handcoded after a look at the gcc output from
 891          *
 892          *      do {
 893          *              mxcc_set_stream_src(paddr);
 894          *              mxcc_set_stream_dst(paddr);
 895          *              paddr[1] += MXCC_STREAM_SIZE;
 896          *      } while (paddr[1] & ~PAGE_MASK);
 897          */
 898 }
 899 
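      /* Without an MXCC the page is flushed the hard way: probe the physical
       * page number, scan all 128 sets x 4 blocks of dcache page tags via
       * viking_get_dcache_ptag(), and for every valid dirty line belonging to
       * this page read a series of conflicting kernel addresses so the line
       * is displaced back to memory.
       */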
 900 static void viking_no_mxcc_flush_page(unsigned long page)
 901 {
 902         unsigned long ppage = srmmu_hwprobe(page) >> 8;
 903         int set, block;
 904         unsigned long ptag[2];
 905         unsigned long vaddr;
 906         int i;
 907 
 908         if (!ppage)
 909                 return;
 910 
 911         for (set = 0; set < 128; set++) {
 912                 for (block = 0; block < 4; block++) {
 913 
 914                         viking_get_dcache_ptag(set, block, ptag);
 915 
 916                         if (ptag[1] != ppage)
 917                                 continue;
 918                         if (!(ptag[0] & VIKING_PTAG_VALID))
 919                                 continue;
 920                         if (!(ptag[0] & VIKING_PTAG_DIRTY))
 921                                 continue;
 922 
 923                         /* There was a great cache from TI
 924                          * with comfort as much as vi,
 925                          * 4 pages to flush,
 926                          * 4 pages, no rush,
 927                          * since anything else makes him die.
 928                          */
 929                         vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
 930                         for (i = 0; i < 8; i++) {
 931                                 __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
 932                                                       "r" (vaddr) : "g2");
 933                                 vaddr += PAGE_SIZE;
 934                         }
 935 
 936                         /* Continue with next set. */
 937                         break;
 938                 }
 939         }
 940 }
 941 
 942 static void viking_flush_tlb_all(void)
 943 {
 944         module_stats.invall++;
 945         srmmu_flush_whole_tlb();
 946 }
 947 
 948 static void viking_flush_tlb_mm(struct mm_struct *mm)
 949 {
 950         int octx;
 951         module_stats.invmm++;
 952 
 953 #ifndef __SMP__
 954         if(mm->context != NO_CONTEXT) {
 955 #endif
 956                 octx = srmmu_get_context();
 957                 srmmu_set_context(mm->context);
 958                 srmmu_flush_tlb_ctx();
 959                 srmmu_set_context(octx);
 960 #ifndef __SMP__
 961         }
 962 #endif
 963 }
 964 
 965 static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 966 {
 967         int octx;
 968         module_stats.invrnge++;
 969 
 970 #ifndef __SMP__
 971         if(mm->context != NO_CONTEXT) {
 972 #endif
 973                 octx = srmmu_get_context();
 974                 srmmu_set_context(mm->context);
 975                 start &= SRMMU_PMD_MASK;
 976                 while(start < end) {
 977                         srmmu_flush_tlb_segment(start);
 978                         start += SRMMU_PMD_SIZE;
 979                 }
 980                 srmmu_set_context(octx);
 981 #ifndef __SMP__
 982         }
 983 #endif
 984 }
 985 
 986 static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 987 {
 988         int octx;
 989         struct mm_struct *mm = vma->vm_mm;
 990 
 991         module_stats.invpg++;
 992 #ifndef __SMP__
 993         if(mm->context != NO_CONTEXT) {
 994 #endif
 995                 octx = srmmu_get_context();
 996                 srmmu_set_context(mm->context);
 997                 srmmu_flush_tlb_page(page);
 998                 srmmu_set_context(octx);
 999 #ifndef __SMP__
1000         }
1001 #endif
1002 }
1003 
1004 static void viking_flush_tlb_page_for_cbit(unsigned long page)
1005 {
1006         srmmu_flush_tlb_page(page);
1007 }
1008 
1009 /* Cypress flushes. */
1010 
1011 static void cypress_flush_tlb_all(void)
1012 {
1013         module_stats.invall++;
1014         srmmu_flush_whole_tlb();
1015 }
1016 
1017 static void cypress_flush_tlb_mm(struct mm_struct *mm)
1018 {
1019         int octx;
1020 
1021         module_stats.invmm++;
1022 #ifndef __SMP__
1023         if(mm->context != NO_CONTEXT) {
1024 #endif
1025                 octx = srmmu_get_context();
1026                 srmmu_set_context(mm->context);
1027                 srmmu_flush_tlb_ctx();
1028                 srmmu_set_context(octx);
1029 #ifndef __SMP__
1030         }
1031 #endif
1032 }
1033 
1034 static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1035 {
1036         int octx;
1037 
1038         module_stats.invrnge++;
1039 #ifndef __SMP__
1040         if(mm->context != NO_CONTEXT) {
1041 #endif
1042                 octx = srmmu_get_context();
1043                 srmmu_set_context(mm->context);
1044                 start &= SRMMU_PMD_MASK;
1045                 while(start < end) {
1046                         srmmu_flush_tlb_segment(start);
1047                         start += SRMMU_PMD_SIZE;
1048                 }
1049                 srmmu_set_context(octx);
1050 #ifndef __SMP__
1051         }
1052 #endif
1053 }
1054 
1055 static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1056 {
1057         int octx;
1058         struct mm_struct *mm = vma->vm_mm;
1059 
1060         module_stats.invpg++;
1061 #ifndef __SMP__
1062         if(mm->context != NO_CONTEXT) {
1063 #endif
1064                 octx = srmmu_get_context();
1065                 srmmu_set_context(mm->context);
1066                 srmmu_flush_tlb_page(page);
1067                 srmmu_set_context(octx);
1068 #ifndef __SMP__
1069         }
1070 #endif
1071 }
1072 
1073 /* Hypersparc flushes.  Very nice chip... */
1074 static void hypersparc_flush_cache_all(void)
1075 {
1076         flush_user_windows();
1077         hyper_flush_unconditional_combined();
1078         hyper_flush_whole_icache();
1079 }
1080 
1081 static void hypersparc_flush_cache_mm(struct mm_struct *mm)
1082 {
1083 #ifndef __SMP__
1084         if(mm->context != NO_CONTEXT) {
1085 #endif
1086                 flush_user_windows();
1087                 hyper_flush_unconditional_combined();
1088                 hyper_flush_whole_icache();
1089 #ifndef __SMP__
1090         }
1091 #endif
1092 }
1093 
1094 static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1095 {
1096 #ifndef __SMP__
1097         if(mm->context != NO_CONTEXT) {
1098 #endif
1099                 flush_user_windows();
1100                 hyper_flush_unconditional_combined();
1101                 hyper_flush_whole_icache();
1102 #ifndef __SMP__
1103         }
1104 #endif
1105 }
1106 
1107 /* HyperSparc requires a valid mapping where we are about to flush
1108  * in order to check for a physical tag match during the flush.
1109  */
1110 static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1111 {
1112         struct mm_struct *mm = vma->vm_mm;
1113         volatile unsigned long clear;
1114         int octx;
1115 
1116 #ifndef __SMP__
1117         if(mm->context != NO_CONTEXT) {
1118 #endif
1119                 octx = srmmu_get_context();
1120                 flush_user_windows();
1121                 srmmu_set_context(mm->context);
1122                 hyper_flush_whole_icache();
1123                 if(!srmmu_hwprobe(page))
1124                         goto no_mapping;
1125                 hyper_flush_cache_page(page);
1126         no_mapping:
1127                 clear = srmmu_get_fstatus();
1128                 srmmu_set_context(octx);
1129 #ifndef __SMP__
1130         }
1131 #endif
1132 }
1133 
1134 /* HyperSparc is copy-back. */
1135 static void hypersparc_flush_page_to_ram(unsigned long page)
1136 {
1137         volatile unsigned long clear;
1138 
1139         if(srmmu_hwprobe(page))
1140                 hyper_flush_cache_page(page);
1141         clear = srmmu_get_fstatus();
1142 }
1143 
1144 /* HyperSparc is IO cache coherent. */
1145 static void hypersparc_flush_page_for_dma(unsigned long page)
1146 {
1147         volatile unsigned long clear;
1148 
1149         if(srmmu_hwprobe(page))
1150                 hyper_flush_cache_page(page);
1151         clear = srmmu_get_fstatus();
1152 }
1153 
1154 static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
1155 {
1156         volatile unsigned long clear;
1157 
1158         if(srmmu_hwprobe(page))
1159                 hyper_flush_cache_page(page);
1160         clear = srmmu_get_fstatus();
1161 }
1162 
1163 static void hypersparc_flush_tlb_all(void)
1164 {
1165         module_stats.invall++;
1166         srmmu_flush_whole_tlb();
1167 }
1168 
1169 static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
1170 {
1171         int octx;
1172 
1173         module_stats.invmm++;
1174 #ifndef __SMP__
1175         if(mm->context != NO_CONTEXT) {
1176 #endif
1177 
1178                 octx = srmmu_get_context();
1179                 srmmu_set_context(mm->context);
1180                 srmmu_flush_tlb_ctx();
1181                 srmmu_set_context(octx);
1182 
1183 #ifndef __SMP__
1184         }
1185 #endif
1186 }
1187 
1188 static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1189 {
1190         int octx;
1191 
1192         module_stats.invrnge++;
1193 #ifndef __SMP__
1194         if(mm->context != NO_CONTEXT) {
1195 #endif
1196 
1197                 octx = srmmu_get_context();
1198                 srmmu_set_context(mm->context);
1199                 start &= SRMMU_PMD_MASK;
1200                 while(start < end) {
1201                         srmmu_flush_tlb_segment(start);
1202                         start += SRMMU_PMD_SIZE;
1203                 }
1204                 srmmu_set_context(octx);
1205 
1206 #ifndef __SMP__
1207         }
1208 #endif
1209 }
1210 
1211 static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1212 {
1213         struct mm_struct *mm = vma->vm_mm;
1214         int octx;
1215 
1216         module_stats.invpg++;
1217 #ifndef __SMP__
1218         if(mm->context != NO_CONTEXT) {
1219 #endif
1220 
1221                 octx = srmmu_get_context();
1222                 srmmu_set_context(mm->context);
1223                 srmmu_flush_tlb_page(page);
1224                 srmmu_set_context(octx);
1225 
1226 #ifndef __SMP__
1227         }
1228 #endif
1229 }
1230 
1231 static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
1232 {
1233         srmmu_flush_tlb_page(page);
1234 }
1235 
1236 static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
1237 {
1238         hyper_flush_whole_icache();
1239         srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
1240 }
1241 
1242 static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) 
1243 {
1244         if(tsk->mm->context != NO_CONTEXT) {
1245                 hyper_flush_whole_icache();
1246                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1247         }
1248 }
1249 
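      /* WRPSR xors its two operands, so the first wr below toggles PSR_ET
       * off (traps disabled), the swap then updates the pte atomically, and
       * the second wr (xor with 0x0) restores the original psr.  The nops
       * cover the WRPSR delay slots.
       */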
1250 static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
1251 {
1252         /* xor is your friend */
1253         __asm__ __volatile__("rd        %%psr, %%g1\n\t"
1254                              "wr        %%g1, %4, %%psr\n\t"
1255                              "nop; nop; nop;\n\t"
1256                              "swap      [%0], %1\n\t"
1257                              "wr        %%g1, 0x0, %%psr\n\t"
1258                              "nop; nop; nop;\n\t" :
1259                              "=r" (ptep), "=r" (pteval) :
1260                              "0" (ptep), "1" (pteval), "i" (PSR_ET) :
1261                              "g1");
1262 }
1263 
1264 static void hypersparc_switch_to_context(struct task_struct *tsk)
1265 {
1266         /* Kernel threads can execute in any context and so can tasks
1267          * sleeping in the middle of exiting. If this task has already
 1268          * been allocated a piece of the mmu real estate, just jump to
1269          * it.
1270          */
1271         hyper_flush_whole_icache();
1272         if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
1273            (tsk->flags & PF_EXITING))
1274                 return;
1275         if(tsk->mm->context == NO_CONTEXT) {
1276                 alloc_context(tsk->mm);
1277                 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
1278         }
1279         srmmu_set_context(tsk->mm->context);
1280 }
1281 
1282 /* IOMMU things go here. */
1283 
1284 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1285 static unsigned long first_dvma_page, last_dvma_page;
1286 
1287 #define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
1288 #define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
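/* The iopte encoding mirrors the cpu-side SRMMU pte encoding used in the
 * rest of this file: the page's physical address is stored shifted right
 * by 4 and masked to the IOPTE_PAGE field, the cacheable/write/valid
 * permission bits are OR'd in, and the IOPTE_WAZ (presumably "write as
 * zero") bits are forced clear.  As a worked example, ignoring the field
 * masks, a page at physical 0x00012000 becomes
 * (0x00012000 >> 4) | IOPERM, i.e. 0x1200 | IOPERM.
 */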
1289 
1290 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
1291 {
1292         unsigned long first = first_dvma_page;
1293         unsigned long last = last_dvma_page;
1294         iopte_t *iopte;
1295 
1296         iopte = iommu->page_table;
1297         iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
1298         while(first <= last) {
1299                 iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
1300                 first += PAGE_SIZE;
1301         }
1302 }
1303 
1304 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
1305 {
1306         pgd_t *pgdp;
1307         pmd_t *pmdp;
1308         pte_t *ptep;
1309         unsigned long end = start + size;
1310 
1311         while(start < end) {
1312                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1313                 pmdp = srmmu_pmd_offset(pgdp, start);
1314                 ptep = srmmu_pte_offset(pmdp, start);
1315                 pte_val(*ptep) &= ~SRMMU_CACHE;
1316                 start += PAGE_SIZE;
1317         }
1318 }
1319 
1320 unsigned long iommu_init(int iommund, unsigned long memory_start,
1321                          unsigned long memory_end, struct linux_sbus *sbus)
1322 {
1323         int impl, vers, ptsize;
1324         unsigned long tmp;
1325         struct iommu_struct *iommu;
1326         struct linux_prom_registers iommu_promregs[PROMREG_MAX];
1327 
1328         memory_start = LONG_ALIGN(memory_start);
1329         iommu = (struct iommu_struct *) memory_start;
1330         memory_start += sizeof(struct iommu_struct);
1331         prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
1332         iommu->regs = (struct iommu_regs *)
1333                 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
1334                                "IOMMU registers", iommu_promregs[0].which_io, 0x0);
1335         if(!iommu->regs)
1336                 panic("Cannot map IOMMU registers.");
1337         impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
1338         vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
1339         tmp = iommu->regs->control;
1340         tmp &= ~(IOMMU_CTRL_RNGE);
1341         tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
1342         iommu->regs->control = tmp;
1343         iommu_invalidate(iommu->regs);
1344         iommu->plow = iommu->start = 0xfc000000;
1345         iommu->end = 0xffffffff;
1346 
1347         /* Allocate IOMMU page table */
1348         ptsize = iommu->end - iommu->start + 1;
1349         ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
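        /* A sanity check on the numbers just computed: the 64MB window
         * programmed above (0xfc000000 - 0xffffffff) with 4K pages gives
         * 16384 ioptes, so assuming a 4-byte iopte_t the table is 64KB
         * and, per the alignment dance below, must itself sit on a 64KB
         * (size-aligned) boundary for the IOMMU base register. */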
1350 
1351         /* Stupid alignment constraints give me a headache. */
1352         memory_start = PAGE_ALIGN(memory_start);
1353         memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
1354         iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
1355         memory_start += ptsize;
1356 
1357         /* Initialize new table. */
1358         flush_cache_all();
1359         srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
1360         flush_tlb_all();
1361         memset(iommu->page_table, 0, ptsize);
1362         srmmu_map_dvma_pages_for_iommu(iommu);
1363         iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
1364         iommu_invalidate(iommu->regs);
1365 
1366         sbus->iommu = iommu;
1367         printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
1368                impl, vers, iommu->page_table, ptsize);
1369         return memory_start;
1370 }
1371 
1372 static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1373 {
1374         struct iommu_struct *iommu = sbus->iommu;
1375         unsigned long page = (unsigned long) vaddr;
1376         unsigned long start, end, offset;
1377         iopte_t *iopte;
1378 
1379         offset = page & ~PAGE_MASK;
1380         page &= PAGE_MASK;
1381 
1382         start = iommu->plow;
1383         end = KADB_DEBUGGER_BEGVM; /* Don't step on kadb/prom. */
1384         iopte = iommu->lowest;
1385         while(start < end) {
1386                 if(!(iopte_val(*iopte) & IOPTE_VALID))
1387                         break;
1388                 iopte++;
1389                 start += PAGE_SIZE;
1390         }
1391 
1392         flush_page_for_dma(page);
1393         vaddr = (char *) (start | offset);
1394         iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1395         iommu_invalidate_page(iommu->regs, start);
1396         iommu->lowest = iopte + 1;
1397         iommu->plow = start + PAGE_SIZE;
1398 
1399         return vaddr;
1400 }
1401 
1402 static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1403 {
1404         struct iommu_struct *iommu = sbus->iommu;
1405         unsigned long page, start, end, offset;
1406         iopte_t *iopte = iommu->lowest;
1407 
1408         start = iommu->plow;
1409         end = KADB_DEBUGGER_BEGVM;
1410         while(sz >= 0) {
1411                 page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
1412                 offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
1413                 while(start < end) {
1414                         if(!(iopte_val(*iopte) & IOPTE_VALID))
1415                                 break;
1416                         iopte++;
1417                         start += PAGE_SIZE;
1418                 }
1419                 if(start == KADB_DEBUGGER_BEGVM)
1420                         panic("Wheee, iomapping overflow.");
1421                 flush_page_for_dma(page);
1422                 sg[sz].alt_addr = (char *) (start | offset);
1423                 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1424                 iommu_invalidate_page(iommu->regs, start);
1425                 iopte++;
1426                 start += PAGE_SIZE;
1427                 sz--;
1428         }
1429         iommu->lowest = iopte;
1430         iommu->plow = start;
1431 }
1432 
1433 static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1434 {
1435         struct iommu_struct *iommu = sbus->iommu;
1436         unsigned long page = (unsigned long) vaddr;
1437         iopte_t *iopte;
1438 
1439         if(len > PAGE_SIZE)
1440                 panic("Can only handle page sized IOMMU mappings.");
1441         page &= PAGE_MASK;
1442         iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1443         iopte_val(*iopte) = 0;
1444         iommu_invalidate_page(iommu->regs, page);
1445         if(iopte < iommu->lowest) {
1446                 iommu->lowest = iopte;
1447                 iommu->plow = page;
1448         }
1449 }
1450 
1451 static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1452 {
1453         struct iommu_struct *iommu = sbus->iommu;
1454         unsigned long page;
1455         iopte_t *iopte;
1456 
1457         while(sz >= 0) {
1458                 page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
1459                 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1460                 iopte_val(*iopte) = 0;
1461                 iommu_invalidate_page(iommu->regs, page);
1462                 if(iopte < iommu->lowest) {
1463                         iommu->lowest = iopte;
1464                         iommu->plow = page;
1465                 }
1466                 sg[sz].alt_addr = 0;
1467                 sz--;
1468         }
1469 }
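
/* A minimal, illustrative sketch (not part of this file) of how a SBUS
 * driver would go through the four hooks above once ld_mmu_srmmu() has
 * installed them; 'buf', 'len' and 'sbus' here are assumed to come from
 * the caller.
 */
#if 0
        char *dvma = mmu_get_scsi_one(buf, len, sbus);
        /* ... hand dvma to the device and run the transfer ... */
        mmu_release_scsi_one(dvma, len, sbus);
#endif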
1470 
1471 static unsigned long mempool;
1472 
1473 /* NOTE: All of this startup code assumes the low 16mb (approx.) of
1474  *       kernel mappings are done with one single contiguous chunk of
1475  *       ram.  On small ram machines (classics mainly) we only get
1476  *       around 8mb mapped for us.
1477  */
1478 
1479 static unsigned long kbpage;
1480 
1481 /* Some dirty hacks to abstract away the painful boot up init. */
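/* Until srmmu_map_kernel() below has built srmmu_map[], the only
 * virtual-to-physical relationship we can count on is that the kernel
 * image sits in one physically contiguous chunk whose first page is
 * kbpage, so these "early" variants simply translate by a linear offset.
 * The *_set helpers apply the descriptor encoding used throughout this
 * file: the next-level table's physical address shifted right by 4 with
 * the entry-type bits OR'd into the low end.
 */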
1482 static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
1483 {
1484         return ((vaddr - PAGE_OFFSET) + kbpage);
1485 }
1486 
1487 static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
1488 {
1489         srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
1490 }
1491 
1492 static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
1493 {
1494         srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
1495 }
1496 
1497 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
1498 {
1499         return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1500 }
1501 
1502 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
1503 {
1504         return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1505 }
1506 
1507 static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
1508 {
1509         return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
1510 }
1511 
1512 static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
1513 {
1514         return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
1515 }
1516 
1517 /* Allocate a block of RAM which is aligned to its size.
1518  * This procedure can be used until the call to mem_init().
1519  */
1520 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
1521 {
1522         unsigned long mask = size - 1;
1523         unsigned long ret;
1524 
1525         if(!size)
1526                 return 0x0;
1527         if(size & mask) {
1528                 prom_printf("panic: srmmu_init_alloc botch\n");
1529                 prom_halt();
1530         }
1531         ret = (*kbrk + mask) & ~mask;
1532         *kbrk = ret + size;
1533         memset((void*) ret, 0, size);
1534         return (void*) ret;
1535 }
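
/* Note that "aligned to its size" implies size must be a power of two
 * (that is what the size & mask test above rejects with the botch
 * message); the callers below use it to carve naturally aligned pmd/pte
 * tables and the context table out of mempool.
 */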
1536 
1537 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
1538 {
1539         pgd_t *pgdp;
1540         pmd_t *pmdp;
1541         pte_t *ptep;
1542 
1543         while(start < end) {
1544                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1545                 if(srmmu_pgd_none(*pgdp)) {
1546                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1547                         srmmu_early_pgd_set(pgdp, pmdp);
1548                 }
1549                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1550                 if(srmmu_pmd_none(*pmdp)) {
1551                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1552                         srmmu_early_pmd_set(pmdp, ptep);
1553                 }
1554                 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
1555         }
1556 }
1557 
1558 /* This is much cleaner than poking around physical address space
1559  * looking at the prom's page table directly which is what most
1560  * other OS's do.  Yuck... this is much better.
1561  */
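/* The approach: for every virtual address the prom has mapped,
 * srmmu_hwprobe() returns the pte the hardware itself would use.  If the
 * address is pmd-aligned and probing the last page of that pmd-sized
 * region gives back the same value, the prom evidently installed a
 * single segment-level pte, so it is copied straight into our pmd
 * (likewise at the pgd level for region-sized mappings); otherwise page
 * tables are allocated and the pte is copied at the bottom level.
 */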
1562 void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
1563 {
1564         pgd_t *pgdp;
1565         pmd_t *pmdp;
1566         pte_t *ptep;
1567         int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
1568         unsigned long prompte;
1569 
1570         while(start <= end) {
1571                 if (start == 0)
1572                         break; /* probably wrap around */
1573                 if(start == 0xfef00000)
1574                         start = KADB_DEBUGGER_BEGVM;
1575                 if(!(prompte = srmmu_hwprobe(start))) {
1576                         start += PAGE_SIZE;
1577                         continue;
1578                 }
1579     
1580                 /* A red snapper, see what it really is. */
1581                 what = 0;
1582     
1583                 if(!(start & ~(SRMMU_PMD_MASK))) {
1584                         if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
1585                                 what = 1;
1586                 }
1587     
1588                 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1589                         if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1590                            prompte)
1591                                 what = 2;
1592                 }
1593     
1594                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1595                 if(what == 2) {
1596                         pgd_val(*pgdp) = prompte;
1597                         start += SRMMU_PGDIR_SIZE;
1598                         continue;
1599                 }
1600                 if(srmmu_pgd_none(*pgdp)) {
1601                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1602                         srmmu_early_pgd_set(pgdp, pmdp);
1603                 }
1604                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1605                 if(what == 1) {
1606                         pmd_val(*pmdp) = prompte;
1607                         start += SRMMU_PMD_SIZE;
1608                         continue;
1609                 }
1610                 if(srmmu_pmd_none(*pmdp)) {
1611                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1612                         srmmu_early_pmd_set(pmdp, ptep);
1613                 }
1614                 ptep = srmmu_early_pte_offset(pmdp, start);
1615                 pte_val(*ptep) = prompte;
1616                 start += PAGE_SIZE;
1617         }
1618 }
1619 
1620 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1621 {
1622         unsigned long start;
1623         pgprot_t dvma_prot;
1624         pgd_t *pgdp;
1625         pmd_t *pmdp;
1626         pte_t *ptep;
1627 
1628         start = DVMA_VADDR;
1629         if (viking_mxcc_present)
1630                 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
1631         else
1632                 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1633         while(first <= last) {
1634                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1635                 pmdp = srmmu_pmd_offset(pgdp, start);
1636                 ptep = srmmu_pte_offset(pmdp, start);
1637 
1638                 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1639 
1640                 first += PAGE_SIZE;
1641                 start += PAGE_SIZE;
1642         }
1643 
1644         /* Uncache DVMA pages. */
1645         if (!viking_mxcc_present) {
1646                 first = first_dvma_page;
1647                 last = last_dvma_page;
1648                 while(first <= last) {
1649                         pgdp = srmmu_pgd_offset(init_task.mm, first);
1650                         pmdp = srmmu_pmd_offset(pgdp, first);
1651                         ptep = srmmu_pte_offset(pmdp, first);
1652                         pte_val(*ptep) &= ~SRMMU_CACHE;
1653                         first += PAGE_SIZE;
1654                 }
1655         }
1656 }
1657 
1658 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1659 {
1660         unsigned long last_page;
1661         int srmmu_bank, phys_bank, i;
1662         pgd_t *pgdp;
1663         pmd_t *pmdp;
1664         pte_t *ptep;
1665 
1666         end = PAGE_ALIGN(end);
1667 
1668         if(start == (KERNBASE + PAGE_SIZE)) {
1669                 unsigned long pte;
1670                 unsigned long tmp;
1671 
1672                 pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
1673                 pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
1674                 ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
1675 
1676                 /* Put a real mapping in for the KERNBASE page. */
1677                 tmp = kbpage;
1678                 pte = (tmp) >> 4;
1679                 pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1680                 pte_val(*ptep) = pte;
1681         }
1682 
1683         /* Copy over mappings prom already gave us. */
1684         last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1685         while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
1686                 unsigned long tmp;
1687 
1688                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1689                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1690                 ptep = srmmu_early_pte_offset(pmdp, start);
1691                 tmp = srmmu_hwprobe(start);
1692                 tmp &= ~(0xff);
1693                 tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1694                 pte_val(*ptep) = tmp;
1695                 start += PAGE_SIZE;
1696                 tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1697 
1698                 /* Never cross a bank boundary, thank you. */
1699                 if(tmp != last_page + PAGE_SIZE)
1700                         break;
1701                 last_page = tmp;
1702         }
1703 
1704         /* Ok, that was assumed to be one full bank, begin
1705          * construction of srmmu_map[].
1706          */
1707         for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
1708                 if(kbpage >= sp_banks[phys_bank].base_addr &&
1709                    (kbpage <
1710                     (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
1711                         break; /* found it */
1712         }
1713         srmmu_bank = 0;
1714         srmmu_map[srmmu_bank].vbase = KERNBASE;
1715         srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
1716         srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
1717         if(kbpage != sp_banks[phys_bank].base_addr) {
1718                 prom_printf("Detected PenguinPages, getting out of here.\n");
1719                 prom_halt();
1720 #if 0
1721                 srmmu_map[srmmu_bank].pbase = kbpage;
1722                 srmmu_map[srmmu_bank].size -=
1723                         (kbpage - sp_banks[phys_bank].base_addr);
1724 #endif
1725         }
1726         /* Prom didn't map all of this first bank, fill
1727          * in the rest by hand.
1728          */
1729         while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
1730                 unsigned long pteval;
1731 
1732                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1733                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1734                 ptep = srmmu_early_pte_offset(pmdp, start);
1735 
1736                 pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
1737                 pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1738                 pte_val(*ptep) = pteval;
1739                 start += PAGE_SIZE;
1740         }
1741 
1742         /* Mark this sp_bank invalid... */
1743         sp_banks[phys_bank].base_addr |= 1;
1744         srmmu_bank++;
1745 
1746         /* Now, deal with what is left. */
1747         while(start < end) {
1748                 unsigned long baddr;
1749                 int btg;
1750 
1751                 /* Find a usable cluster of physical ram. */
1752                 for(i=0; sp_banks[i].num_bytes != 0; i++)
1753                         if(!(sp_banks[i].base_addr & 1))
1754                                 break;
1755                 if(sp_banks[i].num_bytes == 0)
1756                         break;
1757 
1758                 /* Add it to srmmu_map */
1759                 srmmu_map[srmmu_bank].vbase = start;
1760                 srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
1761                 srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
1762                 srmmu_bank++;
1763 
1764                 btg = sp_banks[i].num_bytes;
1765                 baddr = sp_banks[i].base_addr;
1766                 while(btg) {
1767                         pgdp = srmmu_pgd_offset(init_task.mm, start);
1768                         pmdp = srmmu_early_pmd_offset(pgdp, start);
1769                         ptep = srmmu_early_pte_offset(pmdp, start);
1770                         pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1771                         pte_val(*ptep) |= (baddr >> 4);
1772 
1773                         baddr += PAGE_SIZE;
1774                         start += PAGE_SIZE;
1775                         btg -= PAGE_SIZE;
1776                 }
1777                 sp_banks[i].base_addr |= 1;
1778         }
1779         if(start < end) {
1780                 prom_printf("weird, didn't use all of physical memory... ");
1781                 prom_halt();
1782         }
1783         for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
1784                 sp_banks[phys_bank].base_addr &= ~1;
1785 #if 0
1786         for(i = 0; srmmu_map[i].size != 0; i++) {
1787                 prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
1788                             i, srmmu_map[i].vbase,
1789                             srmmu_map[i].pbase, srmmu_map[i].size);
1790         }
1791         prom_getchar();
1792         for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1793                 prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
1794                             i,
1795                             sp_banks[i].base_addr,
1796                             sp_banks[i].num_bytes);
1797         }
1798         prom_getchar();
1799         prom_halt();
1800 #endif
1801 }
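
/* At this point srmmu_map[] describes, bank by bank, which chunk of
 * physical ram backs each piece of the linear kernel mapping that starts
 * at KERNBASE; the srmmu_v2p()/srmmu_p2v() helpers installed as
 * mmu_v2p/mmu_p2v below presumably translate by walking this table.
 */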
1802 
1803 /* Paging initialization on the Sparc Reference MMU. */
1804 extern unsigned long free_area_init(unsigned long, unsigned long);
1805 extern unsigned long sparc_context_init(unsigned long, int);
1806 
1807 extern int physmem_mapped_contig;
1808 extern int linux_num_cpus;
1809 
1810 void (*poke_srmmu)(void);
1811 
1812 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1813 {
1814         unsigned long ptables_start, first_mapped_page;
1815         int i, cpunode;
1816         char node_str[128];
1817         pgd_t *pgdp;
1818         pmd_t *pmdp;
1819         pte_t *ptep;
1820 
1821         physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
1822 
1823 #if CONFIG_AP1000
1824         printk("Forcing num_contexts to 1024\n");
1825         num_contexts = 1024;
1826 #else
1827         /* Find the number of contexts on the srmmu. */
1828         cpunode = prom_getchild(prom_root_node);
1829         num_contexts = 0;
1830         while((cpunode = prom_getsibling(cpunode)) != 0) {
1831                 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1832                 if(!strcmp(node_str, "cpu")) {
1833                         num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1834                         break;
1835                 }
1836         }
1837 #endif
1838         if(!num_contexts) {
1839                 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1840                 prom_halt();
1841         }
1842                 
1843         ptables_start = mempool = PAGE_ALIGN(start_mem);
1844         memset(swapper_pg_dir, 0, PAGE_SIZE);
1845         first_mapped_page = KERNBASE;
1846         kbpage = srmmu_hwprobe(KERNBASE);
1847         if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
1848                 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1849                 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1850                 kbpage -= PAGE_SIZE;
1851                 first_mapped_page += PAGE_SIZE;
1852         } else
1853                 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1854 
1855         srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1856 #if CONFIG_SUN_IO
1857         srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1858         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1859 #endif
1860 
1861         /* Steal DVMA pages now, I still don't like how we waste all this. */
1862         mempool = PAGE_ALIGN(mempool);
1863         first_dvma_page = mempool;
1864         last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1865         mempool = last_dvma_page + PAGE_SIZE;
1866 
1867 #if CONFIG_AP1000
1868         ap_inherit_mappings();
1869 #else
1870         srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1871 #endif
1872         srmmu_map_kernel(first_mapped_page, end_mem);
1873 #if CONFIG_SUN_IO
1874         srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1875 #endif
1876         srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1877         srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
1878         for(i = 0; i < num_contexts; i++)
1879                 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1880 
1881         start_mem = PAGE_ALIGN(mempool);
1882 
1883         /* Some SRMMU's are _very_ stupid indeed. */
1884         if(!can_cache_ptables) {
1885                 for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
1886                         pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
1887                         pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
1888                         ptep = srmmu_early_pte_offset(pmdp, ptables_start);
1889                         pte_val(*ptep) &= ~SRMMU_CACHE;
1890                 }
1891 
1892                 pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
1893                 pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
1894                 ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
1895                 pte_val(*ptep) &= ~SRMMU_CACHE;
1896         }
1897 
1898         flush_cache_all();
1899         srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1900         flush_tlb_all();
1901         poke_srmmu();
1902 
1903         start_mem = sparc_context_init(start_mem, num_contexts);
1904         start_mem = free_area_init(start_mem, end_mem);
1905 
1906         return PAGE_ALIGN(start_mem);
1907 }
1908 
1909 static char srmmuinfo[512];
1910 
1911 static char *srmmu_mmu_info(void)
1912 {
1913         sprintf(srmmuinfo, "MMU type\t: %s\n"
1914                 "invall\t\t: %d\n"
1915                 "invmm\t\t: %d\n"
1916                 "invrnge\t\t: %d\n"
1917                 "invpg\t\t: %d\n"
1918                 "contexts\t: %d\n"
1919                 "big_chunks\t: %d\n"
1920                 "little_chunks\t: %d\n",
1921                 srmmu_name,
1922                 module_stats.invall,
1923                 module_stats.invmm,
1924                 module_stats.invrnge,
1925                 module_stats.invpg,
1926                 num_contexts,
1927 #if 0
1928                 num_big_chunks,
1929                 num_little_chunks
1930 #else
1931                 0, 0
1932 #endif
1933                 );
1934         return srmmuinfo;
1935 }
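
/* The text produced above is handed out through the mmu_info hook set in
 * ld_mmu_srmmu() below.  A hypothetical sample (figures made up, tab
 * alignment approximate):
 *
 *   MMU type      : ROSS HyperSparc
 *   invall        : 12
 *   invmm         : 340
 *   invrnge       : 88
 *   invpg         : 1024
 *   contexts      : 4096
 *   big_chunks    : 0
 *   little_chunks : 0
 */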
1936 
1937 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1938 {
1939 }
1940 
1941 static void srmmu_exit_hook(void)
1942 {
1943         struct ctx_list *ctx_old;
1944         struct mm_struct *mm = current->mm;
1945 
1946         if(mm->context != NO_CONTEXT) {
1947                 flush_cache_mm(mm);
1948                 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1949                 flush_tlb_mm(mm);
1950                 ctx_old = ctx_list_pool + mm->context;
1951                 remove_from_ctx_list(ctx_old);
1952                 add_to_free_ctxlist(ctx_old);
1953                 mm->context = NO_CONTEXT;
1954         }
1955 }
1956 
1957 static void srmmu_flush_hook(void)
1958 {
1959         if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1960                 alloc_context(current->mm);
1961                 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1962                 srmmu_set_context(current->mm->context);
1963         }
1964 }
1965 
1966 static void hypersparc_exit_hook(void)
1967 {
1968         struct ctx_list *ctx_old;
1969         struct mm_struct *mm = current->mm;
1970 
1971         if(mm->context != NO_CONTEXT) {
1972                 /* HyperSparc is copy-back, any data for this
1973                  * process in a modified cache line is stale
1974                  * and must be written back to main memory now
1975                  * else we eat shit later big time.
1976                  */
1977                 flush_cache_mm(mm);
1978                 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1979                 flush_tlb_mm(mm);
1980                 ctx_old = ctx_list_pool + mm->context;
1981                 remove_from_ctx_list(ctx_old);
1982                 add_to_free_ctxlist(ctx_old);
1983                 mm->context = NO_CONTEXT;
1984         }
1985 }
1986 
1987 static void hypersparc_flush_hook(void)
1988 {
1989         if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1990                 alloc_context(current->mm);
1991                 flush_cache_mm(current->mm);
1992                 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1993                 srmmu_set_context(current->mm->context);
1994         }
1995 }
1996 
1997 /* Init various srmmu chip types. */
1998 void srmmu_is_bad(void)
1999 {
2000         prom_printf("Could not determine SRMMU chip type.\n");
2001         prom_halt();
2002 }
2003 
2004 void poke_hypersparc(void)
2005 {
2006         volatile unsigned long clear;
2007         unsigned long mreg = srmmu_get_mmureg();
2008 
2009         hyper_flush_unconditional_combined();
2010 
2011         mreg &= ~(HYPERSPARC_CWENABLE);
2012         mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2013         mreg |= (HYPERSPARC_CMODE);
2014 
2015         srmmu_set_mmureg(mreg);
2016         hyper_clear_all_tags();
2017 
2018         put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2019         hyper_flush_whole_icache();
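        /* The two throwaway reads below presumably clear any fault
         * address/status the MMU latched while the caches were being
         * reconfigured; the values themselves are unused. */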
2020         clear = srmmu_get_faddr();
2021         clear = srmmu_get_fstatus();
2022 }
2023 
2024 void init_hypersparc(void)
2025 {
2026         unsigned long mreg = srmmu_get_mmureg();
2027 
2028         srmmu_name = "ROSS HyperSparc";
2029         can_cache_ptables = 0;
2030         if(mreg & HYPERSPARC_CSIZE) {
2031                 hyper_cache_size = (256 * 1024);
2032                 hyper_line_size = 64;
2033         } else {
2034                 hyper_cache_size = (128 * 1024);
2035                 hyper_line_size = 32;
2036         }
2037 
2038         flush_cache_all = hypersparc_flush_cache_all;
2039         flush_cache_mm = hypersparc_flush_cache_mm;
2040         flush_cache_range = hypersparc_flush_cache_range;
2041         flush_cache_page = hypersparc_flush_cache_page;
2042 
2043         flush_tlb_all = hypersparc_flush_tlb_all;
2044         flush_tlb_mm = hypersparc_flush_tlb_mm;
2045         flush_tlb_range = hypersparc_flush_tlb_range;
2046         flush_tlb_page = hypersparc_flush_tlb_page;
2047 
2048         flush_page_to_ram = hypersparc_flush_page_to_ram;
2049         flush_page_for_dma = hypersparc_flush_page_for_dma;
2050         flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
2051         flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
2052 
2053         ctxd_set = hypersparc_ctxd_set;
2054         switch_to_context = hypersparc_switch_to_context;
2055         mmu_exit_hook = hypersparc_exit_hook;
2056         mmu_flush_hook = hypersparc_flush_hook;
2057         sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
2058         set_pte = hypersparc_set_pte;
2059         poke_srmmu = poke_hypersparc;
2060 }
2061 
2062 void poke_cypress(void)
2063 {
2064         unsigned long mreg = srmmu_get_mmureg();
2065 
2066         mreg &= ~CYPRESS_CMODE;
2067         mreg |= CYPRESS_CENABLE;
2068         srmmu_set_mmureg(mreg);
2069 }
2070 
2071 void init_cypress_common(void)
2072 {
2073         can_cache_ptables = 0;
2074         flush_tlb_all = cypress_flush_tlb_all;
2075         flush_tlb_mm = cypress_flush_tlb_mm;
2076         flush_tlb_page = cypress_flush_tlb_page;
2077         flush_tlb_range = cypress_flush_tlb_range;
2078         poke_srmmu = poke_cypress;
2079 
2080         /* XXX Need to write cache flushes for this one... XXX */
2081 
2082 }
2083 
2084 void init_cypress_604(void)
2085 {
2086         srmmu_name = "ROSS Cypress-604(UP)";
2087         srmmu_modtype = Cypress;
2088         init_cypress_common();
2089 }
2090 
2091 void init_cypress_605(unsigned long mrev)
2092 {
2093         srmmu_name = "ROSS Cypress-605(MP)";
2094         if(mrev == 0xe) {
2095                 srmmu_modtype = Cypress_vE;
2096                 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2097         } else {
2098                 if(mrev == 0xd) {
2099                         srmmu_modtype = Cypress_vD;
2100                         hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2101                 } else {
2102                         srmmu_modtype = Cypress;
2103                 }
2104         }
2105         init_cypress_common();
2106 }
2107 
2108 void poke_swift(void)
2109 {
2110         unsigned long mreg = srmmu_get_mmureg();
2111 
2112         /* Clear any crap from the cache or else... */
2113         swift_idflash_clear();
2114         mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
2115 
2116         /* The Swift branch folding logic is completely broken.  At
2117          * trap time, if things are just right, it can mistakenly
2118          * think that a trap is coming from kernel mode when in fact
2119          * it is coming from user mode (it mis-executes the branch in
2120          * the trap code).  So you see things like crashme completely
2121          * hosing your machine which is completely unacceptable.  Turn
2122          * this shit off... nice job Fujitsu.
2123          */
2124         mreg &= ~(SWIFT_BF);
2125         srmmu_set_mmureg(mreg);
2126 }
2127 
2128 #define SWIFT_MASKID_ADDR  0x10003018
2129 void init_swift(void)
2130 {
2131         unsigned long swift_rev;
2132 
2133         __asm__ __volatile__("lda [%1] %2, %0\n\t"
2134                              "srl %0, 0x18, %0\n\t" :
2135                              "=r" (swift_rev) :
2136                              "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2137         srmmu_name = "Fujitsu Swift";
2138         switch(swift_rev) {
2139         case 0x11:
2140         case 0x20:
2141         case 0x23:
2142         case 0x30:
2143                 srmmu_modtype = Swift_lots_o_bugs;
2144                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2145                 /* Gee george, I wonder why Sun is so hush hush about
2146                  * this hardware bug... really brain-damaged stuff going
2147                  * on here.  However I think we can find a way to avoid
2148                  * all of the workaround overhead under Linux.  Basically,
2149                  * any page fault can cause kernel pages to become user
2150                  * accessible (the mmu gets confused and clears some of
2151                  * the ACC bits in kernel ptes).  Aha, sounds pretty
2152                  * horrible eh?  But wait, after extensive testing it appears
2153                  * that if you use pgd_t level large kernel pte's (like the
2154                  * 4MB pages on the Pentium) the bug does not get tripped
2155                  * at all.  This avoids almost all of the major overhead.
2156                  * Welcome to a world where your vendor tells you to
2157                  * "apply this kernel patch" instead of "sorry for the
2158                  * broken hardware, send it back and we'll give you
2159                  * properly functioning parts"
2160                  */
2161                 break;
2162         case 0x25:
2163         case 0x31:
2164                 srmmu_modtype = Swift_bad_c;
2165                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2166                 /* You see Sun allude to this hardware bug but never
2167                  * admit things directly, they'll say things like,
2168                  * "the Swift chip cache problems" or similar.
2169                  */
2170                 break;
2171         default:
2172                 srmmu_modtype = Swift_ok;
2173                 break;
2174         };
2175 
2176         flush_cache_all = swift_flush_cache_all;
2177         flush_cache_mm = swift_flush_cache_mm;
2178         flush_cache_page = swift_flush_cache_page;
2179         flush_cache_range = swift_flush_cache_range;
2180 
2181         flush_tlb_all = swift_flush_tlb_all;
2182         flush_tlb_mm = swift_flush_tlb_mm;
2183         flush_tlb_page = swift_flush_tlb_page;
2184         flush_tlb_range = swift_flush_tlb_range;
2185 
2186         flush_page_to_ram = swift_flush_page_to_ram;
2187         flush_page_for_dma = swift_flush_page_for_dma;
2188         flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
2189         flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
2190 
2191         /* Are you now convinced that the Swift is one of the
2192          * biggest VLSI abortions of all time?  Bravo Fujitsu!
2193          * Fujitsu, the !#?!%$'d up processor people.  I bet if
2194          * you examined the microcode of the Swift you'd find
2195          * XXX's all over the place.
2196          */
2197         poke_srmmu = poke_swift;
2198 }
2199 
2200 void poke_tsunami(void)
2201 {
2202         unsigned long mreg = srmmu_get_mmureg();
2203 
2204         tsunami_flush_icache();
2205         tsunami_flush_dcache();
2206         mreg &= ~TSUNAMI_ITD;
2207         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2208         srmmu_set_mmureg(mreg);
2209 }
2210 
2211 void init_tsunami(void)
2212 {
2213         /* Tsunami's pretty sane, Sun and TI actually got it
2214          * somewhat right this time.  Fujitsu should have
2215          * taken some lessons from them.
2216          */
2217 
2218         srmmu_name = "TI Tsunami";
2219         srmmu_modtype = Tsunami;
2220         can_cache_ptables = 1;
2221 
2222         flush_cache_all = tsunami_flush_cache_all;
2223         flush_cache_mm = tsunami_flush_cache_mm;
2224         flush_cache_page = tsunami_flush_cache_page;
2225         flush_cache_range = tsunami_flush_cache_range;
2226 
2227         flush_tlb_all = tsunami_flush_tlb_all;
2228         flush_tlb_mm = tsunami_flush_tlb_mm;
2229         flush_tlb_page = tsunami_flush_tlb_page;
2230         flush_tlb_range = tsunami_flush_tlb_range;
2231 
2232         flush_page_to_ram = tsunami_flush_page_to_ram;
2233         flush_page_for_dma = tsunami_flush_page_for_dma;
2234         flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
2235         flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
2236 
2237         poke_srmmu = poke_tsunami;
2238 }
2239 
2240 void poke_viking(void)
2241 {
2242         unsigned long mreg = srmmu_get_mmureg();
2243         static int smp_catch = 0;
2244 
2245         if(viking_mxcc_present) {
2246                 unsigned long mxcc_control;
2247 
2248                 __asm__ __volatile__("set -1, %%g2\n\t"
2249                                      "set -1, %%g3\n\t"
2250                                      "stda %%g2, [%1] %2\n\t"
2251                                      "lda [%3] %2, %0\n\t" :
2252                                      "=r" (mxcc_control) :
2253                                      "r" (MXCC_EREG), "i" (ASI_M_MXCC),
2254                                      "r" (MXCC_CREG) : "g2", "g3");
2255                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2256                 mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
2257                 mreg &= ~(VIKING_PCENABLE);
2258                 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
2259                                      "r" (mxcc_control), "r" (MXCC_CREG),
2260                                      "i" (ASI_M_MXCC));
2261                 srmmu_set_mmureg(mreg);
2262                 mreg |= VIKING_TCENABLE;
2263         } else {
2264                 unsigned long bpreg;
2265 
2266                 mreg &= ~(VIKING_TCENABLE);
2267                 if(smp_catch++) {
2268                         /* Must disable mixed-cmd mode here for
2269                          * other cpu's.
2270                          */
2271                         bpreg = viking_get_bpreg();
2272                         bpreg &= ~(VIKING_ACTION_MIX);
2273                         viking_set_bpreg(bpreg);
2274 
2275                         /* Just in case PROM does something funny. */
2276                         msi_set_sync();
2277                 }
2278         }
2279 
2280         viking_unlock_icache();
2281         viking_flush_icache();
2282 #if 0
2283         viking_unlock_dcache();
2284         viking_flush_dcache();
2285 #endif
2286         mreg |= VIKING_SPENABLE;
2287         mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2288         mreg |= VIKING_SBENABLE;
2289         mreg &= ~(VIKING_ACENABLE);
2290 #if CONFIG_AP1000
2291         mreg &= ~(VIKING_SBENABLE);
2292 #endif
2293 #ifdef __SMP__
2294         mreg &= ~(VIKING_SBENABLE);
2295 #endif
2296         srmmu_set_mmureg(mreg);
2297 }
2298 
2299 void init_viking(void)
2300 {
2301         unsigned long mreg = srmmu_get_mmureg();
2302 
2303         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
2304 
2305         if(mreg & VIKING_MMODE) {
2306                 unsigned long bpreg;
2307 
2308                 srmmu_name = "TI Viking";
2309                 viking_mxcc_present = 0;
2310                 can_cache_ptables = 0;
2311 
2312                 bpreg = viking_get_bpreg();
2313                 bpreg &= ~(VIKING_ACTION_MIX);
2314                 viking_set_bpreg(bpreg);
2315 
2316                 msi_set_sync();
2317 
2318                 flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
2319         } else {
2320                 srmmu_name = "TI Viking/MXCC";
2321                 viking_mxcc_present = 1;
2322                 can_cache_ptables = 1;
2323                 flush_cache_page_to_uncache = viking_mxcc_flush_page;
2324         }
2325 
2326         flush_cache_all = viking_flush_cache_all;
2327         flush_cache_mm = viking_flush_cache_mm;
2328         flush_cache_page = viking_flush_cache_page;
2329         flush_cache_range = viking_flush_cache_range;
2330 
2331         flush_tlb_all = viking_flush_tlb_all;
2332         flush_tlb_mm = viking_flush_tlb_mm;
2333         flush_tlb_page = viking_flush_tlb_page;
2334         flush_tlb_range = viking_flush_tlb_range;
2335 
2336         flush_page_to_ram = viking_flush_page_to_ram;
2337         flush_page_for_dma = viking_flush_page_for_dma;
2338         flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
2339 
2340         poke_srmmu = poke_viking;
2341 }
2342 
2343 /* Probe for the srmmu chip version. */
2344 static void get_srmmu_type(void)
2345 {
2346         unsigned long mreg, psr;
2347         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2348 
2349         srmmu_modtype = SRMMU_INVAL_MOD;
2350         hwbug_bitmask = 0;
2351 
2352         mreg = srmmu_get_mmureg(); psr = get_psr();
2353         mod_typ = (mreg & 0xf0000000) >> 28;
2354         mod_rev = (mreg & 0x0f000000) >> 24;
2355         psr_typ = (psr >> 28) & 0xf;
2356         psr_vers = (psr >> 24) & 0xf;
2357 
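        /* mod_typ/mod_rev identify the external MMU/cache module from
         * the MMU control register, psr_typ/psr_vers identify the
         * integer unit; below, HyperSparc and Cypress are told apart by
         * the module fields, and Swift/Viking/Tsunami by the psr
         * implementation/version fields. */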
2358         /* First, check for HyperSparc or Cypress. */
2359         if(mod_typ == 1) {
2360                 switch(mod_rev) {
2361                 case 7:
2362                         /* UP or MP Hypersparc */
2363                         init_hypersparc();
2364                         break;
2365                 case 0:
2366                         /* Uniprocessor Cypress */
2367                         init_cypress_604();
2368                         break;
2369                 case 13:
2370                 case 14:
2371                 case 15:
2372                         /* MP Cypress mmu/cache-controller */
2373                         init_cypress_605(mod_rev);
2374                         break;
2375                 default:
2376                         srmmu_is_bad();
2377                         break;
2378                 };
2379                 return;
2380         }
2381 
2382         /* Next check for Fujitsu Swift. */
2383         if(psr_typ == 0 && psr_vers == 4) {
2384                 init_swift();
2385                 return;
2386         }
2387 
2388         /* Now the Viking family of srmmu. */
2389         if(psr_typ == 4 &&
2390            ((psr_vers == 0) ||
2391             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2392                 init_viking();
2393                 return;
2394         }
2395 
2396         /* Finally the Tsunami. */
2397         if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2398                 init_tsunami();
2399                 return;
2400         }
2401 
2402         /* Oh well */
2403         srmmu_is_bad();
2404 }
2405 
2406 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2407         tsetup_mmu_patchme, rtrap_mmu_patchme;
2408 
2409 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2410         tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2411 
2412 #ifdef __SMP__
2413 extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
2414 #endif
2415 
2416 extern unsigned long srmmu_fault;
2417 
2418 #define PATCH_BRANCH(insn, dest) do { \
2419                 iaddr = &(insn); \
2420                 daddr = &(dest); \
2421                 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2422         } while(0);
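
/* SPARC_BRANCH presumably assembles an unconditional branch whose
 * pc-relative displacement is (daddr - iaddr); writing it over the first
 * instruction at each patch site redirects the window trap handlers and
 * the trap-table fault entries to their SRMMU flavours at run time.
 */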
2423 
2424 static void patch_window_trap_handlers(void)
2425 {
2426         unsigned long *iaddr, *daddr;
2427         
2428         PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2429         PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2430         PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2431         PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2432 #ifdef __SMP__
2433         PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
2434 #endif
2435         PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2436         PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2437         PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2438 }
2439 
2440 #ifdef __SMP__
2441 /* Local cross-calls. */
2442 static void smp_flush_page_for_dma(unsigned long page)
2443 {
2444         xc1((smpfunc_t) local_flush_page_for_dma, page);
2445 }
2446 
2447 static void smp_flush_cache_page_to_uncache(unsigned long page)
2448 {
2449         xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
2450 }
2451 
2452 static void smp_flush_tlb_page_for_cbit(unsigned long page)
2453 {
2454         xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
2455 }
2456 #endif
2457 
2458 /* Load up routines and constants for sun4m mmu */
2459 void ld_mmu_srmmu(void)
2460 {
2461         /* First the constants */
2462         pmd_shift = SRMMU_PMD_SHIFT;
2463         pmd_size = SRMMU_PMD_SIZE;
2464         pmd_mask = SRMMU_PMD_MASK;
2465         pgdir_shift = SRMMU_PGDIR_SHIFT;
2466         pgdir_size = SRMMU_PGDIR_SIZE;
2467         pgdir_mask = SRMMU_PGDIR_MASK;
2468 
2469         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
2470         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
2471         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
2472 
2473         page_none = SRMMU_PAGE_NONE;
2474         page_shared = SRMMU_PAGE_SHARED;
2475         page_copy = SRMMU_PAGE_COPY;
2476         page_readonly = SRMMU_PAGE_RDONLY;
2477         page_kernel = SRMMU_PAGE_KERNEL;
2478         pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2479             
2480         /* Functions */
2481         set_pte = srmmu_set_pte;
2482         switch_to_context = srmmu_switch_to_context;
2483         pmd_align = srmmu_pmd_align;
2484         pgdir_align = srmmu_pgdir_align;
2485         vmalloc_start = srmmu_vmalloc_start;
2486 
2487         pte_page = srmmu_pte_page;
2488         pmd_page = srmmu_pmd_page;
2489         pgd_page = srmmu_pgd_page;
2490 
2491         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
2492 
2493         pte_none = srmmu_pte_none;
2494         pte_present = srmmu_pte_present;
2495         pte_clear = srmmu_pte_clear;
2496 
2497         pmd_none = srmmu_pmd_none;
2498         pmd_bad = srmmu_pmd_bad;
2499         pmd_present = srmmu_pmd_present;
2500         pmd_clear = srmmu_pmd_clear;
2501 
2502         pgd_none = srmmu_pgd_none;
2503         pgd_bad = srmmu_pgd_bad;
2504         pgd_present = srmmu_pgd_present;
2505         pgd_clear = srmmu_pgd_clear;
2506 
2507         mk_pte = srmmu_mk_pte;
2508         pgd_set = srmmu_pgd_set;
2509         mk_pte_io = srmmu_mk_pte_io;
2510         pte_modify = srmmu_pte_modify;
2511         pgd_offset = srmmu_pgd_offset;
2512         pmd_offset = srmmu_pmd_offset;
2513         pte_offset = srmmu_pte_offset;
2514         pte_free_kernel = srmmu_pte_free_kernel;
2515         pmd_free_kernel = srmmu_pmd_free_kernel;
2516         pte_alloc_kernel = srmmu_pte_alloc_kernel;
2517         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
2518         pte_free = srmmu_pte_free;
2519         pte_alloc = srmmu_pte_alloc;
2520         pmd_free = srmmu_pmd_free;
2521         pmd_alloc = srmmu_pmd_alloc;
2522         pgd_free = srmmu_pgd_free;
2523         pgd_alloc = srmmu_pgd_alloc;
2524 
2525         pte_write = srmmu_pte_write;
2526         pte_dirty = srmmu_pte_dirty;
2527         pte_young = srmmu_pte_young;
2528         pte_wrprotect = srmmu_pte_wrprotect;
2529         pte_mkclean = srmmu_pte_mkclean;
2530         pte_mkold = srmmu_pte_mkold;
2531         pte_mkwrite = srmmu_pte_mkwrite;
2532         pte_mkdirty = srmmu_pte_mkdirty;
2533         pte_mkyoung = srmmu_pte_mkyoung;
2534         update_mmu_cache = srmmu_update_mmu_cache;
2535         mmu_exit_hook = srmmu_exit_hook;
2536         mmu_flush_hook = srmmu_flush_hook;
2537         mmu_lockarea = srmmu_lockarea;
2538         mmu_unlockarea = srmmu_unlockarea;
2539 
2540         mmu_get_scsi_one = srmmu_get_scsi_one;
2541         mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
2542         mmu_release_scsi_one = srmmu_release_scsi_one;
2543         mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
2544 
2545         mmu_info = srmmu_mmu_info;
2546         mmu_v2p = srmmu_v2p;
2547         mmu_p2v = srmmu_p2v;
2548 
2549         /* Task struct and kernel stack allocating/freeing. */
2550         alloc_kernel_stack = srmmu_alloc_kernel_stack;
2551         alloc_task_struct = srmmu_alloc_task_struct;
2552         free_kernel_stack = srmmu_free_kernel_stack;
2553         free_task_struct = srmmu_free_task_struct;
2554 
2555         quick_kernel_fault = srmmu_quick_kernel_fault;
2556 
2557         /* SRMMU specific. */
2558         ctxd_set = srmmu_ctxd_set;
2559         pmd_set = srmmu_pmd_set;
2560 
2561         get_srmmu_type();
2562         patch_window_trap_handlers();
2563 
2564 #ifdef __SMP__
2565         /* El switcheroo... */
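        /* The chip-specific routines chosen by get_srmmu_type() above
         * are squirreled away as the local_* variants, and the public
         * pointers are then re-aimed at smp_* wrappers (three of them
         * are defined just above this function) which use cross-calls
         * so the other cpus carry out the flush as well. */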
2566 
2567         local_flush_cache_all = flush_cache_all;
2568         local_flush_cache_mm = flush_cache_mm;
2569         local_flush_cache_range = flush_cache_range;
2570         local_flush_cache_page = flush_cache_page;
2571         local_flush_tlb_all = flush_tlb_all;
2572         local_flush_tlb_mm = flush_tlb_mm;
2573         local_flush_tlb_range = flush_tlb_range;
2574         local_flush_tlb_page = flush_tlb_page;
2575         local_flush_page_to_ram = flush_page_to_ram;
2576         local_flush_page_for_dma = flush_page_for_dma;
2577         local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
2578         local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
2579 
2580         flush_cache_all = smp_flush_cache_all;
2581         flush_cache_mm = smp_flush_cache_mm;
2582         flush_cache_range = smp_flush_cache_range;
2583         flush_cache_page = smp_flush_cache_page;
2584         flush_tlb_all = smp_flush_tlb_all;
2585         flush_tlb_mm = smp_flush_tlb_mm;
2586         flush_tlb_range = smp_flush_tlb_range;
2587         flush_tlb_page = smp_flush_tlb_page;
2588         flush_page_to_ram = smp_flush_page_to_ram;
2589         flush_page_for_dma = smp_flush_page_for_dma;
2590         flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
2591         flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
2592 #endif
2593 }
