root/arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions:
  1. srmmu_v2p
  2. srmmu_p2v
  3. srmmu_swap
  4. srmmu_pmd_align
  5. srmmu_pgdir_align
  6. srmmu_vmalloc_start
  7. srmmu_pgd_page
  8. srmmu_pmd_page
  9. srmmu_pte_page
  10. srmmu_pte_none
  11. srmmu_pte_present
  12. srmmu_pte_clear
  13. srmmu_pmd_none
  14. srmmu_pmd_bad
  15. srmmu_pmd_present
  16. srmmu_pmd_clear
  17. srmmu_pgd_none
  18. srmmu_pgd_bad
  19. srmmu_pgd_present
  20. srmmu_pgd_clear
  21. srmmu_pte_write
  22. srmmu_pte_dirty
  23. srmmu_pte_young
  24. srmmu_pte_wrprotect
  25. srmmu_pte_mkclean
  26. srmmu_pte_mkold
  27. srmmu_pte_mkwrite
  28. srmmu_pte_mkdirty
  29. srmmu_pte_mkyoung
  30. srmmu_mk_pte
  31. srmmu_mk_pte_io
  32. srmmu_ctxd_set
  33. srmmu_pgd_set
  34. srmmu_pmd_set
  35. srmmu_pte_modify
  36. srmmu_pgd_offset
  37. srmmu_pmd_offset
  38. srmmu_pte_offset
  39. srmmu_update_rootmmu_dir
  40. srmmu_uncache_page
  41. srmmu_recache_page
  42. srmmu_getpage
  43. srmmu_putpage
  44. srmmu_pte_free_kernel
  45. srmmu_pte_alloc_kernel
  46. srmmu_pmd_free_kernel
  47. srmmu_pmd_alloc_kernel
  48. srmmu_pte_free
  49. srmmu_pte_alloc
  50. srmmu_pmd_free
  51. srmmu_pmd_alloc
  52. srmmu_pgd_free
  53. srmmu_pgd_alloc
  54. srmmu_set_pte
  55. srmmu_quick_kernel_fault
  56. alloc_context
  57. srmmu_switch_to_context
  58. srmmu_mapioaddr
  59. srmmu_lockarea
  60. srmmu_unlockarea
  61. srmmu_alloc_task_struct
  62. srmmu_alloc_kernel_stack
  63. srmmu_free_task_struct
  64. srmmu_free_kernel_stack
  65. tsunami_flush_cache_all
  66. tsunami_flush_cache_mm
  67. tsunami_flush_cache_range
  68. tsunami_flush_cache_page
  69. tsunami_flush_cache_page_to_uncache
  70. tsunami_flush_page_to_ram
  71. tsunami_flush_page_for_dma
  72. tsunami_flush_tlb_all
  73. tsunami_flush_tlb_mm
  74. tsunami_flush_tlb_range
  75. tsunami_flush_tlb_page
  76. tsunami_flush_tlb_page_for_cbit
  77. swift_flush_cache_all
  78. swift_flush_cache_mm
  79. swift_flush_cache_range
  80. swift_flush_cache_page
  81. swift_flush_page_to_ram
  82. swift_flush_page_for_dma
  83. swift_flush_cache_page_to_uncache
  84. swift_flush_tlb_all
  85. swift_flush_tlb_mm
  86. swift_flush_tlb_range
  87. swift_flush_tlb_page
  88. swift_flush_tlb_page_for_cbit
  89. viking_flush_cache_all
  90. viking_flush_cache_mm
  91. viking_flush_cache_range
  92. viking_flush_cache_page
  93. viking_flush_page_to_ram
  94. viking_flush_page_for_dma
  95. viking_mxcc_flush_page
  96. viking_no_mxcc_flush_page
  97. viking_flush_tlb_all
  98. viking_flush_tlb_mm
  99. viking_flush_tlb_range
  100. viking_flush_tlb_page
  101. viking_flush_tlb_page_for_cbit
  102. cypress_flush_tlb_all
  103. cypress_flush_tlb_mm
  104. cypress_flush_tlb_range
  105. cypress_flush_tlb_page
  106. hypersparc_flush_cache_all
  107. hypersparc_flush_cache_mm
  108. hypersparc_flush_cache_range
  109. hypersparc_flush_cache_page
  110. hypersparc_flush_page_to_ram
  111. hypersparc_flush_page_for_dma
  112. hypersparc_flush_cache_page_to_uncache
  113. hypersparc_flush_tlb_all
  114. hypersparc_flush_tlb_mm
  115. hypersparc_flush_tlb_range
  116. hypersparc_flush_tlb_page
  117. hypersparc_flush_tlb_page_for_cbit
  118. hypersparc_ctxd_set
  119. hypersparc_update_rootmmu_dir
  120. hypersparc_set_pte
  121. hypersparc_switch_to_context
  122. srmmu_map_dvma_pages_for_iommu
  123. srmmu_uncache_iommu_page_table
  124. iommu_init
  125. srmmu_get_scsi_one
  126. srmmu_get_scsi_sgl
  127. srmmu_release_scsi_one
  128. srmmu_release_scsi_sgl
  129. srmmu_early_paddr
  130. srmmu_early_pgd_set
  131. srmmu_early_pmd_set
  132. srmmu_early_pgd_page
  133. srmmu_early_pmd_page
  134. srmmu_early_pmd_offset
  135. srmmu_early_pte_offset
  136. srmmu_init_alloc
  137. srmmu_allocate_ptable_skeleton
  138. srmmu_inherit_prom_mappings
  139. srmmu_map_dvma_pages_for_cpu
  140. srmmu_map_kernel
  141. srmmu_paging_init
  142. srmmu_mmu_info
  143. srmmu_update_mmu_cache
  144. srmmu_exit_hook
  145. srmmu_flush_hook
  146. hypersparc_exit_hook
  147. hypersparc_flush_hook
  148. srmmu_is_bad
  149. poke_hypersparc
  150. init_hypersparc
  151. poke_cypress
  152. init_cypress_common
  153. init_cypress_604
  154. init_cypress_605
  155. poke_swift
  156. init_swift
  157. poke_tsunami
  158. init_tsunami
  159. poke_viking
  160. init_viking
  161. get_srmmu_type
  162. patch_window_trap_handlers
  163. smp_flush_page_for_dma
  164. smp_flush_cache_page_to_uncache
  165. smp_flush_tlb_page_for_cbit
  166. ld_mmu_srmmu

   1 /* $Id: srmmu.c,v 1.62 1996/04/25 09:11:47 davem Exp $
   2  * srmmu.c:  SRMMU specific routines for memory management.
   3  *
   4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   5  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
   6  * Copyright (C) 1996 Eddie C. Dost    (ecd@pool.informatik.rwth-aachen.de)
   7  */
   8 
   9 #include <linux/config.h>
  10 #include <linux/kernel.h>
  11 #include <linux/mm.h>
  12 
  13 #include <asm/page.h>
  14 #include <asm/pgtable.h>
  15 #include <asm/io.h>
  16 #include <asm/kdebug.h>
  17 #include <asm/vaddrs.h>
  18 #include <asm/traps.h>
  19 #include <asm/smp.h>
  20 #include <asm/mbus.h>
  21 #include <asm/cache.h>
  22 #include <asm/oplib.h>
  23 #include <asm/sbus.h>
  24 #include <asm/iommu.h>
  25 #include <asm/asi.h>
  26 #include <asm/msi.h>
  27 
  28 /* Now the cpu specific definitions. */
  29 #include <asm/viking.h>
  30 #include <asm/mxcc.h>
  31 #include <asm/ross.h>
  32 #include <asm/tsunami.h>
  33 #include <asm/swift.h>
  34 
  35 enum mbus_module srmmu_modtype;
  36 unsigned int hwbug_bitmask;
  37 int hyper_cache_size;
  38 int hyper_line_size;
  39 
  40 #ifdef __SMP__
  41 extern void smp_capture(void);
  42 extern void smp_release(void);
  43 #else
  44 #define smp_capture()
  45 #define smp_release()
  46 #endif /* !(__SMP__) */
  47 
  48 static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
  49 static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
  50 
  51 static void (*flush_page_for_dma)(unsigned long page);
  52 static void (*flush_cache_page_to_uncache)(unsigned long page);
  53 static void (*flush_tlb_page_for_cbit)(unsigned long page);
  54 #ifdef __SMP__
  55 static void (*local_flush_page_for_dma)(unsigned long page);
  56 static void (*local_flush_cache_page_to_uncache)(unsigned long page);
  57 static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
  58 #endif
  59 
  60 static struct srmmu_stats {
  61         int invall;
  62         int invpg;
  63         int invrnge;
  64         int invmm;
  65 } module_stats;
  66 
  67 static char *srmmu_name;
  68 
  69 ctxd_t *srmmu_ctx_table_phys;
  70 ctxd_t *srmmu_context_table;
  71 
  72 static struct srmmu_trans {
  73         unsigned long vbase;
  74         unsigned long pbase;
  75         int size;
  76 } srmmu_map[SPARC_PHYS_BANKS];
  77 
  78 static int can_cache_ptables = 0;
  79 static int viking_mxcc_present = 0;
  80 
   81 /* Physical memory can be _very_ non-contiguous on the sun4m, especially
   82  * on the SS10/20 class machines and with the latest openprom revisions.
  83  * So we have to crunch the free page pool.
  84  */
  85 static inline unsigned long srmmu_v2p(unsigned long vaddr)
  86 {
  87         int i;
  88 
  89         for(i=0; srmmu_map[i].size != 0; i++) {
  90                 if(srmmu_map[i].vbase <= vaddr &&
  91                    (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
  92                         return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
  93         }
  94         return 0xffffffffUL;
  95 }
  96 
  97 static inline unsigned long srmmu_p2v(unsigned long paddr)
  98 {
  99         int i;
 100 
 101         for(i=0; srmmu_map[i].size != 0; i++) {
 102                 if(srmmu_map[i].pbase <= paddr &&
 103                    (srmmu_map[i].pbase + srmmu_map[i].size > paddr))
 104                         return (paddr - srmmu_map[i].pbase) + srmmu_map[i].vbase;
 105         }
 106         return 0xffffffffUL;
 107 }
 108 
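
To make the bank-table lookup below concrete, here is a minimal standalone
sketch of the same linear search; the single bank is an invented example,
not a layout taken from this file.

#include <stdio.h>

/* Hypothetical bank layout, mirroring struct srmmu_trans above. */
struct bank { unsigned long vbase, pbase; long size; };

static struct bank map[] = {
        { 0xf0000000UL, 0x00000000UL, 16 << 20 },       /* invented 16MB bank */
        { 0, 0, 0 },                                    /* size 0 terminates  */
};

static unsigned long v2p(unsigned long vaddr)
{
        int i;

        for(i = 0; map[i].size != 0; i++)
                if(map[i].vbase <= vaddr && vaddr < map[i].vbase + map[i].size)
                        return (vaddr - map[i].vbase) + map[i].pbase;
        return 0xffffffffUL;            /* same "no translation" sentinel */
}

int main(void)
{
        printf("%08lx\n", v2p(0xf0001000UL));   /* 00001000: inside the bank */
        printf("%08lx\n", v2p(0xe0000000UL));   /* ffffffff: no bank matches */
        return 0;
}
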
 109 /* In general all page table modifications should use the V8 atomic
  110  * swap instruction.  This ensures the mmu and the cpu are in sync
 111  * with respect to ref/mod bits in the page tables.
 112  */
 113 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
 114 {
 115 #if CONFIG_AP1000
 116   /* the AP1000 has its memory on bus 8, not 0 like suns do */
 117   if (!(value&0xf0000000))
 118     value |= 0x80000000;
 119   if (value == 0x80000000) value = 0;
 120 #endif
 121         __asm__ __volatile__("swap [%2], %0\n\t" :
 122                              "=&r" (value) :
 123                              "0" (value), "r" (addr));
 124         return value;
 125 }
 126 
 127 /* Functions really use this, not srmmu_swap directly. */
 128 #define srmmu_set_entry(ptr, newentry) \
 129         srmmu_swap((unsigned long *) (ptr), (newentry))
 130 
 131 
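A short aside on why updates go through srmmu_swap() rather than a plain
assignment; this is an explanatory sketch, not code from the file.

/*
 * Illustration only: with a plain read-modify-write sequence,
 *
 *      old = *ptep;            <- table-walk hardware may set SRMMU_REF here
 *      *ptep = new;            <- and that freshly set bit is silently lost
 *
 * The swap instruction instead exchanges the entry atomically and returns
 * the previous contents, so any ref/mod bits the MMU set concurrently come
 * back to the caller rather than vanishing without a trace.
 */
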
 132 /* The very generic SRMMU page table operations. */
 133 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
 134 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
 135 
 136 static unsigned long srmmu_vmalloc_start(void)
 137 {
 138         return SRMMU_VMALLOC_START;
 139 }
 140 
 141 static unsigned long srmmu_pgd_page(pgd_t pgd)
 142 { return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
 143 
 144 static unsigned long srmmu_pmd_page(pmd_t pmd)
 145 { return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
 146 
 147 static unsigned long srmmu_pte_page(pte_t pte)
 148 { return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
 149 
 150 static int srmmu_pte_none(pte_t pte)          { return !pte_val(pte); }
 151 static int srmmu_pte_present(pte_t pte)
 152 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
 153 
 154 static void srmmu_pte_clear(pte_t *ptep)      { set_pte(ptep, __pte(0)); }
 155 
 156 static int srmmu_pmd_none(pmd_t pmd)          { return !pmd_val(pmd); }
 157 static int srmmu_pmd_bad(pmd_t pmd)
 158 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 159 
 160 static int srmmu_pmd_present(pmd_t pmd)
 161 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 162 
 163 static void srmmu_pmd_clear(pmd_t *pmdp)      { set_pte((pte_t *)pmdp, __pte(0)); }
 164 
 165 static int srmmu_pgd_none(pgd_t pgd)          { return !pgd_val(pgd); }
 166 static int srmmu_pgd_bad(pgd_t pgd)
 167 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 168 
 169 static int srmmu_pgd_present(pgd_t pgd)
 170 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 171 
 172 static void srmmu_pgd_clear(pgd_t * pgdp)     { set_pte((pte_t *)pgdp, __pte(0)); }
 173 
 174 static int srmmu_pte_write(pte_t pte)         { return pte_val(pte) & SRMMU_WRITE; }
 175 static int srmmu_pte_dirty(pte_t pte)         { return pte_val(pte) & SRMMU_DIRTY; }
 176 static int srmmu_pte_young(pte_t pte)         { return pte_val(pte) & SRMMU_REF; }
 177 
 178 static pte_t srmmu_pte_wrprotect(pte_t pte)   { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
 179 static pte_t srmmu_pte_mkclean(pte_t pte)     { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
 180 static pte_t srmmu_pte_mkold(pte_t pte)       { pte_val(pte) &= ~SRMMU_REF; return pte; }
 181 static pte_t srmmu_pte_mkwrite(pte_t pte)     { pte_val(pte) |= SRMMU_WRITE; return pte; }
 182 static pte_t srmmu_pte_mkdirty(pte_t pte)     { pte_val(pte) |= SRMMU_DIRTY; return pte; }
 183 static pte_t srmmu_pte_mkyoung(pte_t pte)     { pte_val(pte) |= SRMMU_REF; return pte; }
 184 
 185 /*
 186  * Conversion functions: convert a page and protection to a page entry,
 187  * and a page entry and page directory to the page they refer to.
 188  */
 189 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 190 { pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
 191 
 192 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 193 {
 194         pte_t pte;
 195         pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
 196         return pte;
 197 }
 198 
 199 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 200 { 
 201         srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
 202 }
 203 
 204 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 205 {
 206         srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
 207 }
 208 
 209 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 210 {
 211         srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
 212 }
 213 
 214 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 215 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
 216 
 217 /* to find an entry in a top-level page table... */
 218 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 219 {
 220         return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 221 }
 222 
 223 /* Find an entry in the second-level page table.. */
 224 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 225 {
 226         return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 227 }
 228 
 229 /* Find an entry in the third-level page table.. */ 
 230 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
 231 {
 232         return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 233 }
 234 
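For orientation, the three lookup helpers above index successive levels of
the SPARC Reference MMU tree; the split below follows the reference MMU
layout, with the actual constants living in asm/pgtsrmmu.h.

/*
 * Sketch of the 32-bit virtual address split used by these helpers
 * (8/6/6/12 in the SPARC Reference MMU):
 *
 *      bits 31..24  pgd index   - 256 entries, one per 16MB region
 *      bits 23..18  pmd index   -  64 entries, one per 256KB segment
 *      bits 17..12  pte index   -  64 entries, one per 4KB page
 *      bits 11..0   offset within the page
 *
 * Chaining srmmu_pgd_offset(), srmmu_pmd_offset() and srmmu_pte_offset()
 * therefore resolves a mapped address down to its pte, exactly as
 * srmmu_uncache_page() below does.
 */
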
 235 /* This must update the context table entry for this process. */
 236 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) 
 237 {
 238         if(tsk->mm->context != NO_CONTEXT)
 239                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
 240 }
 241 
 242 static inline void srmmu_uncache_page(unsigned long addr)
 243 {
 244         pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
 245         pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
 246         pte_t *ptep = srmmu_pte_offset(pmdp, addr);
 247 
 248         flush_cache_page_to_uncache(addr);
 249         set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
 250         flush_tlb_page_for_cbit(addr);
 251 }
 252 
 253 static inline void srmmu_recache_page(unsigned long addr)
 254 {
 255         pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
 256         pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
 257         pte_t *ptep = srmmu_pte_offset(pmdp, addr);
 258 
 259         set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
 260         flush_tlb_page_for_cbit(addr);
 261 }
 262 
 263 static inline unsigned long srmmu_getpage(void)
 264 {
 265         unsigned long page = get_free_page(GFP_KERNEL);
 266 
 267         if (can_cache_ptables)
 268                 return page;
 269 
 270         if(page)
 271                 srmmu_uncache_page(page);
 272         return page;
 273 }
 274 
 275 static inline void srmmu_putpage(unsigned long page)
 276 {
 277         if (!can_cache_ptables)
 278                 srmmu_recache_page(page);
 279         free_page(page);
 280 }
 281 
 282 /* The easy versions. */
 283 #define NEW_PGD() (pgd_t *) srmmu_getpage()
 284 #define NEW_PMD() (pmd_t *) srmmu_getpage()
 285 #define NEW_PTE() (pte_t *) srmmu_getpage()
 286 #define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
 287 #define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
 288 #define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
 289 
 290 /*
 291  * Allocate and free page tables. The xxx_kernel() versions are
 292  * used to allocate a kernel page table - this turns on ASN bits
 293  * if any, and marks the page tables reserved.
 294  */
 295 static void srmmu_pte_free_kernel(pte_t *pte)
 296 {
 297         FREE_PTE(pte);
 298 }
 299 
 300 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 301 {
 302         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 303         if(srmmu_pmd_none(*pmd)) {
 304                 pte_t *page = NEW_PTE();
 305                 if(srmmu_pmd_none(*pmd)) {
 306                         if(page) {
 307                                 pmd_set(pmd, page);
 308                                 return page + address;
 309                         }
 310                         pmd_set(pmd, BAD_PAGETABLE);
 311                         return NULL;
 312                 }
 313                 FREE_PTE(page);
 314         }
 315         if(srmmu_pmd_bad(*pmd)) {
 316                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 317                 pmd_set(pmd, BAD_PAGETABLE);
 318                 return NULL;
 319         }
 320         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 321 }
 322 
 323 static void srmmu_pmd_free_kernel(pmd_t *pmd)
 324 {
 325         FREE_PMD(pmd);
 326 }
 327 
 328 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 329 {
 330         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 331         if(srmmu_pgd_none(*pgd)) {
 332                 pmd_t *page = NEW_PMD();
 333                 if(srmmu_pgd_none(*pgd)) {
 334                         if(page) {
 335                                 pgd_set(pgd, page);
 336                                 return page + address;
 337                         }
 338                         pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 339                         return NULL;
 340                 }
 341                 FREE_PMD(page);
 342         }
 343         if(srmmu_pgd_bad(*pgd)) {
 344                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 345                 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 346                 return NULL;
 347         }
 348         return (pmd_t *) pgd_page(*pgd) + address;
 349 }
 350 
 351 static void srmmu_pte_free(pte_t *pte)
 352 {
 353         FREE_PTE(pte);
 354 }
 355 
 356 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 357 {
 358         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 359         if(srmmu_pmd_none(*pmd)) {
 360                 pte_t *page = NEW_PTE();
 361                 if(srmmu_pmd_none(*pmd)) {
 362                         if(page) {
 363                                 pmd_set(pmd, page);
 364                                 return page + address;
 365                         }
 366                         pmd_set(pmd, BAD_PAGETABLE);
 367                         return NULL;
 368                 }
 369                 FREE_PTE(page);
 370         }
 371         if(srmmu_pmd_bad(*pmd)) {
 372                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 373                 pmd_set(pmd, BAD_PAGETABLE);
 374                 return NULL;
 375         }
 376         return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
 377 }
 378 
 379 /* Real three-level page tables on SRMMU. */
 380 static void srmmu_pmd_free(pmd_t * pmd)
 381 {
 382         FREE_PMD(pmd);
 383 }
 384 
 385 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 386 {
 387         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 388         if(srmmu_pgd_none(*pgd)) {
 389                 pmd_t *page = NEW_PMD();
 390                 if(srmmu_pgd_none(*pgd)) {
 391                         if(page) {
 392                                 pgd_set(pgd, page);
 393                                 return page + address;
 394                         }
 395                         pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 396                         return NULL;
 397                 }
 398                 FREE_PMD(page);
 399         }
 400         if(srmmu_pgd_bad(*pgd)) {
 401                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 402                 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
 403                 return NULL;
 404         }
 405         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 406 }
 407 
 408 static void srmmu_pgd_free(pgd_t *pgd)
 409 {
 410         FREE_PGD(pgd);
 411 }
 412 
 413 static pgd_t *srmmu_pgd_alloc(void)
 414 {
 415         return NEW_PGD();
 416 }
 417 
 418 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
 419 {
 420         srmmu_set_entry(ptep, pte_val(pteval));
 421 }
 422 
 423 static void srmmu_quick_kernel_fault(unsigned long address)
 424 {
 425         printk("Penguin faults at address %08lx\n", address);
 426         panic("Srmmu bolixed...");
 427 }
 428 
 429 static inline void alloc_context(struct mm_struct *mm)
 430 {
 431         struct ctx_list *ctxp;
 432 
 433         ctxp = ctx_free.next;
 434         if(ctxp != &ctx_free) {
 435                 remove_from_ctx_list(ctxp);
 436                 add_to_used_ctxlist(ctxp);
 437                 mm->context = ctxp->ctx_number;
 438                 ctxp->ctx_mm = mm;
 439                 return;
 440         }
 441         ctxp = ctx_used.next;
 442         if(ctxp->ctx_mm == current->mm)
 443                 ctxp = ctxp->next;
 444         if(ctxp == &ctx_used)
 445                 panic("out of mmu contexts");
 446         flush_cache_mm(ctxp->ctx_mm);
 447         flush_tlb_mm(ctxp->ctx_mm);
 448         remove_from_ctx_list(ctxp);
 449         add_to_used_ctxlist(ctxp);
 450         ctxp->ctx_mm->context = NO_CONTEXT;
 451         ctxp->ctx_mm = mm;
 452         mm->context = ctxp->ctx_number;
 453 }
 454 
 455 static void srmmu_switch_to_context(struct task_struct *tsk)
 456 {
 457         /* Kernel threads can execute in any context and so can tasks
 458          * sleeping in the middle of exiting. If this task has already
  459          * been allocated a piece of the mmu real estate, just jump to
 460          * it.
 461          */
 462         if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
 463            (tsk->flags & PF_EXITING))
 464                 return;
 465         if(tsk->mm->context == NO_CONTEXT) {
 466                 alloc_context(tsk->mm);
 467                 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
 468         }
 469         srmmu_set_context(tsk->mm->context);
 470 }
 471 
 472 /* Low level IO area allocation on the SRMMU. */
 473 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
 474 {
 475         pgd_t *pgdp;
 476         pmd_t *pmdp;
 477         pte_t *ptep;
 478         unsigned long tmp;
 479 
 480         physaddr &= PAGE_MASK;
 481         pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
 482         pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 483         ptep = srmmu_pte_offset(pmdp, virt_addr);
 484         tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 485 
 486         /* I need to test whether this is consistent over all
 487          * sun4m's.  The bus_type represents the upper 4 bits of
 488          * 36-bit physical address on the I/O space lines...
 489          */
 490         tmp |= (bus_type << 28);
 491         if(rdonly)
 492                 tmp |= SRMMU_PRIV_RDONLY;
 493         else
 494                 tmp |= SRMMU_PRIV;
 495         flush_page_to_ram(virt_addr);
 496         srmmu_set_entry(ptep, tmp);
 497         flush_tlb_all();
 498 }
 499 
 500 static char *srmmu_lockarea(char *vaddr, unsigned long len)
 501 {
 502         return vaddr;
 503 }
 504 
 505 static void srmmu_unlockarea(char *vaddr, unsigned long len)
 506 {
 507 }
 508 
 509 /* On the SRMMU we do not have the problems with limited tlb entries
 510  * for mapping kernel pages, so we just take things from the free page
 511  * pool.  As a side effect we are putting a little too much pressure
 512  * on the gfp() subsystem.  This setup also makes the logic of the
 513  * iommu mapping code a lot easier as we can transparently handle
  514  * mappings on the kernel stack without any of the special code we
  515  * needed on the sun4c.
 516  */
 517 struct task_struct *srmmu_alloc_task_struct(void)
 518 {
 519         unsigned long page;
 520 
 521         page = get_free_page(GFP_KERNEL);
 522         if(!page)
 523                 return (struct task_struct *) 0;
 524         return (struct task_struct *) page;
 525 }
 526 
 527 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
 528 {
 529         unsigned long pages;
 530 
 531         pages = __get_free_pages(GFP_KERNEL, 2, 0);
 532         if(!pages)
 533                 return 0;
 534         memset((void *) pages, 0, (PAGE_SIZE << 2));
 535         return pages;
 536 }
 537 
 538 static void srmmu_free_task_struct(struct task_struct *tsk)
 539 {
 540         free_page((unsigned long) tsk);
 541 }
 542 
 543 static void srmmu_free_kernel_stack(unsigned long stack)
 544 {
 545         free_pages(stack, 2);
 546 }
 547 
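A small arithmetic note on the two allocators above, assuming the usual 4K
sparc32 page size.

/*
 * srmmu_alloc_task_struct() takes a single page (4K), while
 * __get_free_pages(GFP_KERNEL, 2, 0) is an order-2 request:
 * 1 << 2 = 4 contiguous pages = 16K of kernel stack.  That is why the
 * memset above clears PAGE_SIZE << 2 bytes and why
 * srmmu_free_kernel_stack() releases the block with order 2.
 */
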
  548 /* Tsunami flushes.  Its page level tlb invalidation is not very
  549  * useful at all; you must be in the context that the page exists in to
 550  * get a match.
 551  */
 552 static void tsunami_flush_cache_all(void)
 553 {
 554         flush_user_windows();
 555         tsunami_flush_icache();
 556         tsunami_flush_dcache();
 557 }
 558 
 559 static void tsunami_flush_cache_mm(struct mm_struct *mm)
 560 {
 561 #ifndef __SMP__
 562         if(mm->context != NO_CONTEXT) {
 563 #endif
 564                 flush_user_windows();
 565                 tsunami_flush_icache();
 566                 tsunami_flush_dcache();
 567 #ifndef __SMP__
 568         }
 569 #endif
 570 }
 571 
 572 static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 573 {
 574 #ifndef __SMP__
 575         if(mm->context != NO_CONTEXT) {
 576 #endif
 577                 flush_user_windows();
 578                 tsunami_flush_icache();
 579                 tsunami_flush_dcache();
 580 #ifndef __SMP__
 581         }
 582 #endif
 583 }
 584 
 585 static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 586 {
 587 #ifndef __SMP__
 588         struct mm_struct *mm = vma->vm_mm;
 589         if(mm->context != NO_CONTEXT) {
 590 #endif
 591                 flush_user_windows();
 592                 tsunami_flush_icache();
 593                 tsunami_flush_dcache();
 594 #ifndef __SMP__
 595         }
 596 #endif
 597 }
 598 
 599 static void tsunami_flush_cache_page_to_uncache(unsigned long page)
 600 {
 601         tsunami_flush_dcache();
 602 }
 603 
 604 /* Tsunami does not have a Copy-back style virtual cache. */
 605 static void tsunami_flush_page_to_ram(unsigned long page)
 606 {
 607         tsunami_flush_icache();
 608         tsunami_flush_dcache();
 609 }
 610 
 611 /* However, Tsunami is not IO coherent. */
 612 static void tsunami_flush_page_for_dma(unsigned long page)
 613 {
 614         tsunami_flush_icache();
 615         tsunami_flush_dcache();
 616 }
 617 
 618 /* TLB flushes seem to upset the tsunami sometimes, I can't figure out
 619  * what the hell is going on.  All I see is a tlb flush (page or whole,
 620  * there is no consistent pattern) and then total local variable corruption
  621  * in the procedure that called us after return.  Usually triggerable
 622  * by "cool" programs like crashme and bonnie.  I played around a bit
 623  * and adding a bunch of forced nops seems to make the problems all
 624  * go away. (missed instruction fetches possibly? ugh...)
 625  */
 626 #define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
 627                            nop(); nop(); nop(); nop(); nop(); } while(0)
 628 
 629 static void tsunami_flush_tlb_all(void)
 630 {
 631         module_stats.invall++;
 632         srmmu_flush_whole_tlb();
 633         TSUNAMI_SUCKS;
 634 }
 635 
 636 static void tsunami_flush_tlb_mm(struct mm_struct *mm)
 637 {
 638         module_stats.invmm++;
 639 #ifndef __SMP__
 640         if(mm->context != NO_CONTEXT) {
 641 #endif
 642                 srmmu_flush_whole_tlb();
 643                 TSUNAMI_SUCKS;
 644 #ifndef __SMP__
 645         }
 646 #endif
 647 }
 648 
 649 static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 650 {
 651         module_stats.invrnge++;
 652 #ifndef __SMP__
 653         if(mm->context != NO_CONTEXT) {
 654 #endif
 655                 srmmu_flush_whole_tlb();
 656                 TSUNAMI_SUCKS;
 657 #ifndef __SMP__
 658         }
 659 #endif
 660 }
 661 
 662 static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 663 {
 664         int octx;
 665         struct mm_struct *mm = vma->vm_mm;
 666 
 667 #ifndef __SMP__
 668         if(mm->context != NO_CONTEXT) {
 669 #endif
 670                 octx = srmmu_get_context();
 671 
 672                 srmmu_set_context(mm->context);
 673                 srmmu_flush_tlb_page(page);
 674                 TSUNAMI_SUCKS;
 675                 srmmu_set_context(octx);
 676 #ifndef __SMP__
 677         }
 678 #endif
 679         module_stats.invpg++;
 680 }
 681 
 682 static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
 683 {
 684         srmmu_flush_tlb_page(page);
 685 }
 686 
 687 /* Swift flushes.  It has the recommended SRMMU specification flushing
 688  * facilities, so we can do things in a more fine grained fashion than we
 689  * could on the tsunami.  Let's watch out for HARDWARE BUGS...
 690  */
 691 
 692 static void swift_flush_cache_all(void)
 693 {
 694         flush_user_windows();
 695         swift_idflash_clear();
 696 }
 697 
 698 static void swift_flush_cache_mm(struct mm_struct *mm)
 699 {
 700 #ifndef __SMP__
 701         if(mm->context != NO_CONTEXT) {
 702 #endif
 703                 flush_user_windows();
 704                 swift_idflash_clear();
 705 #ifndef __SMP__
 706         }
 707 #endif
 708 }
 709 
 710 static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 711 {
 712 #ifndef __SMP__
 713         if(mm->context != NO_CONTEXT) {
 714 #endif
 715                 flush_user_windows();
 716                 swift_idflash_clear();
 717 #ifndef __SMP__
 718         }
 719 #endif
 720 }
 721 
 722 static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 723 {
 724 #ifndef __SMP__
 725         struct mm_struct *mm = vma->vm_mm;
 726         if(mm->context != NO_CONTEXT) {
 727 #endif
 728                 flush_user_windows();
 729                 if(vma->vm_flags & VM_EXEC)
 730                         swift_flush_icache();
 731                 swift_flush_dcache();
 732 #ifndef __SMP__
 733         }
 734 #endif
 735 }
 736 
 737 /* Not copy-back on swift. */
 738 static void swift_flush_page_to_ram(unsigned long page)
 739 {
 740 }
 741 
 742 /* But not IO coherent either. */
 743 static void swift_flush_page_for_dma(unsigned long page)
 744 {
 745         swift_flush_dcache();
 746 }
 747 
 748 static void swift_flush_cache_page_to_uncache(unsigned long page)
 749 {
 750         swift_flush_dcache();
 751 }
 752 
 753 static void swift_flush_tlb_all(void)
 754 {
 755         module_stats.invall++;
 756         srmmu_flush_whole_tlb();
 757 }
 758 
 759 static void swift_flush_tlb_mm(struct mm_struct *mm)
 760 {
 761         module_stats.invmm++;
 762 #ifndef __SMP__
 763         if(mm->context != NO_CONTEXT)
 764 #endif
 765                 srmmu_flush_whole_tlb();
 766 }
 767 
 768 static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 769 {
 770         module_stats.invrnge++;
 771 #ifndef __SMP__
 772         if(mm->context != NO_CONTEXT)
 773 #endif
 774                 srmmu_flush_whole_tlb();
 775 }
 776 
 777 static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 778 {
 779 #ifndef __SMP__
 780         struct mm_struct *mm = vma->vm_mm;
 781         if(mm->context != NO_CONTEXT)
 782 #endif
 783                 srmmu_flush_whole_tlb();
 784         module_stats.invpg++;
 785 }
 786 
 787 static void swift_flush_tlb_page_for_cbit(unsigned long page)
 788 {
 789         srmmu_flush_whole_tlb();
 790 }
 791 
 792 /* The following are all MBUS based SRMMU modules, and therefore could
 793  * be found in a multiprocessor configuration.  On the whole, these
  794  * chips seem to be much more touchy about DVMA and page tables
 795  * with respect to cache coherency.
 796  */
 797 
 798 /* Viking flushes.  For Sun's mainline MBUS processor it is pretty much
 799  * a crappy mmu.  The on-chip I&D caches only have full flushes, no fine
 800  * grained cache invalidations.  It only has these "flash clear" things
  801  * just like the MicroSparcI.  Added to this, many revs of the chip are
  802  * teeming with hardware buggery.  Someday maybe we'll do direct
 803  * diagnostic tag accesses for page level flushes as those should
 804  * be painless and will increase performance due to the frequency of
 805  * page level flushes. This is a must to _really_ flush the caches,
 806  * crazy hardware ;-)
 807  */
 808 
 809 static void viking_flush_cache_all(void)
 810 {
 811 }
 812 
 813 static void viking_flush_cache_mm(struct mm_struct *mm)
 814 {
 815 #ifndef __SMP__
 816         if(mm->context != NO_CONTEXT) {
 817 #endif
 818                 flush_user_windows();
 819 #ifndef __SMP__
 820         }
 821 #endif
 822 }
 823 
 824 static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 825 {
 826 #ifndef __SMP__
 827         if(mm->context != NO_CONTEXT) {
 828 #endif
 829                 flush_user_windows();
 830 #ifndef __SMP__
 831         }
 832 #endif
 833 }
 834 
 835 static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 836 {
 837 #ifndef __SMP__
 838         struct mm_struct *mm = vma->vm_mm;
 839         if(mm->context != NO_CONTEXT) {
 840 #endif
 841                 flush_user_windows();
 842 #ifndef __SMP__
 843         }
 844 #endif
 845 }
 846 
 847 /* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
 848 static void viking_flush_page_to_ram(unsigned long page)
 849 {
 850 }
 851 
 852 /* Viking is IO cache coherent. */
 853 static void viking_flush_page_for_dma(unsigned long page)
 854 {
 855 }
 856 
 857 static void viking_mxcc_flush_page(unsigned long page)
 858 {
 859         unsigned long ppage = srmmu_hwprobe(page);
 860         unsigned long paddr0, paddr1;
 861 
 862         if (!ppage)
 863                 return;
 864 
 865         paddr0 = (ppage >> 28) | 0x10;          /* Set cacheable bit. */
 866         paddr1 = (ppage << 4) & PAGE_MASK;
 867 
 868         /* Read the page's data through the stream registers,
 869          * and write it back to memory. This will issue
 870          * coherent write invalidates to all other caches, thus
 871          * should also be sufficient in an MP system.
 872          */
 873         __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
 874                               "or %%g0, %1, %%g3\n"
 875                               "1:\n\t"
 876                               "stda %%g2, [%2] %5\n\t"
 877                               "stda %%g2, [%3] %5\n\t"
 878                               "add %%g3, %4, %%g3\n\t"
 879                               "btst 0xfff, %%g3\n\t"
 880                               "bne 1b\n\t"
 881                               "nop\n\t" : :
 882                               "r" (paddr0), "r" (paddr1),
 883                               "r" (MXCC_SRCSTREAM),
 884                               "r" (MXCC_DESSTREAM),
 885                               "r" (MXCC_STREAM_SIZE),
 886                               "i" (ASI_M_MXCC) : "g2", "g3");
 887 
 888         /* This was handcoded after a look at the gcc output from
 889          *
 890          *      do {
 891          *              mxcc_set_stream_src(paddr);
 892          *              mxcc_set_stream_dst(paddr);
 893          *              paddr[1] += MXCC_STREAM_SIZE;
 894          *      } while (paddr[1] & ~PAGE_MASK);
 895          */
 896 }
 897 
 898 static void viking_no_mxcc_flush_page(unsigned long page)
 899 {
 900         unsigned long ppage = srmmu_hwprobe(page) >> 8;
 901         int set, block;
 902         unsigned long ptag[2];
 903         unsigned long vaddr;
 904         int i;
 905 
 906         if (!ppage)
 907                 return;
 908 
 909         for (set = 0; set < 128; set++) {
 910                 for (block = 0; block < 4; block++) {
 911 
 912                         viking_get_dcache_ptag(set, block, ptag);
 913 
 914                         if (ptag[1] != ppage)
 915                                 continue;
 916                         if (!(ptag[0] & VIKING_PTAG_VALID))
 917                                 continue;
 918                         if (!(ptag[0] & VIKING_PTAG_DIRTY))
 919                                 continue;
 920 
 921                         /* There was a great cache from TI
 922                          * with comfort as much as vi,
 923                          * 4 pages to flush,
 924                          * 4 pages, no rush,
 925                          * since anything else makes him die.
 926                          */
 927                         vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
 928                         for (i = 0; i < 8; i++) {
 929                                 __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
 930                                                       "r" (vaddr) : "g2");
 931                                 vaddr += PAGE_SIZE;
 932                         }
 933 
 934                         /* Continue with next set. */
 935                         break;
 936                 }
 937         }
 938 }
 939 
 940 static void viking_flush_tlb_all(void)
 941 {
 942         module_stats.invall++;
 943         srmmu_flush_whole_tlb();
 944 }
 945 
 946 static void viking_flush_tlb_mm(struct mm_struct *mm)
 947 {
 948         int octx;
 949         module_stats.invmm++;
 950 
 951 #ifndef __SMP__
 952         if(mm->context != NO_CONTEXT) {
 953 #endif
 954                 octx = srmmu_get_context();
 955                 srmmu_set_context(mm->context);
 956                 srmmu_flush_tlb_ctx();
 957                 srmmu_set_context(octx);
 958 #ifndef __SMP__
 959         }
 960 #endif
 961 }
 962 
 963 static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 964 {
 965         int octx;
 966         module_stats.invrnge++;
 967 
 968 #ifndef __SMP__
 969         if(mm->context != NO_CONTEXT) {
 970 #endif
 971                 octx = srmmu_get_context();
 972                 srmmu_set_context(mm->context);
 973                 start &= SRMMU_PMD_MASK;
 974                 while(start < end) {
 975                         srmmu_flush_tlb_segment(start);
 976                         start += SRMMU_PMD_SIZE;
 977                 }
 978                 srmmu_set_context(octx);
 979 #ifndef __SMP__
 980         }
 981 #endif
 982 }
 983 
 984 static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 985 {
 986         int octx;
 987         struct mm_struct *mm = vma->vm_mm;
 988 
 989         module_stats.invpg++;
 990 #ifndef __SMP__
 991         if(mm->context != NO_CONTEXT) {
 992 #endif
 993                 octx = srmmu_get_context();
 994                 srmmu_set_context(mm->context);
 995                 srmmu_flush_tlb_page(page);
 996                 srmmu_set_context(octx);
 997 #ifndef __SMP__
 998         }
 999 #endif
1000 }
1001 
1002 static void viking_flush_tlb_page_for_cbit(unsigned long page)
1003 {
1004         srmmu_flush_tlb_page(page);
1005 }
1006 
1007 /* Cypress flushes. */
1008 
1009 static void cypress_flush_tlb_all(void)
1010 {
1011         module_stats.invall++;
1012         srmmu_flush_whole_tlb();
1013 }
1014 
1015 static void cypress_flush_tlb_mm(struct mm_struct *mm)
1016 {
1017         int octx;
1018 
1019         module_stats.invmm++;
1020 #ifndef __SMP__
1021         if(mm->context != NO_CONTEXT) {
1022 #endif
1023                 octx = srmmu_get_context();
1024                 srmmu_set_context(mm->context);
1025                 srmmu_flush_tlb_ctx();
1026                 srmmu_set_context(octx);
1027 #ifndef __SMP__
1028         }
1029 #endif
1030 }
1031 
1032 static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1033 {
1034         int octx;
1035 
1036         module_stats.invrnge++;
1037 #ifndef __SMP__
1038         if(mm->context != NO_CONTEXT) {
1039 #endif
1040                 octx = srmmu_get_context();
1041                 srmmu_set_context(mm->context);
1042                 start &= SRMMU_PMD_MASK;
1043                 while(start < end) {
1044                         srmmu_flush_tlb_segment(start);
1045                         start += SRMMU_PMD_SIZE;
1046                 }
1047                 srmmu_set_context(octx);
1048 #ifndef __SMP__
1049         }
1050 #endif
1051 }
1052 
1053 static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1054 {
1055         int octx;
1056         struct mm_struct *mm = vma->vm_mm;
1057 
1058         module_stats.invpg++;
1059 #ifndef __SMP__
1060         if(mm->context != NO_CONTEXT) {
1061 #endif
1062                 octx = srmmu_get_context();
1063                 srmmu_set_context(mm->context);
1064                 srmmu_flush_tlb_page(page);
1065                 srmmu_set_context(octx);
1066 #ifndef __SMP__
1067         }
1068 #endif
1069 }
1070 
1071 /* Hypersparc flushes.  Very nice chip... */
1072 static void hypersparc_flush_cache_all(void)
1073 {
1074         flush_user_windows();
1075         hyper_flush_unconditional_combined();
1076         hyper_flush_whole_icache();
1077 }
1078 
1079 static void hypersparc_flush_cache_mm(struct mm_struct *mm)
1080 {
1081 #ifndef __SMP__
1082         if(mm->context != NO_CONTEXT) {
1083 #endif
1084                 flush_user_windows();
1085                 hyper_flush_unconditional_combined();
1086                 hyper_flush_whole_icache();
1087 #ifndef __SMP__
1088         }
1089 #endif
1090 }
1091 
1092 static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1093 {
1094 #ifndef __SMP__
1095         if(mm->context != NO_CONTEXT) {
1096 #endif
1097                 flush_user_windows();
1098                 hyper_flush_unconditional_combined();
1099                 hyper_flush_whole_icache();
1100 #ifndef __SMP__
1101         }
1102 #endif
1103 }
1104 
1105 /* HyperSparc requires a valid mapping where we are about to flush
1106  * in order to check for a physical tag match during the flush.
1107  */
1108 static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1109 {
1110         struct mm_struct *mm = vma->vm_mm;
1111         volatile unsigned long clear;
1112         int octx;
1113 
1114 #ifndef __SMP__
1115         if(mm->context != NO_CONTEXT) {
1116 #endif
1117                 octx = srmmu_get_context();
1118                 flush_user_windows();
1119                 srmmu_set_context(mm->context);
1120                 hyper_flush_whole_icache();
1121                 if(!srmmu_hwprobe(page))
1122                         goto no_mapping;
1123                 hyper_flush_cache_page(page);
1124         no_mapping:
1125                 clear = srmmu_get_fstatus();
1126                 srmmu_set_context(octx);
1127 #ifndef __SMP__
1128         }
1129 #endif
1130 }
1131 
1132 /* HyperSparc is copy-back. */
1133 static void hypersparc_flush_page_to_ram(unsigned long page)
1134 {
1135         volatile unsigned long clear;
1136 
1137         if(srmmu_hwprobe(page))
1138                 hyper_flush_cache_page(page);
1139         clear = srmmu_get_fstatus();
1140 }
1141 
1142 /* HyperSparc is IO cache coherent. */
1143 static void hypersparc_flush_page_for_dma(unsigned long page)
1144 {
1145         volatile unsigned long clear;
1146 
1147         if(srmmu_hwprobe(page))
1148                 hyper_flush_cache_page(page);
1149         clear = srmmu_get_fstatus();
1150 }
1151 
1152 static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
1153 {
1154         volatile unsigned long clear;
1155 
1156         if(srmmu_hwprobe(page))
1157                 hyper_flush_cache_page(page);
1158         clear = srmmu_get_fstatus();
1159 }
1160 
1161 static void hypersparc_flush_tlb_all(void)
1162 {
1163         module_stats.invall++;
1164         srmmu_flush_whole_tlb();
1165 }
1166 
1167 static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
1168 {
1169         int octx;
1170 
1171         module_stats.invmm++;
1172 #ifndef __SMP__
1173         if(mm->context != NO_CONTEXT) {
1174 #endif
1175 
1176                 octx = srmmu_get_context();
1177                 srmmu_set_context(mm->context);
1178                 srmmu_flush_tlb_ctx();
1179                 srmmu_set_context(octx);
1180 
1181 #ifndef __SMP__
1182         }
1183 #endif
1184 }
1185 
1186 static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1187 {
1188         int octx;
1189 
1190         module_stats.invrnge++;
1191 #ifndef __SMP__
1192         if(mm->context != NO_CONTEXT) {
1193 #endif
1194 
1195                 octx = srmmu_get_context();
1196                 srmmu_set_context(mm->context);
1197                 start &= SRMMU_PMD_MASK;
1198                 while(start < end) {
1199                         srmmu_flush_tlb_segment(start);
1200                         start += SRMMU_PMD_SIZE;
1201                 }
1202                 srmmu_set_context(octx);
1203 
1204 #ifndef __SMP__
1205         }
1206 #endif
1207 }
1208 
1209 static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1210 {
1211         struct mm_struct *mm = vma->vm_mm;
1212         int octx;
1213 
1214         module_stats.invpg++;
1215 #ifndef __SMP__
1216         if(mm->context != NO_CONTEXT) {
1217 #endif
1218 
1219                 octx = srmmu_get_context();
1220                 srmmu_set_context(mm->context);
1221                 srmmu_flush_tlb_page(page);
1222                 srmmu_set_context(octx);
1223 
1224 #ifndef __SMP__
1225         }
1226 #endif
1227 }
1228 
1229 static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
1230 {
1231         srmmu_flush_tlb_page(page);
1232 }
1233 
1234 static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
1235 {
1236         hyper_flush_whole_icache();
1237         srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
1238 }
1239 
1240 static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) 
1241 {
1242         if(tsk->mm->context != NO_CONTEXT) {
1243                 hyper_flush_whole_icache();
1244                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1245         }
1246 }
1247 
1248 static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
1249 {
1250         /* xor is your friend */
1251         __asm__ __volatile__("rd        %%psr, %%g1\n\t"
1252                              "wr        %%g1, %4, %%psr\n\t"
1253                              "nop; nop; nop;\n\t"
1254                              "swap      [%0], %1\n\t"
1255                              "wr        %%g1, 0x0, %%psr\n\t"
1256                              "nop; nop; nop;\n\t" :
1257                              "=r" (ptep), "=r" (pteval) :
1258                              "0" (ptep), "1" (pteval), "i" (PSR_ET) :
1259                              "g1");
1260 }
1261 
1262 static void hypersparc_switch_to_context(struct task_struct *tsk)
1263 {
1264         /* Kernel threads can execute in any context and so can tasks
1265          * sleeping in the middle of exiting. If this task has already
 1266          * been allocated a piece of the mmu real estate, just jump to
1267          * it.
1268          */
1269         hyper_flush_whole_icache();
1270         if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
1271            (tsk->flags & PF_EXITING))
1272                 return;
1273         if(tsk->mm->context == NO_CONTEXT) {
1274                 alloc_context(tsk->mm);
1275                 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
1276         }
1277         srmmu_set_context(tsk->mm->context);
1278 }
1279 
1280 /* IOMMU things go here. */
1281 
1282 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1283 static unsigned long first_dvma_page, last_dvma_page;
1284 
1285 #define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
1286 #define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
1287 
1288 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
1289 {
1290         unsigned long first = first_dvma_page;
1291         unsigned long last = last_dvma_page;
1292         iopte_t *iopte;
1293 
1294         iopte = iommu->page_table;
1295         iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
1296         while(first <= last) {
1297                 iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
1298                 first += PAGE_SIZE;
1299         }
1300 }
1301 
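/* The routine below walks the kernel mapping of the IOMMU page table and
 * clears the SRMMU_CACHE bit on every page backing it, presumably because
 * the IOMMU fetches ioptes straight from memory and an iopte sitting dirty
 * in a CPU cache line would be invisible to it.
 */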
1302 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
1303 {
1304         pgd_t *pgdp;
1305         pmd_t *pmdp;
1306         pte_t *ptep;
1307         unsigned long end = start + size;
1308 
1309         while(start < end) {
1310                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1311                 pmdp = srmmu_pmd_offset(pgdp, start);
1312                 ptep = srmmu_pte_offset(pmdp, start);
1313                 pte_val(*ptep) &= ~SRMMU_CACHE;
1314                 start += PAGE_SIZE;
1315         }
1316 }
1317 
1318 unsigned long iommu_init(int iommund, unsigned long memory_start,
1319                          unsigned long memory_end, struct linux_sbus *sbus)
1320 {
1321         int impl, vers, ptsize;
1322         unsigned long tmp;
1323         struct iommu_struct *iommu;
1324         struct linux_prom_registers iommu_promregs[PROMREG_MAX];
1325 
1326         memory_start = LONG_ALIGN(memory_start);
1327         iommu = (struct iommu_struct *) memory_start;
1328         memory_start += sizeof(struct iommu_struct);
1329         prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
1330         iommu->regs = (struct iommu_regs *)
1331                 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
1332                                "IOMMU registers", iommu_promregs[0].which_io, 0x0);
1333         if(!iommu->regs)
1334                 panic("Cannot map IOMMU registers.");
1335         impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
1336         vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
1337         tmp = iommu->regs->control;
1338         tmp &= ~(IOMMU_CTRL_RNGE);
1339         tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
1340         iommu->regs->control = tmp;
1341         iommu_invalidate(iommu->regs);
1342         iommu->plow = iommu->start = 0xfc000000;
1343         iommu->end = 0xffffffff;
1344 
1345         /* Allocate IOMMU page table */
1346         ptsize = iommu->end - iommu->start + 1;
1347         ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
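        /* Illustrative arithmetic, assuming 4K pages and 4-byte ioptes as on
         * sun4m: the 64MB range set up above is 16384 pages, so ptsize comes
         * out to 16384 * 4 = 64KB, and the table is then aligned naturally
         * to that size below.
         */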
1348 
1349         /* Stupid alignment constraints give me a headache. */
1350         memory_start = PAGE_ALIGN(memory_start);
1351         memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
1352         iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
1353         memory_start += ptsize;
1354 
1355         /* Initialize new table. */
1356         flush_cache_all();
1357         srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
1358         flush_tlb_all();
1359         memset(iommu->page_table, 0, ptsize);
1360         srmmu_map_dvma_pages_for_iommu(iommu);
1361         iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
1362         iommu_invalidate(iommu->regs);
1363 
1364         sbus->iommu = iommu;
1365         printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
1366                impl, vers, iommu->page_table, ptsize);
1367         return memory_start;
1368 }
1369 
1370 static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1371 {
1372         struct iommu_struct *iommu = sbus->iommu;
1373         unsigned long page = (unsigned long) vaddr;
1374         unsigned long start, end, offset;
1375         iopte_t *iopte;
1376 
1377         offset = page & ~PAGE_MASK;
1378         page &= PAGE_MASK;
1379 
1380         start = iommu->plow;
1381         end = KADB_DEBUGGER_BEGVM; /* Don't step on kadb/prom. */
1382         iopte = iommu->lowest;
1383         while(start < end) {
1384                 if(!(iopte_val(*iopte) & IOPTE_VALID))
1385                         break;
1386                 iopte++;
1387                 start += PAGE_SIZE;
1388         }
1389 
1390         flush_page_for_dma(page);
1391         vaddr = (char *) (start | offset);
1392         iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1393         iommu_invalidate_page(iommu->regs, start);
1394         iommu->lowest = iopte + 1;
1395         iommu->plow = start + PAGE_SIZE;
1396 
1397         return vaddr;
1398 }
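/* A minimal usage sketch from a driver's point of view (hypothetical code,
 * going through the mmu_get/release_scsi_one hooks installed in
 * ld_mmu_srmmu() below):
 *
 *      char *dvma = mmu_get_scsi_one(buf, len, sbus);
 *      ... program 'dvma' into the SBUS device and run the transfer ...
 *      mmu_release_scsi_one(dvma, len, sbus);
 *
 * The address handed back lies in the 0xfc000000 DVMA window; the driver
 * keeps using 'buf' for CPU accesses.
 */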
1399 
1400 static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1401 {
1402         struct iommu_struct *iommu = sbus->iommu;
1403         unsigned long page, start, end, offset;
1404         iopte_t *iopte = iommu->lowest;
1405 
1406         start = iommu->plow;
1407         end = KADB_DEBUGGER_BEGVM;
1408         while(sz >= 0) {
1409                 page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
1410                 offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
1411                 while(start < end) {
1412                         if(!(iopte_val(*iopte) & IOPTE_VALID))
1413                                 break;
1414                         iopte++;
1415                         start += PAGE_SIZE;
1416                 }
1417                 if(start == KADB_DEBUGGER_BEGVM)
1418                         panic("Wheee, iomapping overflow.");
1419                 flush_page_for_dma(page);
1420                 sg[sz].alt_addr = (char *) (start | offset);
1421                 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1422                 iommu_invalidate_page(iommu->regs, start);
1423                 iopte++;
1424                 start += PAGE_SIZE;
1425                 sz--;
1426         }
1427         iommu->lowest = iopte;
1428         iommu->plow = start;
1429 }
1430 
1431 static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1432 {
1433         struct iommu_struct *iommu = sbus->iommu;
1434         unsigned long page = (unsigned long) vaddr;
1435         iopte_t *iopte;
1436 
1437         if(len > PAGE_SIZE)
1438                 panic("Can only handle page sized IOMMU mappings.");
1439         page &= PAGE_MASK;
1440         iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1441         iopte_val(*iopte) = 0;
1442         iommu_invalidate_page(iommu->regs, page);
1443         if(iopte < iommu->lowest) {
1444                 iommu->lowest = iopte;
1445                 iommu->plow = page;
1446         }
1447 }
1448 
1449 static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1450 {
1451         struct iommu_struct *iommu = sbus->iommu;
1452         unsigned long page;
1453         iopte_t *iopte;
1454 
1455         while(sz >= 0) {
1456                 page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
1457                 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1458                 iopte_val(*iopte) = 0;
1459                 iommu_invalidate_page(iommu->regs, page);
1460                 if(iopte < iommu->lowest) {
1461                         iommu->lowest = iopte;
1462                         iommu->plow = page;
1463                 }
1464                 sg[sz].alt_addr = 0;
1465                 sz--;
1466         }
1467 }
1468 
1469 static unsigned long mempool;
1470 
1471 /* NOTE: All of this startup code assumes the low 16mb (approx.) of
1472  *       kernel mappings are done with one single contiguous chunk of
1473  *       ram.  On small ram machines (classics mainly) we only get
1474  *       around 8mb mapped for us.
1475  */
1476 
1477 static unsigned long kbpage;
1478 
1479 /* Some dirty hacks to abstract away the painful boot up init. */
1480 static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
1481 {
1482         return ((vaddr - PAGE_OFFSET) + kbpage);
1483 }
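/* For example, with PAGE_OFFSET at 0xf0000000 on sparc32 (illustrative
 * numbers), a kernel virtual address of 0xf0004000 resolves to
 * kbpage + 0x4000 at this stage, i.e. an offset into whatever physical
 * bank the boot image happens to sit in.
 */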
1484 
1485 static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
1486 {
1487         srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
1488 }
1489 
1490 static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
1491 {
1492         srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
1493 }
1494 
1495 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
1496 {
1497         return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1498 }
1499 
1500 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
1501 {
1502         return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1503 }
1504 
1505 static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
1506 {
1507         return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
1508 }
1509 
1510 static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
1511 {
1512         return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
1513 }
1514 
1515 /* Allocate a block of RAM which is aligned to its size.
1516  * This procedure can be used until the call to mem_init().
1517  */
1518 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
1519 {
1520         unsigned long mask = size - 1;
1521         unsigned long ret;
1522 
1523         if(!size)
1524                 return 0x0;
1525         if(size & mask) {
1526                 prom_printf("panic: srmmu_init_alloc botch\n");
1527                 prom_halt();
1528         }
1529         ret = (*kbrk + mask) & ~mask;
1530         *kbrk = ret + size;
1531         memset((void*) ret, 0, size);
1532         return (void*) ret;
1533 }
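/* Worked example of the alignment above (illustrative values): with
 * *kbrk == 0x00302345 and size == 0x1000, mask is 0xfff, so
 * ret = (0x00302345 + 0xfff) & ~0xfff = 0x00303000 and the brk advances
 * to 0x00304000.  A size that is not a power of two trips the
 * (size & mask) check and halts via the prom.
 */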
1534 
1535 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
1536 {
1537         pgd_t *pgdp;
1538         pmd_t *pmdp;
1539         pte_t *ptep;
1540 
1541         while(start < end) {
1542                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1543                 if(srmmu_pgd_none(*pgdp)) {
1544                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1545                         srmmu_early_pgd_set(pgdp, pmdp);
1546                 }
1547                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1548                 if(srmmu_pmd_none(*pmdp)) {
1549                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1550                         srmmu_early_pmd_set(pmdp, ptep);
1551                 }
1552                 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
1553         }
1554 }
1555 
1556 /* This is much cleaner than poking around physical address space
1557  * looking at the prom's page table directly which is what most
1558  * other OS's do.  Yuck... this is much better.
1559  */
1560 void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
1561 {
1562         pgd_t *pgdp;
1563         pmd_t *pmdp;
1564         pte_t *ptep;
1565         int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
1566         unsigned long prompte;
1567 
1568         while(start <= end) {
1569                 if (start == 0)
1570                         break; /* probably wrap around */
1571                 if(start == 0xfef00000)
1572                         start = KADB_DEBUGGER_BEGVM;
1573                 if(!(prompte = srmmu_hwprobe(start))) {
1574                         start += PAGE_SIZE;
1575                         continue;
1576                 }
1577     
1578                 /* A red snapper, see what it really is. */
1579                 what = 0;
1580     
1581                 if(!(start & ~(SRMMU_PMD_MASK))) {
1582                         if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
1583                                 what = 1;
1584                 }
1585     
1586                 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1587                         if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1588                            prompte)
1589                                 what = 2;
1590                 }
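                /* How the probes above classify the mapping (sketch): if
                 * start is 256KB-aligned and the last page of that 256KB
                 * region probes to the same pte value, the prom used a
                 * pmd-level (256KB) mapping; the same trick with the 16MB
                 * pgdir size detects a pgd-level mapping.  Anything else is
                 * treated as an ordinary 4KB pte.
                 */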
1591     
1592                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1593                 if(what == 2) {
1594                         pgd_val(*pgdp) = prompte;
1595                         start += SRMMU_PGDIR_SIZE;
1596                         continue;
1597                 }
1598                 if(srmmu_pgd_none(*pgdp)) {
1599                         pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1600                         srmmu_early_pgd_set(pgdp, pmdp);
1601                 }
1602                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1603                 if(what == 1) {
1604                         pmd_val(*pmdp) = prompte;
1605                         start += SRMMU_PMD_SIZE;
1606                         continue;
1607                 }
1608                 if(srmmu_pmd_none(*pmdp)) {
1609                         ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1610                         srmmu_early_pmd_set(pmdp, ptep);
1611                 }
1612                 ptep = srmmu_early_pte_offset(pmdp, start);
1613                 pte_val(*ptep) = prompte;
1614                 start += PAGE_SIZE;
1615         }
1616 }
1617 
1618 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1619 {
1620         unsigned long start;
1621         pgprot_t dvma_prot;
1622         pgd_t *pgdp;
1623         pmd_t *pmdp;
1624         pte_t *ptep;
1625 
1626         start = DVMA_VADDR;
1627         if (viking_mxcc_present)
1628                 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
1629         else
1630                 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1631         while(first <= last) {
1632                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1633                 pmdp = srmmu_pmd_offset(pgdp, start);
1634                 ptep = srmmu_pte_offset(pmdp, start);
1635 
1636                 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1637 
1638                 first += PAGE_SIZE;
1639                 start += PAGE_SIZE;
1640         }
1641 
1642         /* Uncache DVMA pages. */
1643         if (!viking_mxcc_present) {
1644                 first = first_dvma_page;
1645                 last = last_dvma_page;
1646                 while(first <= last) {
1647                         pgdp = srmmu_pgd_offset(init_task.mm, first);
1648                         pmdp = srmmu_pmd_offset(pgdp, first);
1649                         ptep = srmmu_pte_offset(pmdp, first);
1650                         pte_val(*ptep) &= ~SRMMU_CACHE;
1651                         first += PAGE_SIZE;
1652                 }
1653         }
1654 }
1655 
1656 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1657 {
1658         unsigned long last_page;
1659         int srmmu_bank, phys_bank, i;
1660         pgd_t *pgdp;
1661         pmd_t *pmdp;
1662         pte_t *ptep;
1663 
1664         end = PAGE_ALIGN(end);
1665 
1666         if(start == (KERNBASE + PAGE_SIZE)) {
1667                 unsigned long pte;
1668                 unsigned long tmp;
1669 
1670                 pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
1671                 pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
1672                 ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
1673 
1674                 /* Put a real mapping in for the KERNBASE page. */
1675                 tmp = kbpage;
1676                 pte = (tmp) >> 4;
1677                 pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1678                 pte_val(*ptep) = pte;
1679         }
1680 
1681         /* Copy over mappings prom already gave us. */
1682         last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1683         while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
1684                 unsigned long tmp;
1685 
1686                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1687                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1688                 ptep = srmmu_early_pte_offset(pmdp, start);
1689                 tmp = srmmu_hwprobe(start);
1690                 tmp &= ~(0xff);
1691                 tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1692                 pte_val(*ptep) = tmp;
1693                 start += PAGE_SIZE;
1694                 tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1695 
1696                 /* Never a cross bank boundary, thank you. */
1697                 if(tmp != last_page + PAGE_SIZE)
1698                         break;
1699                 last_page = tmp;
1700         }
1701 
1702         /* Ok, that was assumed to be one full bank, begin
1703          * construction of srmmu_map[].
1704          */
1705         for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
1706                 if(kbpage >= sp_banks[phys_bank].base_addr &&
1707                    (kbpage <
1708                     (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
1709                         break; /* found it */
1710         }
1711         srmmu_bank = 0;
1712         srmmu_map[srmmu_bank].vbase = KERNBASE;
1713         srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
1714         srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
1715         if(kbpage != sp_banks[phys_bank].base_addr) {
1716                 prom_printf("Detected PenguinPages, getting out of here.\n");
1717                 prom_halt();
1718 #if 0
1719                 srmmu_map[srmmu_bank].pbase = kbpage;
1720                 srmmu_map[srmmu_bank].size -=
1721                         (kbpage - sp_banks[phys_bank].base_addr);
1722 #endif
1723         }
1724         /* Prom didn't map all of this first bank, fill
1725          * in the rest by hand.
1726          */
1727         while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
1728                 unsigned long pteval;
1729 
1730                 pgdp = srmmu_pgd_offset(init_task.mm, start);
1731                 pmdp = srmmu_early_pmd_offset(pgdp, start);
1732                 ptep = srmmu_early_pte_offset(pmdp, start);
1733 
1734                 pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
1735                 pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1736                 pte_val(*ptep) = pteval;
1737                 start += PAGE_SIZE;
1738         }
1739 
1740         /* Mark this sp_bank invalid... */
1741         sp_banks[phys_bank].base_addr |= 1;
1742         srmmu_bank++;
1743 
1744         /* Now, deal with what is left. */
1745         while(start < end) {
1746                 unsigned long baddr;
1747                 int btg;
1748 
1749                 /* Find a usable cluster of physical ram. */
1750                 for(i=0; sp_banks[i].num_bytes != 0; i++)
1751                         if(!(sp_banks[i].base_addr & 1))
1752                                 break;
1753                 if(sp_banks[i].num_bytes == 0)
1754                         break;
1755 
1756                 /* Add it to srmmu_map */
1757                 srmmu_map[srmmu_bank].vbase = start;
1758                 srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
1759                 srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
1760                 srmmu_bank++;
1761 
1762                 btg = sp_banks[i].num_bytes;
1763                 baddr = sp_banks[i].base_addr;
1764                 while(btg) {
1765                         pgdp = srmmu_pgd_offset(init_task.mm, start);
1766                         pmdp = srmmu_early_pmd_offset(pgdp, start);
1767                         ptep = srmmu_early_pte_offset(pmdp, start);
1768                         pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1769                         pte_val(*ptep) |= (baddr >> 4);
1770 
1771                         baddr += PAGE_SIZE;
1772                         start += PAGE_SIZE;
1773                         btg -= PAGE_SIZE;
1774                 }
1775                 sp_banks[i].base_addr |= 1;
1776         }
1777         if(start < end) {
1778                 prom_printf("weird, didn't use all of physical memory... ");
1779                 prom_halt();
1780         }
1781         for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
1782                 sp_banks[phys_bank].base_addr &= ~1;
1783 #if 0
1784         for(i = 0; srmmu_map[i].size != 0; i++) {
1785                 prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
1786                             i, srmmu_map[i].vbase,
1787                             srmmu_map[i].pbase, srmmu_map[i].size);
1788         }
1789         prom_getchar();
1790         for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1791                 prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
1792                             i,
1793                             sp_banks[i].base_addr,
1794                             sp_banks[i].num_bytes);
1795         }
1796         prom_getchar();
1797         prom_halt();
1798 #endif
1799 }
1800 
1801 /* Paging initialization on the Sparc Reference MMU. */
1802 extern unsigned long free_area_init(unsigned long, unsigned long);
1803 extern unsigned long sparc_context_init(unsigned long, int);
1804 
1805 extern int physmem_mapped_contig;
1806 extern int linux_num_cpus;
1807 
1808 void (*poke_srmmu)(void);
1809 
1810 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1811 {
1812         unsigned long ptables_start, first_mapped_page;
1813         int i, cpunode;
1814         char node_str[128];
1815         pgd_t *pgdp;
1816         pmd_t *pmdp;
1817         pte_t *ptep;
1818 
1819         physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
1820 
1821 #if CONFIG_AP1000
1822         printk("Forcing num_contexts to 1024\n");
1823         num_contexts = 1024;
1824 #else
1825         /* Find the number of contexts on the srmmu. */
1826         cpunode = prom_getchild(prom_root_node);
1827         num_contexts = 0;
1828         while((cpunode = prom_getsibling(cpunode)) != 0) {
1829                 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1830                 if(!strcmp(node_str, "cpu")) {
1831                         num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1832                         break;
1833                 }
1834         }
1835 #endif
1836         if(!num_contexts) {
1837                 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1838                 prom_halt();
1839         }
1840                 
1841         ptables_start = mempool = PAGE_ALIGN(start_mem);
1842         memset(swapper_pg_dir, 0, PAGE_SIZE);
1843         first_mapped_page = KERNBASE;
1844         kbpage = srmmu_hwprobe(KERNBASE);
1845         if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
1846                 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1847                 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1848                 kbpage -= PAGE_SIZE;
1849                 first_mapped_page += PAGE_SIZE;
1850         } else
1851                 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1852 
1853         srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1854 #if CONFIG_SUN_IO
1855         srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1856         srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1857 #endif
1858 
1859         /* Steal DVMA pages now, I still don't like how we waste all this. */
1860         mempool = PAGE_ALIGN(mempool);
1861         first_dvma_page = mempool;
1862         last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1863         mempool = last_dvma_page + PAGE_SIZE;
1864 
1865 #if CONFIG_AP1000
1866         ap_inherit_mappings();
1867 #else
1868         srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1869 #endif
1870         srmmu_map_kernel(first_mapped_page, end_mem);
1871 #if CONFIG_SUN_IO
1872         srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1873 #endif
1874         srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1875         srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
1876         for(i = 0; i < num_contexts; i++)
1877                 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1878 
1879         start_mem = PAGE_ALIGN(mempool);
1880 
1881         /* Some SRMMU's are _very_ stupid indeed: their table walks are not kept coherent with the cache, so the page tables themselves must be mapped uncacheable. */
1882         if(!can_cache_ptables) {
1883                 for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
1884                         pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
1885                         pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
1886                         ptep = srmmu_early_pte_offset(pmdp, ptables_start);
1887                         pte_val(*ptep) &= ~SRMMU_CACHE;
1888                 }
1889 
1890                 pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
1891                 pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
1892                 ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
1893                 pte_val(*ptep) &= ~SRMMU_CACHE;
1894         }
1895 
1896         flush_cache_all();
1897         srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1898         flush_tlb_all();
1899         poke_srmmu();
1900 
1901         start_mem = sparc_context_init(start_mem, num_contexts);
1902         start_mem = free_area_init(start_mem, end_mem);
1903 
1904         return PAGE_ALIGN(start_mem);
1905 }
1906 
1907 static char srmmuinfo[512];
1908 
1909 static char *srmmu_mmu_info(void)
1910 {
1911         sprintf(srmmuinfo, "MMU type\t: %s\n"
1912                 "invall\t\t: %d\n"
1913                 "invmm\t\t: %d\n"
1914                 "invrnge\t\t: %d\n"
1915                 "invpg\t\t: %d\n"
1916                 "contexts\t: %d\n"
1917                 "big_chunks\t: %d\n"
1918                 "little_chunks\t: %d\n",
1919                 srmmu_name,
1920                 module_stats.invall,
1921                 module_stats.invmm,
1922                 module_stats.invrnge,
1923                 module_stats.invpg,
1924                 num_contexts,
1925 #if 0
1926                 num_big_chunks,
1927                 num_little_chunks
1928 #else
1929                 0, 0
1930 #endif
1931                 );
1932         return srmmuinfo;
1933 }
1934 
1935 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1936 {
1937 }
1938 
1939 static void srmmu_exit_hook(void)
1940 {
1941         struct ctx_list *ctx_old;
1942         struct mm_struct *mm = current->mm;
1943 
1944         if(mm->context != NO_CONTEXT) {
1945                 flush_cache_mm(mm);
1946                 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1947                 flush_tlb_mm(mm);
1948                 ctx_old = ctx_list_pool + mm->context;
1949                 remove_from_ctx_list(ctx_old);
1950                 add_to_free_ctxlist(ctx_old);
1951                 mm->context = NO_CONTEXT;
1952         }
1953 }
1954 
1955 static void srmmu_flush_hook(void)
1956 {
1957         if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1958                 alloc_context(current->mm);
1959                 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1960                 srmmu_set_context(current->mm->context);
1961         }
1962 }
1963 
1964 static void hypersparc_exit_hook(void)
1965 {
1966         struct ctx_list *ctx_old;
1967         struct mm_struct *mm = current->mm;
1968 
1969         if(mm->context != NO_CONTEXT) {
1970                 /* HyperSparc is copy-back, any data for this
1971                  * process in a modified cache line is stale
1972                  * and must be written back to main memory now
1973                  * else we eat shit later big time.
1974                  */
1975                 flush_cache_mm(mm);
1976                 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1977                 flush_tlb_mm(mm);
1978                 ctx_old = ctx_list_pool + mm->context;
1979                 remove_from_ctx_list(ctx_old);
1980                 add_to_free_ctxlist(ctx_old);
1981                 mm->context = NO_CONTEXT;
1982         }
1983 }
1984 
1985 static void hypersparc_flush_hook(void)
1986 {
1987         if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1988                 alloc_context(current->mm);
1989                 flush_cache_mm(current->mm);
1990                 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1991                 srmmu_set_context(current->mm->context);
1992         }
1993 }
1994 
1995 /* Init various srmmu chip types. */
1996 void srmmu_is_bad(void)
1997 {
1998         prom_printf("Could not determine SRMMU chip type.\n");
1999         prom_halt();
2000 }
2001 
2002 void poke_hypersparc(void)
2003 {
2004         volatile unsigned long clear;
2005         unsigned long mreg = srmmu_get_mmureg();
2006 
2007         hyper_flush_unconditional_combined();
2008 
2009         mreg &= ~(HYPERSPARC_CWENABLE);
2010         mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2011         mreg |= (HYPERSPARC_CMODE);
2012 
2013         srmmu_set_mmureg(mreg);
2014         hyper_clear_all_tags();
2015 
2016         put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2017         hyper_flush_whole_icache();
2018         clear = srmmu_get_faddr();
2019         clear = srmmu_get_fstatus();
2020 }
2021 
2022 void init_hypersparc(void)
2023 {
2024         unsigned long mreg = srmmu_get_mmureg();
2025 
2026         srmmu_name = "ROSS HyperSparc";
2027         can_cache_ptables = 0;
2028         if(mreg & HYPERSPARC_CSIZE) {
2029                 hyper_cache_size = (256 * 1024);
2030                 hyper_line_size = 64;
2031         } else {
2032                 hyper_cache_size = (128 * 1024);
2033                 hyper_line_size = 32;
2034         }
2035 
2036         flush_cache_all = hypersparc_flush_cache_all;
2037         flush_cache_mm = hypersparc_flush_cache_mm;
2038         flush_cache_range = hypersparc_flush_cache_range;
2039         flush_cache_page = hypersparc_flush_cache_page;
2040 
2041         flush_tlb_all = hypersparc_flush_tlb_all;
2042         flush_tlb_mm = hypersparc_flush_tlb_mm;
2043         flush_tlb_range = hypersparc_flush_tlb_range;
2044         flush_tlb_page = hypersparc_flush_tlb_page;
2045 
2046         flush_page_to_ram = hypersparc_flush_page_to_ram;
2047         flush_page_for_dma = hypersparc_flush_page_for_dma;
2048         flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
2049         flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
2050 
2051         ctxd_set = hypersparc_ctxd_set;
2052         switch_to_context = hypersparc_switch_to_context;
2053         mmu_exit_hook = hypersparc_exit_hook;
2054         mmu_flush_hook = hypersparc_flush_hook;
2055         sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
2056         set_pte = hypersparc_set_pte;
2057         poke_srmmu = poke_hypersparc;
2058 }
2059 
2060 void poke_cypress(void)
2061 {
2062         unsigned long mreg = srmmu_get_mmureg();
2063 
2064         mreg &= ~CYPRESS_CMODE;
2065         mreg |= CYPRESS_CENABLE;
2066         srmmu_set_mmureg(mreg);
2067 }
2068 
2069 void init_cypress_common(void)
2070 {
2071         can_cache_ptables = 0;
2072         flush_tlb_all = cypress_flush_tlb_all;
2073         flush_tlb_mm = cypress_flush_tlb_mm;
2074         flush_tlb_page = cypress_flush_tlb_page;
2075         flush_tlb_range = cypress_flush_tlb_range;
2076         poke_srmmu = poke_cypress;
2077 
2078         /* XXX Need to write cache flushes for this one... XXX */
2079 
2080 }
2081 
2082 void init_cypress_604(void)
2083 {
2084         srmmu_name = "ROSS Cypress-604(UP)";
2085         srmmu_modtype = Cypress;
2086         init_cypress_common();
2087 }
2088 
2089 void init_cypress_605(unsigned long mrev)
2090 {
2091         srmmu_name = "ROSS Cypress-605(MP)";
2092         if(mrev == 0xe) {
2093                 srmmu_modtype = Cypress_vE;
2094                 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2095         } else {
2096                 if(mrev == 0xd) {
2097                         srmmu_modtype = Cypress_vD;
2098                         hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2099                 } else {
2100                         srmmu_modtype = Cypress;
2101                 }
2102         }
2103         init_cypress_common();
2104 }
2105 
2106 void poke_swift(void)
2107 {
2108         unsigned long mreg = srmmu_get_mmureg();
2109 
2110         /* Clear any crap from the cache or else... */
2111         swift_idflash_clear();
2112         mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
2113 
2114         /* The Swift branch folding logic is completely broken.  At
2115          * trap time, if things are just right, it can mistakenly
2116          * think that a trap is coming from kernel mode when in fact
2117          * it is coming from user mode (it mis-executes the branch in
2118          * the trap code).  So you see things like crashme completely
2119          * hosing your machine which is completely unacceptable.  Turn
2120          * this shit off... nice job Fujitsu.
2121          */
2122         mreg &= ~(SWIFT_BF);
2123         srmmu_set_mmureg(mreg);
2124 }
2125 
2126 #define SWIFT_MASKID_ADDR  0x10003018
2127 void init_swift(void)
2128 {
2129         unsigned long swift_rev;
2130 
2131         __asm__ __volatile__("lda [%1] %2, %0\n\t"
2132                              "srl %0, 0x18, %0\n\t" :
2133                              "=r" (swift_rev) :
2134                              "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2135         srmmu_name = "Fujitsu Swift";
2136         switch(swift_rev) {
2137         case 0x11:
2138         case 0x20:
2139         case 0x23:
2140         case 0x30:
2141                 srmmu_modtype = Swift_lots_o_bugs;
2142                 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2143                 /* Gee george, I wonder why Sun is so hush hush about
2144                  * this hardware bug... really braindamage stuff going
2145                  * on here.  However I think we can find a way to avoid
2146                  * all of the workaround overhead under Linux.  Basically,
2147                  * any page fault can cause kernel pages to become user
2148                  * accessible (the mmu gets confused and clears some of
2149                  * the ACC bits in kernel ptes).  Aha, sounds pretty
2150                  * horrible eh?  But wait, after extensive testing it appears
2151                  * that if you use pgd_t level large kernel pte's (like the
2152                  * 4MB pages on the Pentium) the bug does not get tripped
2153                  * at all.  This avoids almost all of the major overhead.
2154                  * Welcome to a world where your vendor tells you to,
2155                  * "apply this kernel patch" instead of "sorry for the
2156                  * broken hardware, send it back and we'll give you
2157                  * properly functioning parts"
2158                  */
2159                 break;
2160         case 0x25:
2161         case 0x31:
2162                 srmmu_modtype = Swift_bad_c;
2163                 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2164                 /* You see Sun allude to this hardware bug but never
2165                  * admit things directly, they'll say things like,
2166                  * "the Swift chip cache problems" or similar.
2167                  */
2168                 break;
2169         default:
2170                 srmmu_modtype = Swift_ok;
2171                 break;
2172         }
2173 
2174         flush_cache_all = swift_flush_cache_all;
2175         flush_cache_mm = swift_flush_cache_mm;
2176         flush_cache_page = swift_flush_cache_page;
2177         flush_cache_range = swift_flush_cache_range;
2178 
2179         flush_tlb_all = swift_flush_tlb_all;
2180         flush_tlb_mm = swift_flush_tlb_mm;
2181         flush_tlb_page = swift_flush_tlb_page;
2182         flush_tlb_range = swift_flush_tlb_range;
2183 
2184         flush_page_to_ram = swift_flush_page_to_ram;
2185         flush_page_for_dma = swift_flush_page_for_dma;
2186         flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
2187         flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
2188 
2189         /* Are you now convinced that the Swift is one of the
2190          * biggest VLSI abortions of all time?  Bravo Fujitsu!
2191          * Fujitsu, the !#?!%$'d up processor people.  I bet if
2192          * you examined the microcode of the Swift you'd find
2193          * XXX's all over the place.
2194          */
2195         poke_srmmu = poke_swift;
2196 }
2197 
2198 void poke_tsunami(void)
2199 {
2200         unsigned long mreg = srmmu_get_mmureg();
2201 
2202         tsunami_flush_icache();
2203         tsunami_flush_dcache();
2204         mreg &= ~TSUNAMI_ITD;
2205         mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2206         srmmu_set_mmureg(mreg);
2207 }
2208 
2209 void init_tsunami(void)
2210 {
2211         /* Tsunami's pretty sane, Sun and TI actually got it
2212          * somewhat right this time.  Fujitsu should have
2213          * taken some lessons from them.
2214          */
2215 
2216         srmmu_name = "TI Tsunami";
2217         srmmu_modtype = Tsunami;
2218         can_cache_ptables = 1;
2219 
2220         flush_cache_all = tsunami_flush_cache_all;
2221         flush_cache_mm = tsunami_flush_cache_mm;
2222         flush_cache_page = tsunami_flush_cache_page;
2223         flush_cache_range = tsunami_flush_cache_range;
2224 
2225         flush_tlb_all = tsunami_flush_tlb_all;
2226         flush_tlb_mm = tsunami_flush_tlb_mm;
2227         flush_tlb_page = tsunami_flush_tlb_page;
2228         flush_tlb_range = tsunami_flush_tlb_range;
2229 
2230         flush_page_to_ram = tsunami_flush_page_to_ram;
2231         flush_page_for_dma = tsunami_flush_page_for_dma;
2232         flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
2233         flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
2234 
2235         poke_srmmu = poke_tsunami;
2236 }
2237 
2238 void poke_viking(void)
2239 {
2240         unsigned long mreg = srmmu_get_mmureg();
2241         static int smp_catch = 0;
2242 
2243         if(viking_mxcc_present) {
2244                 unsigned long mxcc_control;
2245 
2246                 __asm__ __volatile__("set -1, %%g2\n\t"
2247                                      "set -1, %%g3\n\t"
2248                                      "stda %%g2, [%1] %2\n\t"
2249                                      "lda [%3] %2, %0\n\t" :
2250                                      "=r" (mxcc_control) :
2251                                      "r" (MXCC_EREG), "i" (ASI_M_MXCC),
2252                                      "r" (MXCC_CREG) : "g2", "g3");
2253                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2254                 mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
2255                 mreg &= ~(VIKING_PCENABLE);
2256                 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
2257                                      "r" (mxcc_control), "r" (MXCC_CREG),
2258                                      "i" (ASI_M_MXCC));
2259                 srmmu_set_mmureg(mreg);
2260                 mreg |= VIKING_TCENABLE;
2261         } else {
2262                 unsigned long bpreg;
2263 
2264                 mreg &= ~(VIKING_TCENABLE);
2265                 if(smp_catch++) {
2266                         /* Must disable mixed-cmd mode here for
2267                          * other cpu's.
2268                          */
2269                         bpreg = viking_get_bpreg();
2270                         bpreg &= ~(VIKING_ACTION_MIX);
2271                         viking_set_bpreg(bpreg);
2272 
2273                         /* Just in case PROM does something funny. */
2274                         msi_set_sync();
2275                 }
2276         }
2277 
2278         viking_unlock_icache();
2279         viking_flush_icache();
2280 #if 0
2281         viking_unlock_dcache();
2282         viking_flush_dcache();
2283 #endif
2284         mreg |= VIKING_SPENABLE;
2285         mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2286         mreg |= VIKING_SBENABLE;
2287         mreg &= ~(VIKING_ACENABLE);
2288 #if CONFIG_AP1000
2289         mreg &= ~(VIKING_SBENABLE);
2290 #endif
2291 #ifdef __SMP__
2292         mreg &= ~(VIKING_SBENABLE);
2293 #endif
2294         srmmu_set_mmureg(mreg);
2295 }
2296 
2297 void init_viking(void)
2298 {
2299         unsigned long mreg = srmmu_get_mmureg();
2300 
2301         /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
2302 
2303         if(mreg & VIKING_MMODE) {
2304                 unsigned long bpreg;
2305 
2306                 srmmu_name = "TI Viking";
2307                 viking_mxcc_present = 0;
2308                 can_cache_ptables = 0;
2309 
2310                 bpreg = viking_get_bpreg();
2311                 bpreg &= ~(VIKING_ACTION_MIX);
2312                 viking_set_bpreg(bpreg);
2313 
2314                 msi_set_sync();
2315 
2316                 flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
2317         } else {
2318                 srmmu_name = "TI Viking/MXCC";
2319                 viking_mxcc_present = 1;
2320                 can_cache_ptables = 1;
2321                 flush_cache_page_to_uncache = viking_mxcc_flush_page;
2322         }
2323 
2324         flush_cache_all = viking_flush_cache_all;
2325         flush_cache_mm = viking_flush_cache_mm;
2326         flush_cache_page = viking_flush_cache_page;
2327         flush_cache_range = viking_flush_cache_range;
2328 
2329         flush_tlb_all = viking_flush_tlb_all;
2330         flush_tlb_mm = viking_flush_tlb_mm;
2331         flush_tlb_page = viking_flush_tlb_page;
2332         flush_tlb_range = viking_flush_tlb_range;
2333 
2334         flush_page_to_ram = viking_flush_page_to_ram;
2335         flush_page_for_dma = viking_flush_page_for_dma;
2336         flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
2337 
2338         poke_srmmu = poke_viking;
2339 }
2340 
2341 /* Probe for the srmmu chip version. */
2342 static void get_srmmu_type(void)
2343 {
2344         unsigned long mreg, psr;
2345         unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2346 
2347         srmmu_modtype = SRMMU_INVAL_MOD;
2348         hwbug_bitmask = 0;
2349 
2350         mreg = srmmu_get_mmureg(); psr = get_psr();
2351         mod_typ = (mreg & 0xf0000000) >> 28;
2352         mod_rev = (mreg & 0x0f000000) >> 24;
2353         psr_typ = (psr >> 28) & 0xf;
2354         psr_vers = (psr >> 24) & 0xf;
2355 
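        /* Decode summary for the checks below (read off the code, not a
         * datasheet): MCR module type 1 is ROSS (rev 7 = HyperSparc, rev 0 =
         * Cypress 604, revs 0xd-0xf = Cypress 605); PSR impl 0 / vers 4 is
         * the Fujitsu Swift; PSR impl 4 covers the TI parts, split between
         * Viking and Tsunami by version and MCR contents.
         */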
2356         /* First, check for HyperSparc or Cypress. */
2357         if(mod_typ == 1) {
2358                 switch(mod_rev) {
2359                 case 7:
2360                         /* UP or MP Hypersparc */
2361                         init_hypersparc();
2362                         break;
2363                 case 0:
2364                         /* Uniprocessor Cypress */
2365                         init_cypress_604();
2366                         break;
2367                 case 13:
2368                 case 14:
2369                 case 15:
2370                         /* MP Cypress mmu/cache-controller */
2371                         init_cypress_605(mod_rev);
2372                         break;
2373                 default:
2374                         srmmu_is_bad();
2375                         break;
2376                 }
2377                 return;
2378         }
2379 
2380         /* Next check for Fujitsu Swift. */
2381         if(psr_typ == 0 && psr_vers == 4) {
2382                 init_swift();
2383                 return;
2384         }
2385 
2386         /* Now the Viking family of srmmu. */
2387         if(psr_typ == 4 &&
2388            ((psr_vers == 0) ||
2389             ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2390                 init_viking();
2391                 return;
2392         }
2393 
2394         /* Finally the Tsunami. */
2395         if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2396                 init_tsunami();
2397                 return;
2398         }
2399 
2400         /* Oh well */
2401         srmmu_is_bad();
2402 }
2403 
2404 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2405         tsetup_mmu_patchme, rtrap_mmu_patchme;
2406 
2407 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2408         tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2409 
2410 #ifdef __SMP__
2411 extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
2412 #endif
2413 
2414 extern unsigned long srmmu_fault;
2415 
2416 #define PATCH_BRANCH(insn, dest) do { \
2417                 iaddr = &(insn); \
2418                 daddr = &(dest); \
2419                 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2420         } while(0)
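/* SPARC_BRANCH presumably encodes an unconditional "ba" whose displacement
 * is the word distance from iaddr to daddr, so the slots patched below end
 * up jumping straight into the srmmu versions of the window stack checkers
 * and the fault handler.
 */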
2421 
2422 static void patch_window_trap_handlers(void)
2423 {
2424         unsigned long *iaddr, *daddr;
2425         
2426         PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2427         PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2428         PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2429         PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2430 #ifdef __SMP__
2431         PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
2432 #endif
2433         PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2434         PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2435         PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2436 }
2437 
2438 #ifdef __SMP__
2439 /* Local cross-calls. */
2440 static void smp_flush_page_for_dma(unsigned long page)
2441 {
2442         xc1((smpfunc_t) local_flush_page_for_dma, page);
2443 }
2444 
2445 static void smp_flush_cache_page_to_uncache(unsigned long page)
2446 {
2447         xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
2448 }
2449 
2450 static void smp_flush_tlb_page_for_cbit(unsigned long page)
2451 {
2452         xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
2453 }
2454 #endif
2455 
2456 /* Load up routines and constants for sun4m mmu */
2457 void ld_mmu_srmmu(void)
2458 {
2459         /* First the constants */
2460         pmd_shift = SRMMU_PMD_SHIFT;
2461         pmd_size = SRMMU_PMD_SIZE;
2462         pmd_mask = SRMMU_PMD_MASK;
2463         pgdir_shift = SRMMU_PGDIR_SHIFT;
2464         pgdir_size = SRMMU_PGDIR_SIZE;
2465         pgdir_mask = SRMMU_PGDIR_MASK;
2466 
2467         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
2468         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
2469         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
2470 
2471         page_none = SRMMU_PAGE_NONE;
2472         page_shared = SRMMU_PAGE_SHARED;
2473         page_copy = SRMMU_PAGE_COPY;
2474         page_readonly = SRMMU_PAGE_RDONLY;
2475         page_kernel = SRMMU_PAGE_KERNEL;
2476         pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2477             
2478         /* Functions */
2479         set_pte = srmmu_set_pte;
2480         switch_to_context = srmmu_switch_to_context;
2481         pmd_align = srmmu_pmd_align;
2482         pgdir_align = srmmu_pgdir_align;
2483         vmalloc_start = srmmu_vmalloc_start;
2484 
2485         pte_page = srmmu_pte_page;
2486         pmd_page = srmmu_pmd_page;
2487         pgd_page = srmmu_pgd_page;
2488 
2489         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
2490 
2491         pte_none = srmmu_pte_none;
2492         pte_present = srmmu_pte_present;
2493         pte_clear = srmmu_pte_clear;
2494 
2495         pmd_none = srmmu_pmd_none;
2496         pmd_bad = srmmu_pmd_bad;
2497         pmd_present = srmmu_pmd_present;
2498         pmd_clear = srmmu_pmd_clear;
2499 
2500         pgd_none = srmmu_pgd_none;
2501         pgd_bad = srmmu_pgd_bad;
2502         pgd_present = srmmu_pgd_present;
2503         pgd_clear = srmmu_pgd_clear;
2504 
2505         mk_pte = srmmu_mk_pte;
2506         pgd_set = srmmu_pgd_set;
2507         mk_pte_io = srmmu_mk_pte_io;
2508         pte_modify = srmmu_pte_modify;
2509         pgd_offset = srmmu_pgd_offset;
2510         pmd_offset = srmmu_pmd_offset;
2511         pte_offset = srmmu_pte_offset;
2512         pte_free_kernel = srmmu_pte_free_kernel;
2513         pmd_free_kernel = srmmu_pmd_free_kernel;
2514         pte_alloc_kernel = srmmu_pte_alloc_kernel;
2515         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
2516         pte_free = srmmu_pte_free;
2517         pte_alloc = srmmu_pte_alloc;
2518         pmd_free = srmmu_pmd_free;
2519         pmd_alloc = srmmu_pmd_alloc;
2520         pgd_free = srmmu_pgd_free;
2521         pgd_alloc = srmmu_pgd_alloc;
2522 
2523         pte_write = srmmu_pte_write;
2524         pte_dirty = srmmu_pte_dirty;
2525         pte_young = srmmu_pte_young;
2526         pte_wrprotect = srmmu_pte_wrprotect;
2527         pte_mkclean = srmmu_pte_mkclean;
2528         pte_mkold = srmmu_pte_mkold;
2529         pte_mkwrite = srmmu_pte_mkwrite;
2530         pte_mkdirty = srmmu_pte_mkdirty;
2531         pte_mkyoung = srmmu_pte_mkyoung;
2532         update_mmu_cache = srmmu_update_mmu_cache;
2533         mmu_exit_hook = srmmu_exit_hook;
2534         mmu_flush_hook = srmmu_flush_hook;
2535         mmu_lockarea = srmmu_lockarea;
2536         mmu_unlockarea = srmmu_unlockarea;
2537 
2538         mmu_get_scsi_one = srmmu_get_scsi_one;
2539         mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
2540         mmu_release_scsi_one = srmmu_release_scsi_one;
2541         mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
2542 
2543         mmu_info = srmmu_mmu_info;
2544         mmu_v2p = srmmu_v2p;
2545         mmu_p2v = srmmu_p2v;
2546 
2547         /* Task struct and kernel stack allocating/freeing. */
2548         alloc_kernel_stack = srmmu_alloc_kernel_stack;
2549         alloc_task_struct = srmmu_alloc_task_struct;
2550         free_kernel_stack = srmmu_free_kernel_stack;
2551         free_task_struct = srmmu_free_task_struct;
2552 
2553         quick_kernel_fault = srmmu_quick_kernel_fault;
2554 
2555         /* SRMMU specific. */
2556         ctxd_set = srmmu_ctxd_set;
2557         pmd_set = srmmu_pmd_set;
2558 
2559         get_srmmu_type();
2560         patch_window_trap_handlers();
2561 
2562 #ifdef __SMP__
2563         /* El switcheroo... */
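        /* The chip-specific routines chosen by get_srmmu_type() are saved
         * off as the local_* variants, and the global hooks are rebound to
         * the smp_* wrappers, which broadcast via cross-calls (e.g. xc1
         * above) so that every cpu ends up running its local flush.
         */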
2564 
2565         local_flush_cache_all = flush_cache_all;
2566         local_flush_cache_mm = flush_cache_mm;
2567         local_flush_cache_range = flush_cache_range;
2568         local_flush_cache_page = flush_cache_page;
2569         local_flush_tlb_all = flush_tlb_all;
2570         local_flush_tlb_mm = flush_tlb_mm;
2571         local_flush_tlb_range = flush_tlb_range;
2572         local_flush_tlb_page = flush_tlb_page;
2573         local_flush_page_to_ram = flush_page_to_ram;
2574         local_flush_page_for_dma = flush_page_for_dma;
2575         local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
2576         local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
2577 
2578         flush_cache_all = smp_flush_cache_all;
2579         flush_cache_mm = smp_flush_cache_mm;
2580         flush_cache_range = smp_flush_cache_range;
2581         flush_cache_page = smp_flush_cache_page;
2582         flush_tlb_all = smp_flush_tlb_all;
2583         flush_tlb_mm = smp_flush_tlb_mm;
2584         flush_tlb_range = smp_flush_tlb_range;
2585         flush_tlb_page = smp_flush_tlb_page;
2586         flush_page_to_ram = smp_flush_page_to_ram;
2587         flush_page_for_dma = smp_flush_page_for_dma;
2588         flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
2589         flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
2590 #endif
2591 }
