This source file includes the following definitions:
- srmmu_v2p
- srmmu_p2v
- srmmu_swap
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_vmalloc_start
- srmmu_pgd_page
- srmmu_pmd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_clear
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_clear
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_clear
- srmmu_pte_write
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_wrprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_mkwrite
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_mk_pte
- srmmu_mk_pte_io
- srmmu_ctxd_set
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_uncache_page
- srmmu_recache_page
- srmmu_getpage
- srmmu_putpage
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- srmmu_set_pte
- srmmu_quick_kernel_fault
- alloc_context
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_lockarea
- srmmu_unlockarea
- srmmu_alloc_task_struct
- srmmu_alloc_kernel_stack
- srmmu_free_task_struct
- srmmu_free_kernel_stack
- tsunami_flush_cache_all
- tsunami_flush_cache_mm
- tsunami_flush_cache_range
- tsunami_flush_cache_page
- tsunami_flush_cache_page_to_uncache
- tsunami_flush_page_to_ram
- tsunami_flush_page_for_dma
- tsunami_flush_tlb_all
- tsunami_flush_tlb_mm
- tsunami_flush_tlb_range
- tsunami_flush_tlb_page
- tsunami_flush_tlb_page_for_cbit
- swift_flush_cache_all
- swift_flush_cache_mm
- swift_flush_cache_range
- swift_flush_cache_page
- swift_flush_page_to_ram
- swift_flush_page_for_dma
- swift_flush_cache_page_to_uncache
- swift_flush_tlb_all
- swift_flush_tlb_mm
- swift_flush_tlb_range
- swift_flush_tlb_page
- swift_flush_tlb_page_for_cbit
- viking_flush_cache_all
- viking_flush_cache_mm
- viking_flush_cache_range
- viking_flush_cache_page
- viking_flush_page_to_ram
- viking_flush_page_for_dma
- viking_mxcc_flush_page
- viking_no_mxcc_flush_page
- viking_flush_tlb_all
- viking_flush_tlb_mm
- viking_flush_tlb_range
- viking_flush_tlb_page
- viking_flush_tlb_page_for_cbit
- cypress_flush_tlb_all
- cypress_flush_tlb_mm
- cypress_flush_tlb_range
- cypress_flush_tlb_page
- hypersparc_flush_cache_all
- hypersparc_flush_cache_mm
- hypersparc_flush_cache_range
- hypersparc_flush_cache_page
- hypersparc_flush_page_to_ram
- hypersparc_flush_page_for_dma
- hypersparc_flush_cache_page_to_uncache
- hypersparc_flush_tlb_all
- hypersparc_flush_tlb_mm
- hypersparc_flush_tlb_range
- hypersparc_flush_tlb_page
- hypersparc_flush_tlb_page_for_cbit
- hypersparc_ctxd_set
- hypersparc_update_rootmmu_dir
- hypersparc_set_pte
- hypersparc_switch_to_context
- srmmu_map_dvma_pages_for_iommu
- srmmu_uncache_iommu_page_table
- iommu_init
- srmmu_get_scsi_one
- srmmu_get_scsi_sgl
- srmmu_release_scsi_one
- srmmu_release_scsi_sgl
- srmmu_early_paddr
- srmmu_early_pgd_set
- srmmu_early_pmd_set
- srmmu_early_pgd_page
- srmmu_early_pmd_page
- srmmu_early_pmd_offset
- srmmu_early_pte_offset
- srmmu_init_alloc
- srmmu_allocate_ptable_skeleton
- srmmu_inherit_prom_mappings
- srmmu_map_dvma_pages_for_cpu
- srmmu_map_kernel
- srmmu_paging_init
- srmmu_mmu_info
- srmmu_update_mmu_cache
- srmmu_exit_hook
- srmmu_flush_hook
- hypersparc_exit_hook
- hypersparc_flush_hook
- srmmu_is_bad
- poke_hypersparc
- init_hypersparc
- poke_cypress
- init_cypress_common
- init_cypress_604
- init_cypress_605
- poke_swift
- init_swift
- poke_tsunami
- init_tsunami
- poke_viking
- init_viking
- get_srmmu_type
- patch_window_trap_handlers
- smp_flush_page_for_dma
- smp_flush_cache_page_to_uncache
- smp_flush_tlb_page_for_cbit
- ld_mmu_srmmu
1
2
3
4
5
6
7
8
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12
13 #include <asm/page.h>
14 #include <asm/pgtable.h>
15 #include <asm/io.h>
16 #include <asm/kdebug.h>
17 #include <asm/vaddrs.h>
18 #include <asm/traps.h>
19 #include <asm/smp.h>
20 #include <asm/mbus.h>
21 #include <asm/cache.h>
22 #include <asm/oplib.h>
23 #include <asm/sbus.h>
24 #include <asm/iommu.h>
25 #include <asm/asi.h>
26 #include <asm/msi.h>
27
28
29 #include <asm/viking.h>
30 #include <asm/mxcc.h>
31 #include <asm/ross.h>
32 #include <asm/tsunami.h>
33 #include <asm/swift.h>
34
35 enum mbus_module srmmu_modtype;
36 unsigned int hwbug_bitmask;
37 int hyper_cache_size;
38 int hyper_line_size;
39
40 #ifdef __SMP__
41 extern void smp_capture(void);
42 extern void smp_release(void);
43 #else
44 #define smp_capture()
45 #define smp_release()
46 #endif
47
48 static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
49 static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
50
51 static void (*flush_page_for_dma)(unsigned long page);
52 static void (*flush_cache_page_to_uncache)(unsigned long page);
53 static void (*flush_tlb_page_for_cbit)(unsigned long page);
54 #ifdef __SMP__
55 static void (*local_flush_page_for_dma)(unsigned long page);
56 static void (*local_flush_cache_page_to_uncache)(unsigned long page);
57 static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
58 #endif
59
60 static struct srmmu_stats {
61 int invall;
62 int invpg;
63 int invrnge;
64 int invmm;
65 } module_stats;
66
67 static char *srmmu_name;
68
69 ctxd_t *srmmu_ctx_table_phys;
70 ctxd_t *srmmu_context_table;
71
72 static struct srmmu_trans {
73 unsigned long vbase;
74 unsigned long pbase;
75 int size;
76 } srmmu_map[SPARC_PHYS_BANKS];
77
78 static int can_cache_ptables = 0;
79 static int viking_mxcc_present = 0;
80
81
82
83
84
85 static inline unsigned long srmmu_v2p(unsigned long vaddr)
86 {
87 int i;
88
89 for(i=0; srmmu_map[i].size != 0; i++) {
90 if(srmmu_map[i].vbase <= vaddr &&
91 (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
92 return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
93 }
94 return 0xffffffffUL;
95 }
96
97 static inline unsigned long srmmu_p2v(unsigned long paddr)
98 {
99 int i;
100
101 for(i=0; srmmu_map[i].size != 0; i++) {
102 if(srmmu_map[i].pbase <= paddr &&
103 (srmmu_map[i].pbase + srmmu_map[i].size > paddr))
104 return (paddr - srmmu_map[i].pbase) + srmmu_map[i].vbase;
105 }
106 return 0xffffffffUL;
107 }
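
The two translators above walk the srmmu_map[] bank table linearly: a virtual address hits the first bank whose [vbase, vbase+size) range contains it, and a miss returns 0xffffffff. A stand-alone sketch of the same arithmetic (bank values made up for illustration, not taken from this file):

    #include <stdio.h>

    struct bank { unsigned long vbase, pbase, size; };

    /* Hypothetical banks; a zero size terminates the table, as above. */
    static struct bank banks[] = {
        { 0xf0000000UL, 0x00000000UL, 0x01000000UL },   /* 16MB at pa 0 */
        { 0xf1000000UL, 0x08000000UL, 0x00800000UL },   /* 8MB at pa 128MB */
        { 0, 0, 0 }
    };

    static unsigned long v2p(unsigned long va)
    {
        int i;

        for (i = 0; banks[i].size != 0; i++)
            if (banks[i].vbase <= va && va < banks[i].vbase + banks[i].size)
                return (va - banks[i].vbase) + banks[i].pbase;
        return 0xffffffffUL;    /* no bank covers this address */
    }

    int main(void)
    {
        printf("%08lx\n", v2p(0xf1000010UL));   /* prints 08000010 */
        return 0;
    }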
108
109
110
111
112
113 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
114 {
115 #if CONFIG_AP1000
116
117 if (!(value&0xf0000000))
118 value |= 0x80000000;
119 if (value == 0x80000000) value = 0;
120 #endif
121 __asm__ __volatile__("swap [%2], %0\n\t" :
122 "=&r" (value) :
123 "0" (value), "r" (addr));
124 return value;
125 }
126
127
128 #define srmmu_set_entry(ptr, newentry) \
129 srmmu_swap((unsigned long *) (ptr), (newentry))
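
srmmu_set_entry funnels every page-table store through the SPARC "swap" instruction, so the update happens in a single atomic bus transaction and the previous entry comes back as the return value. A portable model of the semantics only (not of the atomicity):

    #include <stdio.h>

    /* Model only: on real hardware the exchange is one atomic "swap"
     * bus cycle; here it is shown as two plain accesses. */
    static unsigned long model_swap(unsigned long *addr, unsigned long value)
    {
        unsigned long old = *addr;
        *addr = value;
        return old;
    }

    int main(void)
    {
        unsigned long entry = 0x11UL;
        unsigned long prev = model_swap(&entry, 0x22UL);

        printf("prev=%lx now=%lx\n", prev, entry);  /* prev=11 now=22 */
        return 0;
    }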
130
131
132
133 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
134 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
135
136 static unsigned long srmmu_vmalloc_start(void)
137 {
138 return SRMMU_VMALLOC_START;
139 }
140
141 static unsigned long srmmu_pgd_page(pgd_t pgd)
142 { return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
143
144 static unsigned long srmmu_pmd_page(pmd_t pmd)
145 { return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
146
147 static unsigned long srmmu_pte_page(pte_t pte)
148 { return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
149
150 static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
151 static int srmmu_pte_present(pte_t pte)
152 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
153
154 static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
155
156 static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
157 static int srmmu_pmd_bad(pmd_t pmd)
158 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
159
160 static int srmmu_pmd_present(pmd_t pmd)
161 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
162
163 static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
164
165 static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
166 static int srmmu_pgd_bad(pgd_t pgd)
167 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
168
169 static int srmmu_pgd_present(pgd_t pgd)
170 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
171
172 static void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
173
174 static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
175 static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
176 static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
177
178 static pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
179 static pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
180 static pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~SRMMU_REF; return pte; }
181 static pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) |= SRMMU_WRITE; return pte; }
182 static pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= SRMMU_DIRTY; return pte; }
183 static pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= SRMMU_REF; return pte; }
184
185
186
187
188
189 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
190 { pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
191
192 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
193 {
194 pte_t pte;
195 pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
196 return pte;
197 }
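
Both constructors above store the physical address shifted right by 4, which leaves the low 8 bits of the PTE for the access, cache, and entry-type flags (and, for I/O PTEs, packs the bus space number into bits 28-31). A worked example of the packing, with flag values assumed for the sketch rather than copied from <asm/pgtsrmmu.h>:

    #include <stdio.h>

    #define MY_VALID 0x02UL     /* entry type = PTE */
    #define MY_PRIV  0x1cUL     /* privileged read/write/execute */
    #define MY_CACHE 0x80UL     /* cacheable */

    int main(void)
    {
        unsigned long paddr = 0x08000000UL;     /* hypothetical physical page */
        unsigned long pte = (paddr >> 4) | MY_CACHE | MY_PRIV | MY_VALID;

        printf("pte   = %08lx\n", pte);                     /* 0080009e */
        printf("paddr = %08lx\n", (pte & ~0xffUL) << 4);    /* 08000000 */
        return 0;
    }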
198
199 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
200 {
201 srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
202 }
203
204 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
205 {
206 srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
207 }
208
209 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
210 {
211 srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
212 }
213
214 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
215 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
216
217
218 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
219 {
220 return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
221 }
222
223
224 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
225 {
226 return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
227 }
228
229
230 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
231 {
232 return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
233 }
234
235
236 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
237 {
238 if(tsk->mm->context != NO_CONTEXT)
239 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
240 }
241
242 static inline void srmmu_uncache_page(unsigned long addr)
243 {
244 pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
245 pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
246 pte_t *ptep = srmmu_pte_offset(pmdp, addr);
247
248 flush_cache_page_to_uncache(addr);
249 set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
250 flush_tlb_page_for_cbit(addr);
251 }
252
253 static inline void srmmu_recache_page(unsigned long addr)
254 {
255 pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
256 pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
257 pte_t *ptep = srmmu_pte_offset(pmdp, addr);
258
259 set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
260 flush_tlb_page_for_cbit(addr);
261 }
262
263 static inline unsigned long srmmu_getpage(void)
264 {
265 unsigned long page = get_free_page(GFP_KERNEL);
266
267 if (can_cache_ptables)
268 return page;
269
270 if(page)
271 srmmu_uncache_page(page);
272 return page;
273 }
274
275 static inline void srmmu_putpage(unsigned long page)
276 {
277 if (!can_cache_ptables)
278 srmmu_recache_page(page);
279 free_page(page);
280 }
281
282
283 #define NEW_PGD() (pgd_t *) srmmu_getpage()
284 #define NEW_PMD() (pmd_t *) srmmu_getpage()
285 #define NEW_PTE() (pte_t *) srmmu_getpage()
286 #define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
287 #define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
288 #define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
289
290
291
292
293
294
295 static void srmmu_pte_free_kernel(pte_t *pte)
296 {
297 FREE_PTE(pte);
298 }
299
300 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
301 {
302 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
303 if(srmmu_pmd_none(*pmd)) {
304 pte_t *page = NEW_PTE();
305 if(srmmu_pmd_none(*pmd)) {
306 if(page) {
307 pmd_set(pmd, page);
308 return page + address;
309 }
310 pmd_set(pmd, BAD_PAGETABLE);
311 return NULL;
312 }
313 FREE_PTE(page);
314 }
315 if(srmmu_pmd_bad(*pmd)) {
316 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
317 pmd_set(pmd, BAD_PAGETABLE);
318 return NULL;
319 }
320 return (pte_t *) srmmu_pmd_page(*pmd) + address;
321 }
322
323 static void srmmu_pmd_free_kernel(pmd_t *pmd)
324 {
325 FREE_PMD(pmd);
326 }
327
328 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
329 {
330 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
331 if(srmmu_pgd_none(*pgd)) {
332 pmd_t *page = NEW_PMD();
333 if(srmmu_pgd_none(*pgd)) {
334 if(page) {
335 pgd_set(pgd, page);
336 return page + address;
337 }
338 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
339 return NULL;
340 }
341 FREE_PMD(page);
342 }
343 if(srmmu_pgd_bad(*pgd)) {
344 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
345 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
346 return NULL;
347 }
348 return (pmd_t *) pgd_page(*pgd) + address;
349 }
350
351 static void srmmu_pte_free(pte_t *pte)
352 {
353 FREE_PTE(pte);
354 }
355
356 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
357 {
358 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
359 if(srmmu_pmd_none(*pmd)) {
360 pte_t *page = NEW_PTE();
361 if(srmmu_pmd_none(*pmd)) {
362 if(page) {
363 pmd_set(pmd, page);
364 return page + address;
365 }
366 pmd_set(pmd, BAD_PAGETABLE);
367 return NULL;
368 }
369 FREE_PTE(page);
370 }
371 if(srmmu_pmd_bad(*pmd)) {
372 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
373 pmd_set(pmd, BAD_PAGETABLE);
374 return NULL;
375 }
376 return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
377 }
378
379
380 static void srmmu_pmd_free(pmd_t * pmd)
381 {
382 FREE_PMD(pmd);
383 }
384
385 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
386 {
387 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
388 if(srmmu_pgd_none(*pgd)) {
389 pmd_t *page = NEW_PMD();
390 if(srmmu_pgd_none(*pgd)) {
391 if(page) {
392 pgd_set(pgd, page);
393 return page + address;
394 }
395 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
396 return NULL;
397 }
398 FREE_PMD(page);
399 }
400 if(srmmu_pgd_bad(*pgd)) {
401 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
402 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
403 return NULL;
404 }
405 return (pmd_t *) srmmu_pgd_page(*pgd) + address;
406 }
407
408 static void srmmu_pgd_free(pgd_t *pgd)
409 {
410 FREE_PGD(pgd);
411 }
412
413 static pgd_t *srmmu_pgd_alloc(void)
414 {
415 return NEW_PGD();
416 }
417
418 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
419 {
420 srmmu_set_entry(ptep, pte_val(pteval));
421 }
422
423 static void srmmu_quick_kernel_fault(unsigned long address)
424 {
425 printk("Penguin faults at address %08lx\n", address);
426 panic("Srmmu bolixed...");
427 }
428
429 static inline void alloc_context(struct mm_struct *mm)
430 {
431 struct ctx_list *ctxp;
432
433 ctxp = ctx_free.next;
434 if(ctxp != &ctx_free) {
435 remove_from_ctx_list(ctxp);
436 add_to_used_ctxlist(ctxp);
437 mm->context = ctxp->ctx_number;
438 ctxp->ctx_mm = mm;
439 return;
440 }
441 ctxp = ctx_used.next;
442 if(ctxp->ctx_mm == current->mm)
443 ctxp = ctxp->next;
444 if(ctxp == &ctx_used)
445 panic("out of mmu contexts");
446 flush_cache_mm(ctxp->ctx_mm);
447 flush_tlb_mm(ctxp->ctx_mm);
448 remove_from_ctx_list(ctxp);
449 add_to_used_ctxlist(ctxp);
450 ctxp->ctx_mm->context = NO_CONTEXT;
451 ctxp->ctx_mm = mm;
452 mm->context = ctxp->ctx_number;
453 }
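
alloc_context hands out a hardware context from the free list while one exists; once the pool is exhausted it steals the context at the head of the used list (skipping the caller's own mm), flushes the victim, and marks the victim's mm as NO_CONTEXT. A toy model of the replacement policy only (list handling and sizes are made up):

    #include <stdio.h>

    #define NCTX 4      /* pretend the MMU has four contexts */

    int main(void)
    {
        int owner[NCTX] = { -1, -1, -1, -1 };   /* -1 = free, else mm id */
        int next_free = 0, victim = 0, mm;

        for (mm = 0; mm < 6; mm++) {
            int ctx;

            if (next_free < NCTX) {
                ctx = next_free++;              /* take from the free pool */
            } else {
                if (owner[victim] == mm)        /* never steal our own context */
                    victim = (victim + 1) % NCTX;
                ctx = victim;                   /* victim mm loses its context */
                victim = (victim + 1) % NCTX;
            }
            owner[ctx] = mm;
            printf("mm %d -> ctx %d\n", mm, ctx);
        }
        return 0;
    }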
454
455 static void srmmu_switch_to_context(struct task_struct *tsk)
456 {
457
458
459
460
461
462 if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
463 (tsk->flags & PF_EXITING))
464 return;
465 if(tsk->mm->context == NO_CONTEXT) {
466 alloc_context(tsk->mm);
467 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
468 }
469 srmmu_set_context(tsk->mm->context);
470 }
471
472
473 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
474 {
475 pgd_t *pgdp;
476 pmd_t *pmdp;
477 pte_t *ptep;
478 unsigned long tmp;
479
480 physaddr &= PAGE_MASK;
481 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
482 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
483 ptep = srmmu_pte_offset(pmdp, virt_addr);
484 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
485
486
487
488
489
490 tmp |= (bus_type << 28);
491 if(rdonly)
492 tmp |= SRMMU_PRIV_RDONLY;
493 else
494 tmp |= SRMMU_PRIV;
495 flush_page_to_ram(virt_addr);
496 srmmu_set_entry(ptep, tmp);
497 flush_tlb_all();
498 }
499
500 static char *srmmu_lockarea(char *vaddr, unsigned long len)
501 {
502 return vaddr;
503 }
504
505 static void srmmu_unlockarea(char *vaddr, unsigned long len)
506 {
507 }
508
509
510
511
512
513
514
515
516
517 struct task_struct *srmmu_alloc_task_struct(void)
518 {
519 unsigned long page;
520
521 page = get_free_page(GFP_KERNEL);
522 if(!page)
523 return (struct task_struct *) 0;
524 return (struct task_struct *) page;
525 }
526
527 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
528 {
529 unsigned long pages;
530
531 pages = __get_free_pages(GFP_KERNEL, 2, 0);
532 if(!pages)
533 return 0;
534 memset((void *) pages, 0, (PAGE_SIZE << 2));
535 return pages;
536 }
537
538 static void srmmu_free_task_struct(struct task_struct *tsk)
539 {
540 free_page((unsigned long) tsk);
541 }
542
543 static void srmmu_free_kernel_stack(unsigned long stack)
544 {
545 free_pages(stack, 2);
546 }
547
548
549
550
551
552 static void tsunami_flush_cache_all(void)
553 {
554 flush_user_windows();
555 tsunami_flush_icache();
556 tsunami_flush_dcache();
557 }
558
559 static void tsunami_flush_cache_mm(struct mm_struct *mm)
560 {
561 #ifndef __SMP__
562 if(mm->context != NO_CONTEXT) {
563 #endif
564 flush_user_windows();
565 tsunami_flush_icache();
566 tsunami_flush_dcache();
567 #ifndef __SMP__
568 }
569 #endif
570 }
571
572 static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
573 {
574 #ifndef __SMP__
575 if(mm->context != NO_CONTEXT) {
576 #endif
577 flush_user_windows();
578 tsunami_flush_icache();
579 tsunami_flush_dcache();
580 #ifndef __SMP__
581 }
582 #endif
583 }
584
585 static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
586 {
587 #ifndef __SMP__
588 struct mm_struct *mm = vma->vm_mm;
589 if(mm->context != NO_CONTEXT) {
590 #endif
591 flush_user_windows();
592 tsunami_flush_icache();
593 tsunami_flush_dcache();
594 #ifndef __SMP__
595 }
596 #endif
597 }
598
599 static void tsunami_flush_cache_page_to_uncache(unsigned long page)
600 {
601 tsunami_flush_dcache();
602 }
603
604
605 static void tsunami_flush_page_to_ram(unsigned long page)
606 {
607 }
608
609
610 static void tsunami_flush_page_for_dma(unsigned long page)
611 {
612 tsunami_flush_dcache();
613 }
614
615
616
617
618
619
620
621
622
623 #define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
624 nop(); nop(); nop(); nop(); nop(); } while(0)
625
626 static void tsunami_flush_tlb_all(void)
627 {
628 module_stats.invall++;
629 srmmu_flush_whole_tlb();
630 TSUNAMI_SUCKS;
631 }
632
633 static void tsunami_flush_tlb_mm(struct mm_struct *mm)
634 {
635 module_stats.invmm++;
636 #ifndef __SMP__
637 if(mm->context != NO_CONTEXT) {
638 #endif
639 srmmu_flush_whole_tlb();
640 TSUNAMI_SUCKS;
641 #ifndef __SMP__
642 }
643 #endif
644 }
645
646 static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
647 {
648 module_stats.invrnge++;
649 #ifndef __SMP__
650 if(mm->context != NO_CONTEXT) {
651 #endif
652 srmmu_flush_whole_tlb();
653 TSUNAMI_SUCKS;
654 #ifndef __SMP__
655 }
656 #endif
657 }
658
659 static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
660 {
661 int octx;
662 struct mm_struct *mm = vma->vm_mm;
663
664 #ifndef __SMP__
665 if(mm->context != NO_CONTEXT) {
666 #endif
667 octx = srmmu_get_context();
668
669 srmmu_set_context(mm->context);
670 srmmu_flush_tlb_page(page);
671 TSUNAMI_SUCKS;
672 srmmu_set_context(octx);
673 #ifndef __SMP__
674 }
675 #endif
676 module_stats.invpg++;
677 }
678
679 static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
680 {
681 srmmu_flush_tlb_page(page);
682 }
683
684
685
686
687
688
689 static void swift_flush_cache_all(void)
690 {
691 flush_user_windows();
692 swift_idflash_clear();
693 }
694
695 static void swift_flush_cache_mm(struct mm_struct *mm)
696 {
697 #ifndef __SMP__
698 if(mm->context != NO_CONTEXT) {
699 #endif
700 flush_user_windows();
701 swift_idflash_clear();
702 #ifndef __SMP__
703 }
704 #endif
705 }
706
707 static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
708 {
709 #ifndef __SMP__
710 if(mm->context != NO_CONTEXT) {
711 #endif
712 flush_user_windows();
713 swift_idflash_clear();
714 #ifndef __SMP__
715 }
716 #endif
717 }
718
719 static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
720 {
721 #ifndef __SMP__
722 struct mm_struct *mm = vma->vm_mm;
723 if(mm->context != NO_CONTEXT) {
724 #endif
725 flush_user_windows();
726 if(vma->vm_flags & VM_EXEC)
727 swift_flush_icache();
728 swift_flush_dcache();
729 #ifndef __SMP__
730 }
731 #endif
732 }
733
734
735 static void swift_flush_page_to_ram(unsigned long page)
736 {
737 }
738
739
740 static void swift_flush_page_for_dma(unsigned long page)
741 {
742 swift_flush_dcache();
743 }
744
745 static void swift_flush_cache_page_to_uncache(unsigned long page)
746 {
747 swift_flush_dcache();
748 }
749
750 static void swift_flush_tlb_all(void)
751 {
752 module_stats.invall++;
753 srmmu_flush_whole_tlb();
754 }
755
756 static void swift_flush_tlb_mm(struct mm_struct *mm)
757 {
758 module_stats.invmm++;
759 #ifndef __SMP__
760 if(mm->context != NO_CONTEXT)
761 #endif
762 srmmu_flush_whole_tlb();
763 }
764
765 static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
766 {
767 module_stats.invrnge++;
768 #ifndef __SMP__
769 if(mm->context != NO_CONTEXT)
770 #endif
771 srmmu_flush_whole_tlb();
772 }
773
774 static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
775 {
776 #ifndef __SMP__
777 struct mm_struct *mm = vma->vm_mm;
778 if(mm->context != NO_CONTEXT)
779 #endif
780 srmmu_flush_whole_tlb();
781 module_stats.invpg++;
782 }
783
784 static void swift_flush_tlb_page_for_cbit(unsigned long page)
785 {
786 srmmu_flush_whole_tlb();
787 }
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806 static void viking_flush_cache_all(void)
807 {
808 viking_flush_icache();
809 }
810
811 static void viking_flush_cache_mm(struct mm_struct *mm)
812 {
813 #ifndef __SMP__
814 if(mm->context != NO_CONTEXT) {
815 #endif
816 flush_user_windows();
817 viking_flush_icache();
818 #ifndef __SMP__
819 }
820 #endif
821 }
822
823 static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
824 {
825 #ifndef __SMP__
826 if(mm->context != NO_CONTEXT) {
827 #endif
828 flush_user_windows();
829 viking_flush_icache();
830 #ifndef __SMP__
831 }
832 #endif
833 }
834
835 static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
836 {
837 #ifndef __SMP__
838 struct mm_struct *mm = vma->vm_mm;
839 if(mm->context != NO_CONTEXT) {
840 #endif
841 flush_user_windows();
842 if(vma->vm_flags & VM_EXEC)
843 viking_flush_icache();
844 #ifndef __SMP__
845 }
846 #endif
847 }
848
849
850 static void viking_flush_page_to_ram(unsigned long page)
851 {
852 }
853
854
855 static void viking_flush_page_for_dma(unsigned long page)
856 {
857 }
858
859 static void viking_mxcc_flush_page(unsigned long page)
860 {
861 unsigned long ppage = srmmu_hwprobe(page);
862 unsigned long paddr0, paddr1;
863
864 if (!ppage)
865 return;
866
867 paddr0 = (ppage >> 28) | 0x10;
868 paddr1 = (ppage << 4) & PAGE_MASK;
869
870
871
872
873
874
875 __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
876 "or %%g0, %1, %%g3\n"
877 "1:\n\t"
878 "stda %%g2, [%2] %5\n\t"
879 "stda %%g2, [%3] %5\n\t"
880 "add %%g3, %4, %%g3\n\t"
881 "btst 0xfff, %%g3\n\t"
882 "bne 1b\n\t"
883 "nop\n\t" : :
884 "r" (paddr0), "r" (paddr1),
885 "r" (MXCC_SRCSTREAM),
886 "r" (MXCC_DESSTREAM),
887 "r" (MXCC_STREAM_SIZE),
888 "i" (ASI_M_MXCC) : "g2", "g3");
889
890
891
892
893
894
895
896
897
898 }
899
900 static void viking_no_mxcc_flush_page(unsigned long page)
901 {
902 unsigned long ppage = srmmu_hwprobe(page) >> 8;
903 int set, block;
904 unsigned long ptag[2];
905 unsigned long vaddr;
906 int i;
907
908 if (!ppage)
909 return;
910
911 for (set = 0; set < 128; set++) {
912 for (block = 0; block < 4; block++) {
913
914 viking_get_dcache_ptag(set, block, ptag);
915
916 if (ptag[1] != ppage)
917 continue;
918 if (!(ptag[0] & VIKING_PTAG_VALID))
919 continue;
920 if (!(ptag[0] & VIKING_PTAG_DIRTY))
921 continue;
922
923
924
925
926
927
928
929 vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
930 for (i = 0; i < 8; i++) {
931 __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
932 "r" (vaddr) : "g2");
933 vaddr += PAGE_SIZE;
934 }
935
936
937 break;
938 }
939 }
940 }
941
942 static void viking_flush_tlb_all(void)
943 {
944 module_stats.invall++;
945 srmmu_flush_whole_tlb();
946 }
947
948 static void viking_flush_tlb_mm(struct mm_struct *mm)
949 {
950 int octx;
951 module_stats.invmm++;
952
953 #ifndef __SMP__
954 if(mm->context != NO_CONTEXT) {
955 #endif
956 octx = srmmu_get_context();
957 srmmu_set_context(mm->context);
958 srmmu_flush_tlb_ctx();
959 srmmu_set_context(octx);
960 #ifndef __SMP__
961 }
962 #endif
963 }
964
965 static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
966 {
967 int octx;
968 module_stats.invrnge++;
969
970 #ifndef __SMP__
971 if(mm->context != NO_CONTEXT) {
972 #endif
973 octx = srmmu_get_context();
974 srmmu_set_context(mm->context);
975 start &= SRMMU_PMD_MASK;
976 while(start < end) {
977 srmmu_flush_tlb_segment(start);
978 start += SRMMU_PMD_SIZE;
979 }
980 srmmu_set_context(octx);
981 #ifndef __SMP__
982 }
983 #endif
984 }
985
986 static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
987 {
988 int octx;
989 struct mm_struct *mm = vma->vm_mm;
990
991 module_stats.invpg++;
992 #ifndef __SMP__
993 if(mm->context != NO_CONTEXT) {
994 #endif
995 octx = srmmu_get_context();
996 srmmu_set_context(mm->context);
997 srmmu_flush_tlb_page(page);
998 srmmu_set_context(octx);
999 #ifndef __SMP__
1000 }
1001 #endif
1002 }
1003
1004 static void viking_flush_tlb_page_for_cbit(unsigned long page)
1005 {
1006 srmmu_flush_tlb_page(page);
1007 }
1008
1009
1010
1011 static void cypress_flush_tlb_all(void)
1012 {
1013 module_stats.invall++;
1014 srmmu_flush_whole_tlb();
1015 }
1016
1017 static void cypress_flush_tlb_mm(struct mm_struct *mm)
1018 {
1019 int octx;
1020
1021 module_stats.invmm++;
1022 #ifndef __SMP__
1023 if(mm->context != NO_CONTEXT) {
1024 #endif
1025 octx = srmmu_get_context();
1026 srmmu_set_context(mm->context);
1027 srmmu_flush_tlb_ctx();
1028 srmmu_set_context(octx);
1029 #ifndef __SMP__
1030 }
1031 #endif
1032 }
1033
1034 static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1035 {
1036 int octx;
1037
1038 module_stats.invrnge++;
1039 #ifndef __SMP__
1040 if(mm->context != NO_CONTEXT) {
1041 #endif
1042 octx = srmmu_get_context();
1043 srmmu_set_context(mm->context);
1044 start &= SRMMU_PMD_MASK;
1045 while(start < end) {
1046 srmmu_flush_tlb_segment(start);
1047 start += SRMMU_PMD_SIZE;
1048 }
1049 srmmu_set_context(octx);
1050 #ifndef __SMP__
1051 }
1052 #endif
1053 }
1054
1055 static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1056 {
1057 int octx;
1058 struct mm_struct *mm = vma->vm_mm;
1059
1060 module_stats.invpg++;
1061 #ifndef __SMP__
1062 if(mm->context != NO_CONTEXT) {
1063 #endif
1064 octx = srmmu_get_context();
1065 srmmu_set_context(mm->context);
1066 srmmu_flush_tlb_page(page);
1067 srmmu_set_context(octx);
1068 #ifndef __SMP__
1069 }
1070 #endif
1071 }
1072
1073
1074 static void hypersparc_flush_cache_all(void)
1075 {
1076 flush_user_windows();
1077 hyper_flush_unconditional_combined();
1078 hyper_flush_whole_icache();
1079 }
1080
1081 static void hypersparc_flush_cache_mm(struct mm_struct *mm)
1082 {
1083 #ifndef __SMP__
1084 if(mm->context != NO_CONTEXT) {
1085 #endif
1086 flush_user_windows();
1087 hyper_flush_unconditional_combined();
1088 hyper_flush_whole_icache();
1089 #ifndef __SMP__
1090 }
1091 #endif
1092 }
1093
1094 static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1095 {
1096 #ifndef __SMP__
1097 if(mm->context != NO_CONTEXT) {
1098 #endif
1099 flush_user_windows();
1100 hyper_flush_unconditional_combined();
1101 hyper_flush_whole_icache();
1102 #ifndef __SMP__
1103 }
1104 #endif
1105 }
1106
1107
1108
1109
1110 static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1111 {
1112 struct mm_struct *mm = vma->vm_mm;
1113 volatile unsigned long clear;
1114 int octx;
1115
1116 #ifndef __SMP__
1117 if(mm->context != NO_CONTEXT) {
1118 #endif
1119 octx = srmmu_get_context();
1120 flush_user_windows();
1121 srmmu_set_context(mm->context);
1122 hyper_flush_whole_icache();
1123 if(!srmmu_hwprobe(page))
1124 goto no_mapping;
1125 hyper_flush_cache_page(page);
1126 no_mapping:
1127 clear = srmmu_get_fstatus();
1128 srmmu_set_context(octx);
1129 #ifndef __SMP__
1130 }
1131 #endif
1132 }
1133
1134
1135 static void hypersparc_flush_page_to_ram(unsigned long page)
1136 {
1137 volatile unsigned long clear;
1138
1139 if(srmmu_hwprobe(page))
1140 hyper_flush_cache_page(page);
1141 clear = srmmu_get_fstatus();
1142 }
1143
1144
1145 static void hypersparc_flush_page_for_dma(unsigned long page)
1146 {
1147 volatile unsigned long clear;
1148
1149 if(srmmu_hwprobe(page))
1150 hyper_flush_cache_page(page);
1151 clear = srmmu_get_fstatus();
1152 }
1153
1154 static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
1155 {
1156 volatile unsigned long clear;
1157
1158 if(srmmu_hwprobe(page))
1159 hyper_flush_cache_page(page);
1160 clear = srmmu_get_fstatus();
1161 }
1162
1163 static void hypersparc_flush_tlb_all(void)
1164 {
1165 module_stats.invall++;
1166 srmmu_flush_whole_tlb();
1167 }
1168
1169 static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
1170 {
1171 int octx;
1172
1173 module_stats.invmm++;
1174 #ifndef __SMP__
1175 if(mm->context != NO_CONTEXT) {
1176 #endif
1177
1178 octx = srmmu_get_context();
1179 srmmu_set_context(mm->context);
1180 srmmu_flush_tlb_ctx();
1181 srmmu_set_context(octx);
1182
1183 #ifndef __SMP__
1184 }
1185 #endif
1186 }
1187
1188 static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1189 {
1190 int octx;
1191
1192 module_stats.invrnge++;
1193 #ifndef __SMP__
1194 if(mm->context != NO_CONTEXT) {
1195 #endif
1196
1197 octx = srmmu_get_context();
1198 srmmu_set_context(mm->context);
1199 start &= SRMMU_PMD_MASK;
1200 while(start < end) {
1201 srmmu_flush_tlb_segment(start);
1202 start += SRMMU_PMD_SIZE;
1203 }
1204 srmmu_set_context(octx);
1205
1206 #ifndef __SMP__
1207 }
1208 #endif
1209 }
1210
1211 static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1212 {
1213 struct mm_struct *mm = vma->vm_mm;
1214 int octx;
1215
1216 module_stats.invpg++;
1217 #ifndef __SMP__
1218 if(mm->context != NO_CONTEXT) {
1219 #endif
1220
1221 octx = srmmu_get_context();
1222 srmmu_set_context(mm->context);
1223 srmmu_flush_tlb_page(page);
1224 srmmu_set_context(octx);
1225
1226 #ifndef __SMP__
1227 }
1228 #endif
1229 }
1230
1231 static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
1232 {
1233 srmmu_flush_tlb_page(page);
1234 }
1235
1236 static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
1237 {
1238 hyper_flush_whole_icache();
1239 srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
1240 }
1241
1242 static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
1243 {
1244 if(tsk->mm->context != NO_CONTEXT) {
1245 hyper_flush_whole_icache();
1246 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1247 }
1248 }
1249
1250 static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
1251 {
1252
1253 __asm__ __volatile__("rd %%psr, %%g1\n\t"
1254 "wr %%g1, %4, %%psr\n\t"
1255 "nop; nop; nop;\n\t"
1256 "swap [%0], %1\n\t"
1257 "wr %%g1, 0x0, %%psr\n\t"
1258 "nop; nop; nop;\n\t" :
1259 "=r" (ptep), "=r" (pteval) :
1260 "0" (ptep), "1" (pteval), "i" (PSR_ET) :
1261 "g1");
1262 }
1263
1264 static void hypersparc_switch_to_context(struct task_struct *tsk)
1265 {
1266
1267
1268
1269
1270
1271 hyper_flush_whole_icache();
1272 if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
1273 (tsk->flags & PF_EXITING))
1274 return;
1275 if(tsk->mm->context == NO_CONTEXT) {
1276 alloc_context(tsk->mm);
1277 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
1278 }
1279 srmmu_set_context(tsk->mm->context);
1280 }
1281
1282
1283
1284 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1285 static unsigned long first_dvma_page, last_dvma_page;
1286
1287 #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
1288 #define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
1289
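
MKIOPTE builds an IOMMU page-table entry the same way the CPU PTEs are built: the physical address shifted right by 4 lands in the page-number field and the low bits carry cacheable/writable/valid, with the write-as-zero bit forced clear. A sketch with field values assumed for illustration, not copied from <asm/iommu.h>:

    #include <stdio.h>

    #define MY_IOPTE_PAGE  0x07ffff00UL     /* page-number field */
    #define MY_IOPTE_CACHE 0x00000080UL
    #define MY_IOPTE_WRITE 0x00000004UL
    #define MY_IOPTE_VALID 0x00000002UL

    static unsigned long mk_iopte(unsigned long phys)
    {
        return ((phys >> 4) & MY_IOPTE_PAGE) |
               MY_IOPTE_CACHE | MY_IOPTE_WRITE | MY_IOPTE_VALID;
    }

    int main(void)
    {
        printf("%08lx\n", mk_iopte(0x08002000UL));  /* prints 00800286 */
        return 0;
    }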
1290 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
1291 {
1292 unsigned long first = first_dvma_page;
1293 unsigned long last = last_dvma_page;
1294 iopte_t *iopte;
1295
1296 iopte = iommu->page_table;
1297 iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
1298 while(first <= last) {
1299 iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
1300 first += PAGE_SIZE;
1301 }
1302 }
1303
1304 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
1305 {
1306 pgd_t *pgdp;
1307 pmd_t *pmdp;
1308 pte_t *ptep;
1309 unsigned long end = start + size;
1310
1311 while(start < end) {
1312 pgdp = srmmu_pgd_offset(init_task.mm, start);
1313 pmdp = srmmu_pmd_offset(pgdp, start);
1314 ptep = srmmu_pte_offset(pmdp, start);
1315 pte_val(*ptep) &= ~SRMMU_CACHE;
1316 start += PAGE_SIZE;
1317 }
1318 }
1319
1320 unsigned long iommu_init(int iommund, unsigned long memory_start,
1321 unsigned long memory_end, struct linux_sbus *sbus)
1322 {
1323 int impl, vers, ptsize;
1324 unsigned long tmp;
1325 struct iommu_struct *iommu;
1326 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
1327
1328 memory_start = LONG_ALIGN(memory_start);
1329 iommu = (struct iommu_struct *) memory_start;
1330 memory_start += sizeof(struct iommu_struct);
1331 prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
1332 iommu->regs = (struct iommu_regs *)
1333 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
1334 "IOMMU registers", iommu_promregs[0].which_io, 0x0);
1335 if(!iommu->regs)
1336 panic("Cannot map IOMMU registers.");
1337 impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
1338 vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
1339 tmp = iommu->regs->control;
1340 tmp &= ~(IOMMU_CTRL_RNGE);
1341 tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
1342 iommu->regs->control = tmp;
1343 iommu_invalidate(iommu->regs);
1344 iommu->plow = iommu->start = 0xfc000000;
1345 iommu->end = 0xffffffff;
1346
1347
1348 ptsize = iommu->end - iommu->start + 1;
1349 ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
1350
1351
1352 memory_start = PAGE_ALIGN(memory_start);
1353 memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
1354 iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
1355 memory_start += ptsize;
1356
1357
1358 flush_cache_all();
1359 srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
1360 flush_tlb_all();
1361 memset(iommu->page_table, 0, ptsize);
1362 srmmu_map_dvma_pages_for_iommu(iommu);
1363 iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
1364 iommu_invalidate(iommu->regs);
1365
1366 sbus->iommu = iommu;
1367 printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
1368 impl, vers, iommu->page_table, ptsize);
1369 return memory_start;
1370 }
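
With the range register programmed for a 64MB DVMA window (0xfc000000-0xffffffff), the ptsize computation above comes out to one iopte per page of that window. A quick check of the arithmetic, assuming 4KB pages and 4-byte ioptes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long start = 0xfc000000UL, end = 0xffffffffUL;
        unsigned long ptsize = end - start + 1;     /* 64MB of DVMA space */

        ptsize = (ptsize >> 12) * 4;                /* one 4-byte iopte per 4KB page */
        printf("%lu bytes, %lu entries\n", ptsize, ptsize / 4);  /* 65536, 16384 */
        return 0;
    }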
1371
1372 static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1373 {
1374 struct iommu_struct *iommu = sbus->iommu;
1375 unsigned long page = (unsigned long) vaddr;
1376 unsigned long start, end, offset;
1377 iopte_t *iopte;
1378
1379 offset = page & ~PAGE_MASK;
1380 page &= PAGE_MASK;
1381
1382 start = iommu->plow;
1383 end = KADB_DEBUGGER_BEGVM;
1384 iopte = iommu->lowest;
1385 while(start < end) {
1386 if(!(iopte_val(*iopte) & IOPTE_VALID))
1387 break;
1388 iopte++;
1389 start += PAGE_SIZE;
1390 }
1391
1392 flush_page_for_dma(page);
1393 vaddr = (char *) (start | offset);
1394 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1395 iommu_invalidate_page(iommu->regs, start);
1396 iommu->lowest = iopte + 1;
1397 iommu->plow = start + PAGE_SIZE;
1398
1399 return vaddr;
1400 }
1401
1402 static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1403 {
1404 struct iommu_struct *iommu = sbus->iommu;
1405 unsigned long page, start, end, offset;
1406 iopte_t *iopte = iommu->lowest;
1407
1408 start = iommu->plow;
1409 end = KADB_DEBUGGER_BEGVM;
1410 while(sz >= 0) {
1411 page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
1412 offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
1413 while(start < end) {
1414 if(!(iopte_val(*iopte) & IOPTE_VALID))
1415 break;
1416 iopte++;
1417 start += PAGE_SIZE;
1418 }
1419 if(start == KADB_DEBUGGER_BEGVM)
1420 panic("Wheee, iomapping overflow.");
1421 flush_page_for_dma(page);
1422 sg[sz].alt_addr = (char *) (start | offset);
1423 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1424 iommu_invalidate_page(iommu->regs, start);
1425 iopte++;
1426 start += PAGE_SIZE;
1427 sz--;
1428 }
1429 iommu->lowest = iopte;
1430 iommu->plow = start;
1431 }
1432
1433 static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1434 {
1435 struct iommu_struct *iommu = sbus->iommu;
1436 unsigned long page = (unsigned long) vaddr;
1437 iopte_t *iopte;
1438
1439 if(len > PAGE_SIZE)
1440 panic("Can only handle page sized IOMMU mappings.");
1441 page &= PAGE_MASK;
1442 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1443 iopte_val(*iopte) = 0;
1444 iommu_invalidate_page(iommu->regs, page);
1445 if(iopte < iommu->lowest) {
1446 iommu->lowest = iopte;
1447 iommu->plow = page;
1448 }
1449 }
1450
1451 static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1452 {
1453 struct iommu_struct *iommu = sbus->iommu;
1454 unsigned long page;
1455 iopte_t *iopte;
1456
1457 while(sz >= 0) {
1458 page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
1459 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1460 iopte_val(*iopte) = 0;
1461 iommu_invalidate_page(iommu->regs, page);
1462 if(iopte < iommu->lowest) {
1463 iommu->lowest = iopte;
1464 iommu->plow = page;
1465 }
1466 sg[sz].alt_addr = 0;
1467 sz--;
1468 }
1469 }
1470
1471 static unsigned long mempool;
1472
1473
1474
1475
1476
1477
1478
1479 static unsigned long kbpage;
1480
1481
1482 static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
1483 {
1484 return ((vaddr - PAGE_OFFSET) + kbpage);
1485 }
1486
1487 static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
1488 {
1489 srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
1490 }
1491
1492 static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
1493 {
1494 srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
1495 }
1496
1497 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
1498 {
1499 return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1500 }
1501
1502 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
1503 {
1504 return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1505 }
1506
1507 static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
1508 {
1509 return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
1510 }
1511
1512 static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
1513 {
1514 return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
1515 }
1516
1517
1518
1519
1520 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
1521 {
1522 unsigned long mask = size - 1;
1523 unsigned long ret;
1524
1525 if(!size)
1526 return 0x0;
1527 if(size & mask) {
1528 prom_printf("panic: srmmu_init_alloc botch\n");
1529 prom_halt();
1530 }
1531 ret = (*kbrk + mask) & ~mask;
1532 *kbrk = ret + size;
1533 memset((void*) ret, 0, size);
1534 return (void*) ret;
1535 }
1536
1537 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
1538 {
1539 pgd_t *pgdp;
1540 pmd_t *pmdp;
1541 pte_t *ptep;
1542
1543 while(start < end) {
1544 pgdp = srmmu_pgd_offset(init_task.mm, start);
1545 if(srmmu_pgd_none(*pgdp)) {
1546 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1547 srmmu_early_pgd_set(pgdp, pmdp);
1548 }
1549 pmdp = srmmu_early_pmd_offset(pgdp, start);
1550 if(srmmu_pmd_none(*pmdp)) {
1551 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1552 srmmu_early_pmd_set(pmdp, ptep);
1553 }
1554 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
1555 }
1556 }
1557
1558
1559
1560
1561
1562 void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
1563 {
1564 pgd_t *pgdp;
1565 pmd_t *pmdp;
1566 pte_t *ptep;
1567 int what = 0;
1568 unsigned long prompte;
1569
1570 while(start <= end) {
1571 if (start == 0)
1572 break;
1573 if(start == 0xfef00000)
1574 start = KADB_DEBUGGER_BEGVM;
1575 if(!(prompte = srmmu_hwprobe(start))) {
1576 start += PAGE_SIZE;
1577 continue;
1578 }
1579
1580
1581 what = 0;
1582
1583 if(!(start & ~(SRMMU_PMD_MASK))) {
1584 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
1585 what = 1;
1586 }
1587
1588 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1589 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1590 prompte)
1591 what = 2;
1592 }
1593
1594 pgdp = srmmu_pgd_offset(init_task.mm, start);
1595 if(what == 2) {
1596 pgd_val(*pgdp) = prompte;
1597 start += SRMMU_PGDIR_SIZE;
1598 continue;
1599 }
1600 if(srmmu_pgd_none(*pgdp)) {
1601 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1602 srmmu_early_pgd_set(pgdp, pmdp);
1603 }
1604 pmdp = srmmu_early_pmd_offset(pgdp, start);
1605 if(what == 1) {
1606 pmd_val(*pmdp) = prompte;
1607 start += SRMMU_PMD_SIZE;
1608 continue;
1609 }
1610 if(srmmu_pmd_none(*pmdp)) {
1611 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1612 srmmu_early_pmd_set(pmdp, ptep);
1613 }
1614 ptep = srmmu_early_pte_offset(pmdp, start);
1615 pte_val(*ptep) = prompte;
1616 start += PAGE_SIZE;
1617 }
1618 }
1619
1620 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1621 {
1622 unsigned long start;
1623 pgprot_t dvma_prot;
1624 pgd_t *pgdp;
1625 pmd_t *pmdp;
1626 pte_t *ptep;
1627
1628 start = DVMA_VADDR;
1629 if (viking_mxcc_present)
1630 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
1631 else
1632 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1633 while(first <= last) {
1634 pgdp = srmmu_pgd_offset(init_task.mm, start);
1635 pmdp = srmmu_pmd_offset(pgdp, start);
1636 ptep = srmmu_pte_offset(pmdp, start);
1637
1638 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1639
1640 first += PAGE_SIZE;
1641 start += PAGE_SIZE;
1642 }
1643
1644
1645 if (!viking_mxcc_present) {
1646 first = first_dvma_page;
1647 last = last_dvma_page;
1648 while(first <= last) {
1649 pgdp = srmmu_pgd_offset(init_task.mm, first);
1650 pmdp = srmmu_pmd_offset(pgdp, first);
1651 ptep = srmmu_pte_offset(pmdp, first);
1652 pte_val(*ptep) &= ~SRMMU_CACHE;
1653 first += PAGE_SIZE;
1654 }
1655 }
1656 }
1657
1658 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1659 {
1660 unsigned long last_page;
1661 int srmmu_bank, phys_bank, i;
1662 pgd_t *pgdp;
1663 pmd_t *pmdp;
1664 pte_t *ptep;
1665
1666 end = PAGE_ALIGN(end);
1667
1668 if(start == (KERNBASE + PAGE_SIZE)) {
1669 unsigned long pte;
1670 unsigned long tmp;
1671
1672 pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
1673 pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
1674 ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
1675
1676
1677 tmp = kbpage;
1678 pte = (tmp) >> 4;
1679 pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1680 pte_val(*ptep) = pte;
1681 }
1682
1683
1684 last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1685 while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
1686 unsigned long tmp;
1687
1688 pgdp = srmmu_pgd_offset(init_task.mm, start);
1689 pmdp = srmmu_early_pmd_offset(pgdp, start);
1690 ptep = srmmu_early_pte_offset(pmdp, start);
1691 tmp = srmmu_hwprobe(start);
1692 tmp &= ~(0xff);
1693 tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1694 pte_val(*ptep) = tmp;
1695 start += PAGE_SIZE;
1696 tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1697
1698
1699 if(tmp != last_page + PAGE_SIZE)
1700 break;
1701 last_page = tmp;
1702 }
1703
1704
1705
1706
1707 for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
1708 if(kbpage >= sp_banks[phys_bank].base_addr &&
1709 (kbpage <
1710 (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
1711 break;
1712 }
1713 srmmu_bank = 0;
1714 srmmu_map[srmmu_bank].vbase = KERNBASE;
1715 srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
1716 srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
1717 if(kbpage != sp_banks[phys_bank].base_addr) {
1718 prom_printf("Detected PenguinPages, getting out of here.\n");
1719 prom_halt();
1720 #if 0
1721 srmmu_map[srmmu_bank].pbase = kbpage;
1722 srmmu_map[srmmu_bank].size -=
1723 (kbpage - sp_banks[phys_bank].base_addr);
1724 #endif
1725 }
1726
1727
1728
1729 while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
1730 unsigned long pteval;
1731
1732 pgdp = srmmu_pgd_offset(init_task.mm, start);
1733 pmdp = srmmu_early_pmd_offset(pgdp, start);
1734 ptep = srmmu_early_pte_offset(pmdp, start);
1735
1736 pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
1737 pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1738 pte_val(*ptep) = pteval;
1739 start += PAGE_SIZE;
1740 }
1741
1742
1743 sp_banks[phys_bank].base_addr |= 1;
1744 srmmu_bank++;
1745
1746
1747 while(start < end) {
1748 unsigned long baddr;
1749 int btg;
1750
1751
1752 for(i=0; sp_banks[i].num_bytes != 0; i++)
1753 if(!(sp_banks[i].base_addr & 1))
1754 break;
1755 if(sp_banks[i].num_bytes == 0)
1756 break;
1757
1758
1759 srmmu_map[srmmu_bank].vbase = start;
1760 srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
1761 srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
1762 srmmu_bank++;
1763
1764 btg = sp_banks[i].num_bytes;
1765 baddr = sp_banks[i].base_addr;
1766 while(btg) {
1767 pgdp = srmmu_pgd_offset(init_task.mm, start);
1768 pmdp = srmmu_early_pmd_offset(pgdp, start);
1769 ptep = srmmu_early_pte_offset(pmdp, start);
1770 pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1771 pte_val(*ptep) |= (baddr >> 4);
1772
1773 baddr += PAGE_SIZE;
1774 start += PAGE_SIZE;
1775 btg -= PAGE_SIZE;
1776 }
1777 sp_banks[i].base_addr |= 1;
1778 }
1779 if(start < end) {
1780 prom_printf("weird, didn't use all of physical memory... ");
1781 prom_halt();
1782 }
1783 for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
1784 sp_banks[phys_bank].base_addr &= ~1;
1785 #if 0
1786 for(i = 0; srmmu_map[i].size != 0; i++) {
1787 prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
1788 i, srmmu_map[i].vbase,
1789 srmmu_map[i].pbase, srmmu_map[i].size);
1790 }
1791 prom_getchar();
1792 for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1793 prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
1794 i,
1795 sp_banks[i].base_addr,
1796 sp_banks[i].num_bytes);
1797 }
1798 prom_getchar();
1799 prom_halt();
1800 #endif
1801 }
1802
1803
1804 extern unsigned long free_area_init(unsigned long, unsigned long);
1805 extern unsigned long sparc_context_init(unsigned long, int);
1806
1807 extern int physmem_mapped_contig;
1808 extern int linux_num_cpus;
1809
1810 void (*poke_srmmu)(void);
1811
1812 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1813 {
1814 unsigned long ptables_start, first_mapped_page;
1815 int i, cpunode;
1816 char node_str[128];
1817 pgd_t *pgdp;
1818 pmd_t *pmdp;
1819 pte_t *ptep;
1820
1821 physmem_mapped_contig = 0;
1822
1823 #if CONFIG_AP1000
1824 printk("Forcing num_contexts to 1024\n");
1825 num_contexts = 1024;
1826 #else
1827
1828 cpunode = prom_getchild(prom_root_node);
1829 num_contexts = 0;
1830 while((cpunode = prom_getsibling(cpunode)) != 0) {
1831 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1832 if(!strcmp(node_str, "cpu")) {
1833 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1834 break;
1835 }
1836 }
1837 #endif
1838 if(!num_contexts) {
1839 prom_printf("Something wrong, cant find cpu node in paging_init.\n");
1840 prom_halt();
1841 }
1842
1843 ptables_start = mempool = PAGE_ALIGN(start_mem);
1844 memset(swapper_pg_dir, 0, PAGE_SIZE);
1845 first_mapped_page = KERNBASE;
1846 kbpage = srmmu_hwprobe(KERNBASE);
1847 if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
1848 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1849 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1850 kbpage -= PAGE_SIZE;
1851 first_mapped_page += PAGE_SIZE;
1852 } else
1853 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1854
1855 srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1856 #if CONFIG_SUN_IO
1857 srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1858 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1859 #endif
1860
1861
1862 mempool = PAGE_ALIGN(mempool);
1863 first_dvma_page = mempool;
1864 last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1865 mempool = last_dvma_page + PAGE_SIZE;
1866
1867 #if CONFIG_AP1000
1868 ap_inherit_mappings();
1869 #else
1870 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1871 #endif
1872 srmmu_map_kernel(first_mapped_page, end_mem);
1873 #if CONFIG_SUN_IO
1874 srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1875 #endif
1876 srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1877 srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
1878 for(i = 0; i < num_contexts; i++)
1879 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1880
1881 start_mem = PAGE_ALIGN(mempool);
1882
1883
1884 if(!can_cache_ptables) {
1885 for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
1886 pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
1887 pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
1888 ptep = srmmu_early_pte_offset(pmdp, ptables_start);
1889 pte_val(*ptep) &= ~SRMMU_CACHE;
1890 }
1891
1892 pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
1893 pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
1894 ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
1895 pte_val(*ptep) &= ~SRMMU_CACHE;
1896 }
1897
1898 flush_cache_all();
1899 srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1900 flush_tlb_all();
1901 poke_srmmu();
1902
1903 start_mem = sparc_context_init(start_mem, num_contexts);
1904 start_mem = free_area_init(start_mem, end_mem);
1905
1906 return PAGE_ALIGN(start_mem);
1907 }
1908
1909 static char srmmuinfo[512];
1910
1911 static char *srmmu_mmu_info(void)
1912 {
1913 sprintf(srmmuinfo, "MMU type\t: %s\n"
1914 "invall\t\t: %d\n"
1915 "invmm\t\t: %d\n"
1916 "invrnge\t\t: %d\n"
1917 "invpg\t\t: %d\n"
1918 "contexts\t: %d\n"
1919 "big_chunks\t: %d\n"
1920 "little_chunks\t: %d\n",
1921 srmmu_name,
1922 module_stats.invall,
1923 module_stats.invmm,
1924 module_stats.invrnge,
1925 module_stats.invpg,
1926 num_contexts,
1927 #if 0
1928 num_big_chunks,
1929 num_little_chunks
1930 #else
1931 0, 0
1932 #endif
1933 );
1934 return srmmuinfo;
1935 }
1936
1937 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1938 {
1939 }
1940
1941 static void srmmu_exit_hook(void)
1942 {
1943 struct ctx_list *ctx_old;
1944 struct mm_struct *mm = current->mm;
1945
1946 if(mm->context != NO_CONTEXT) {
1947 flush_cache_mm(mm);
1948 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1949 flush_tlb_mm(mm);
1950 ctx_old = ctx_list_pool + mm->context;
1951 remove_from_ctx_list(ctx_old);
1952 add_to_free_ctxlist(ctx_old);
1953 mm->context = NO_CONTEXT;
1954 }
1955 }
1956
1957 static void srmmu_flush_hook(void)
1958 {
1959 if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1960 alloc_context(current->mm);
1961 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1962 srmmu_set_context(current->mm->context);
1963 }
1964 }
1965
1966 static void hypersparc_exit_hook(void)
1967 {
1968 struct ctx_list *ctx_old;
1969 struct mm_struct *mm = current->mm;
1970
1971 if(mm->context != NO_CONTEXT) {
1972
1973
1974
1975
1976
1977 flush_cache_mm(mm);
1978 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1979 flush_tlb_mm(mm);
1980 ctx_old = ctx_list_pool + mm->context;
1981 remove_from_ctx_list(ctx_old);
1982 add_to_free_ctxlist(ctx_old);
1983 mm->context = NO_CONTEXT;
1984 }
1985 }
1986
1987 static void hypersparc_flush_hook(void)
1988 {
1989 if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1990 alloc_context(current->mm);
1991 flush_cache_mm(current->mm);
1992 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1993 srmmu_set_context(current->mm->context);
1994 }
1995 }
1996
1997
1998 void srmmu_is_bad(void)
1999 {
2000 prom_printf("Could not determine SRMMU chip type.\n");
2001 prom_halt();
2002 }
2003
2004 void poke_hypersparc(void)
2005 {
2006 volatile unsigned long clear;
2007 unsigned long mreg = srmmu_get_mmureg();
2008
2009 hyper_flush_unconditional_combined();
2010
2011 mreg &= ~(HYPERSPARC_CWENABLE);
2012 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2013 mreg |= (HYPERSPARC_CMODE);
2014
2015 srmmu_set_mmureg(mreg);
2016 hyper_clear_all_tags();
2017
2018 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2019 hyper_flush_whole_icache();
2020 clear = srmmu_get_faddr();
2021 clear = srmmu_get_fstatus();
2022 }
2023
2024 void init_hypersparc(void)
2025 {
2026 unsigned long mreg = srmmu_get_mmureg();
2027
2028 srmmu_name = "ROSS HyperSparc";
2029 can_cache_ptables = 0;
2030 if(mreg & HYPERSPARC_CSIZE) {
2031 hyper_cache_size = (256 * 1024);
2032 hyper_line_size = 64;
2033 } else {
2034 hyper_cache_size = (128 * 1024);
2035 hyper_line_size = 32;
2036 }
2037
2038 flush_cache_all = hypersparc_flush_cache_all;
2039 flush_cache_mm = hypersparc_flush_cache_mm;
2040 flush_cache_range = hypersparc_flush_cache_range;
2041 flush_cache_page = hypersparc_flush_cache_page;
2042
2043 flush_tlb_all = hypersparc_flush_tlb_all;
2044 flush_tlb_mm = hypersparc_flush_tlb_mm;
2045 flush_tlb_range = hypersparc_flush_tlb_range;
2046 flush_tlb_page = hypersparc_flush_tlb_page;
2047
2048 flush_page_to_ram = hypersparc_flush_page_to_ram;
2049 flush_page_for_dma = hypersparc_flush_page_for_dma;
2050 flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
2051 flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
2052
2053 ctxd_set = hypersparc_ctxd_set;
2054 switch_to_context = hypersparc_switch_to_context;
2055 mmu_exit_hook = hypersparc_exit_hook;
2056 mmu_flush_hook = hypersparc_flush_hook;
2057 sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
2058 set_pte = hypersparc_set_pte;
2059 poke_srmmu = poke_hypersparc;
2060 }
2061
2062 void poke_cypress(void)
2063 {
2064 unsigned long mreg = srmmu_get_mmureg();
2065
2066 mreg &= ~CYPRESS_CMODE;
2067 mreg |= CYPRESS_CENABLE;
2068 srmmu_set_mmureg(mreg);
2069 }
2070
2071 void init_cypress_common(void)
2072 {
2073 can_cache_ptables = 0;
2074 flush_tlb_all = cypress_flush_tlb_all;
2075 flush_tlb_mm = cypress_flush_tlb_mm;
2076 flush_tlb_page = cypress_flush_tlb_page;
2077 flush_tlb_range = cypress_flush_tlb_range;
2078 poke_srmmu = poke_cypress;
2079
2080 /* XXX The Cypress cache flush routines are not hooked up yet;
2081  * only the TLB operations above are chip-specific. XXX */
2082 }
2083
2084 void init_cypress_604(void)
2085 {
2086 srmmu_name = "ROSS Cypress-604(UP)";
2087 srmmu_modtype = Cypress;
2088 init_cypress_common();
2089 }
2090
2091 void init_cypress_605(unsigned long mrev)
2092 {
2093 srmmu_name = "ROSS Cypress-605(MP)";
2094 if(mrev == 0xe) {
2095 srmmu_modtype = Cypress_vE;
2096 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2097 } else {
2098 if(mrev == 0xd) {
2099 srmmu_modtype = Cypress_vD;
2100 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2101 } else {
2102 srmmu_modtype = Cypress;
2103 }
2104 }
2105 init_cypress_common();
2106 }
2107
2108 void poke_swift(void)
2109 {
2110 unsigned long mreg = srmmu_get_mmureg();
2111
2112 /* Clear out the instruction and data caches before enabling them. */
2113 swift_idflash_clear();
2114 mreg |= (SWIFT_IE | SWIFT_DE);
2115
2116 /* The Swift's branch folding logic is broken: when a trap hits at
2117  * just the wrong moment the chip can report inconsistent state to
2118  * the trap entry code, and the result is unexplainable crashes
2119  * under load.  Branch folding (SWIFT_BF) is therefore turned off
2120  * outright; correctness over speed.
2121  */
2122
2123
2124 mreg &= ~(SWIFT_BF);
2125 srmmu_set_mmureg(mreg);
2126 }
2127
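/* The Swift mask revision is read straight off the chip with a
 * physical-bypass ASI load from the MASKID register, and it decides
 * which hardware bug workarounds have to be flagged below.
 */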
2128 #define SWIFT_MASKID_ADDR 0x10003018
2129 void init_swift(void)
2130 {
2131 unsigned long swift_rev;
2132
2133 __asm__ __volatile__("lda [%1] %2, %0\n\t"
2134 "srl %0, 0x18, %0\n\t" :
2135 "=r" (swift_rev) :
2136 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2137 srmmu_name = "Fujitsu Swift";
2138 switch(swift_rev) {
2139 case 0x11:
2140 case 0x20:
2141 case 0x23:
2142 case 0x30:
2143 srmmu_modtype = Swift_lots_o_bugs;
2144 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2145 /* Sun never made much noise about it, but these mask revisions
2146  * are genuinely broken, as the two hwbug flags above record.
2147  * First, the MMU can mishandle the ACC (protection) bits of
2148  * kernel ptes, so a page fault at the wrong moment can leave
2149  * kernel pages accessible from user mode.  Second, the
2150  * cacheable (C) bit is not honoured reliably for kernel
2151  * mappings.  The ACC problem reportedly does not trigger when
2152  * the kernel is mapped with large, pgd-level mappings (much
2153  * like the 4MB pages on the Pentium), which is what keeps the
2154  * cost of living with these chips close to zero.
2155  */
2156
2157
2158
2159
2160
2161 break;
2162 case 0x25:
2163 case 0x31:
2164 srmmu_modtype = Swift_bad_c;
2165 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2166 /* Only the cacheable (C) bit handling is broken on these
2167  * revisions, hence the single hwbug flag; kernel ACC bit
2168  * handling works as it should here.
2169  */
2170 break;
2171 default:
2172 srmmu_modtype = Swift_ok;
2173 break;
2174 };
2175
2176 flush_cache_all = swift_flush_cache_all;
2177 flush_cache_mm = swift_flush_cache_mm;
2178 flush_cache_page = swift_flush_cache_page;
2179 flush_cache_range = swift_flush_cache_range;
2180
2181 flush_tlb_all = swift_flush_tlb_all;
2182 flush_tlb_mm = swift_flush_tlb_mm;
2183 flush_tlb_page = swift_flush_tlb_page;
2184 flush_tlb_range = swift_flush_tlb_range;
2185
2186 flush_page_to_ram = swift_flush_page_to_ram;
2187 flush_page_for_dma = swift_flush_page_for_dma;
2188 flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
2189 flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
2190
2191 /* Unlike the HyperSparc, the Swift gets by with the generic
2192  * srmmu context hooks, set_pte and switch_to_context installed
2193  * by ld_mmu_srmmu(); only the flush routines above and the
2194  * poke routine below are chip-specific.
2195  */
2196
2197 poke_srmmu = poke_swift;
2198 }
2199
2200 void poke_tsunami(void)
2201 {
2202 unsigned long mreg = srmmu_get_mmureg();
2203
2204 tsunami_flush_icache();
2205 tsunami_flush_dcache();
2206 mreg &= ~TSUNAMI_ITD;
2207 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2208 srmmu_set_mmureg(mreg);
2209 }
2210
2211 void init_tsunami(void)
2212 {
2213 /* Tsunami is comparatively well behaved: no hwbug flags are
2214  * needed and page tables may live in the cache, so this is
2215  * just a matter of installing the Tsunami flush routines and
2216  * the poke routine.
2217  */
2218 srmmu_name = "TI Tsunami";
2219 srmmu_modtype = Tsunami;
2220 can_cache_ptables = 1;
2221
2222 flush_cache_all = tsunami_flush_cache_all;
2223 flush_cache_mm = tsunami_flush_cache_mm;
2224 flush_cache_page = tsunami_flush_cache_page;
2225 flush_cache_range = tsunami_flush_cache_range;
2226
2227 flush_tlb_all = tsunami_flush_tlb_all;
2228 flush_tlb_mm = tsunami_flush_tlb_mm;
2229 flush_tlb_page = tsunami_flush_tlb_page;
2230 flush_tlb_range = tsunami_flush_tlb_range;
2231
2232 flush_page_to_ram = tsunami_flush_page_to_ram;
2233 flush_page_for_dma = tsunami_flush_page_for_dma;
2234 flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
2235 flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
2236
2237 poke_srmmu = poke_tsunami;
2238 }
2239
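/* Per-cpu Viking setup.  With an MXCC module the external cache and
 * prefetching are enabled through the MXCC control register and
 * table-walk cacheing is allowed; without one, table-walk cacheing
 * stays off and secondary cpus additionally disable mixed commands.
 * Either way the icache is unlocked, flushed and enabled together with
 * the dcache and snooping, alternate cacheability is left off, and the
 * store buffer is kept disabled on SMP and AP1000 builds.
 */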
2240 void poke_viking(void)
2241 {
2242 unsigned long mreg = srmmu_get_mmureg();
2243 static int smp_catch = 0;
2244
2245 if(viking_mxcc_present) {
2246 unsigned long mxcc_control;
2247
2248 __asm__ __volatile__("set -1, %%g2\n\t"
2249 "set -1, %%g3\n\t"
2250 "stda %%g2, [%1] %2\n\t"
2251 "lda [%3] %2, %0\n\t" :
2252 "=r" (mxcc_control) :
2253 "r" (MXCC_EREG), "i" (ASI_M_MXCC),
2254 "r" (MXCC_CREG) : "g2", "g3");
2255 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2256 mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
2257 mreg &= ~(VIKING_PCENABLE);
2258 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
2259 "r" (mxcc_control), "r" (MXCC_CREG),
2260 "i" (ASI_M_MXCC));
2261 srmmu_set_mmureg(mreg);
2262 mreg |= VIKING_TCENABLE;
2263 } else {
2264 unsigned long bpreg;
2265
2266 mreg &= ~(VIKING_TCENABLE);
2267 if(smp_catch++) {
2268 /* The boot cpu already had mixed commands disabled by
2269  * init_viking(); do the same on each additional cpu
2270  * that passes through here. */
2271 bpreg = viking_get_bpreg();
2272 bpreg &= ~(VIKING_ACTION_MIX);
2273 viking_set_bpreg(bpreg);
2274 /* And resync the MSI, in case the PROM left it in a
2275  * funny state on this cpu. */
2276 msi_set_sync();
2277 }
2278 }
2279
2280 viking_unlock_icache();
2281 viking_flush_icache();
2282 #if 0
2283 viking_unlock_dcache();
2284 viking_flush_dcache();
2285 #endif
2286 mreg |= VIKING_SPENABLE;
2287 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2288 mreg |= VIKING_SBENABLE;
2289 mreg &= ~(VIKING_ACENABLE);
2290 #if CONFIG_AP1000
2291 mreg &= ~(VIKING_SBENABLE);
2292 #endif
2293 #ifdef __SMP__
2294 mreg &= ~(VIKING_SBENABLE);
2295 #endif
2296 srmmu_set_mmureg(mreg);
2297 }
2298
2299 void init_viking(void)
2300 {
2301 unsigned long mreg = srmmu_get_mmureg();
2302
2303 /* The MMODE bit in the mmu control register tells a bare
2304  * Viking apart from one backed by an MXCC external cache. */
2305 if(mreg & VIKING_MMODE) {
2306 unsigned long bpreg;
2307
2308 srmmu_name = "TI Viking";
2309 viking_mxcc_present = 0;
2310 can_cache_ptables = 0;
2311
2312 bpreg = viking_get_bpreg();
2313 bpreg &= ~(VIKING_ACTION_MIX);
2314 viking_set_bpreg(bpreg);
2315
2316 msi_set_sync();
2317
2318 flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
2319 } else {
2320 srmmu_name = "TI Viking/MXCC";
2321 viking_mxcc_present = 1;
2322 can_cache_ptables = 1;
2323 flush_cache_page_to_uncache = viking_mxcc_flush_page;
2324 }
2325
2326 flush_cache_all = viking_flush_cache_all;
2327 flush_cache_mm = viking_flush_cache_mm;
2328 flush_cache_page = viking_flush_cache_page;
2329 flush_cache_range = viking_flush_cache_range;
2330
2331 flush_tlb_all = viking_flush_tlb_all;
2332 flush_tlb_mm = viking_flush_tlb_mm;
2333 flush_tlb_page = viking_flush_tlb_page;
2334 flush_tlb_range = viking_flush_tlb_range;
2335
2336 flush_page_to_ram = viking_flush_page_to_ram;
2337 flush_page_for_dma = viking_flush_page_for_dma;
2338 flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
2339
2340 poke_srmmu = poke_viking;
2341 }
2342
2343 /* Probe the module and implementation/version fields and dispatch to the matching chip init routine. */
2344 static void get_srmmu_type(void)
2345 {
2346 unsigned long mreg, psr;
2347 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2348
2349 srmmu_modtype = SRMMU_INVAL_MOD;
2350 hwbug_bitmask = 0;
2351
2352 mreg = srmmu_get_mmureg(); psr = get_psr();
2353 mod_typ = (mreg & 0xf0000000) >> 28;
2354 mod_rev = (mreg & 0x0f000000) >> 24;
2355 psr_typ = (psr >> 28) & 0xf;
2356 psr_vers = (psr >> 24) & 0xf;
2357
2358 /* First, check for a ROSS module: HyperSparc or Cypress. */
2359 if(mod_typ == 1) {
2360 switch(mod_rev) {
2361 case 7:
2362 /* module revision 7: HyperSparc */
2363 init_hypersparc();
2364 break;
2365 case 0:
2366 /* module revision 0: uniprocessor Cypress-604 */
2367 init_cypress_604();
2368 break;
2369 case 13:
2370 case 14:
2371 case 15:
2372 /* module revisions 13-15: MP-capable Cypress-605 */
2373 init_cypress_605(mod_rev);
2374 break;
2375 default:
2376 srmmu_is_bad();
2377 break;
2378 };
2379 return;
2380 }
2381
2382 /* Next, check for a Fujitsu Swift. */
2383 if(psr_typ == 0 && psr_vers == 4) {
2384 init_swift();
2385 return;
2386 }
2387
2388 /* Now the TI Viking family. */
2389 if(psr_typ == 4 &&
2390 ((psr_vers == 0) ||
2391 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2392 init_viking();
2393 return;
2394 }
2395
2396 /* Finally, the TI Tsunami. */
2397 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2398 init_tsunami();
2399 return;
2400 }
2401
2402 /* Nothing matched; complain and halt. */
2403 srmmu_is_bad();
2404 }
2405
2406 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2407 tsetup_mmu_patchme, rtrap_mmu_patchme;
2408
2409 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2410 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2411
2412 #ifdef __SMP__
2413 extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
2414 #endif
2415
2416 extern unsigned long srmmu_fault;
2417
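/* PATCH_BRANCH overwrites the instruction at `insn' with an
 * unconditional branch to `dest'.  SPARC_BRANCH builds the "ba" opcode
 * with a pc-relative, word-aligned 22-bit displacement, roughly:
 *
 *	0x10800000 | (((dest - insn) >> 2) & 0x003fffff)
 *
 * patch_window_trap_handlers() uses it to point the window spill/fill
 * and trap setup/return stack checks, plus the text/data fault entries
 * of the trap table, at their SRMMU-aware versions.
 */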
2418 #define PATCH_BRANCH(insn, dest) do { \
2419 iaddr = &(insn); \
2420 daddr = &(dest); \
2421 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2422 } while(0)
2423
2424 static void patch_window_trap_handlers(void)
2425 {
2426 unsigned long *iaddr, *daddr;
2427
2428 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2429 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2430 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2431 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2432 #ifdef __SMP__
2433 PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
2434 #endif
2435 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2436 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2437 PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2438 }
2439
2440 #ifdef __SMP__
2441 /* SMP wrappers: hand the per-page flushes off to the cross-call machinery. */
2442 static void smp_flush_page_for_dma(unsigned long page)
2443 {
2444 xc1((smpfunc_t) local_flush_page_for_dma, page);
2445 }
2446
2447 static void smp_flush_cache_page_to_uncache(unsigned long page)
2448 {
2449 xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
2450 }
2451
2452 static void smp_flush_tlb_page_for_cbit(unsigned long page)
2453 {
2454 xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
2455 }
2456 #endif
2457
2458 /* Load up the routines and constants for the sun4m SRMMU. */
2459 void ld_mmu_srmmu(void)
2460 {
2461 /* First the constants. */
2462 pmd_shift = SRMMU_PMD_SHIFT;
2463 pmd_size = SRMMU_PMD_SIZE;
2464 pmd_mask = SRMMU_PMD_MASK;
2465 pgdir_shift = SRMMU_PGDIR_SHIFT;
2466 pgdir_size = SRMMU_PGDIR_SIZE;
2467 pgdir_mask = SRMMU_PGDIR_MASK;
2468
2469 ptrs_per_pte = SRMMU_PTRS_PER_PTE;
2470 ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
2471 ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
2472
2473 page_none = SRMMU_PAGE_NONE;
2474 page_shared = SRMMU_PAGE_SHARED;
2475 page_copy = SRMMU_PAGE_COPY;
2476 page_readonly = SRMMU_PAGE_RDONLY;
2477 page_kernel = SRMMU_PAGE_KERNEL;
2478 pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2479
2480 /* Now the function pointers. */
2481 set_pte = srmmu_set_pte;
2482 switch_to_context = srmmu_switch_to_context;
2483 pmd_align = srmmu_pmd_align;
2484 pgdir_align = srmmu_pgdir_align;
2485 vmalloc_start = srmmu_vmalloc_start;
2486
2487 pte_page = srmmu_pte_page;
2488 pmd_page = srmmu_pmd_page;
2489 pgd_page = srmmu_pgd_page;
2490
2491 sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
2492
2493 pte_none = srmmu_pte_none;
2494 pte_present = srmmu_pte_present;
2495 pte_clear = srmmu_pte_clear;
2496
2497 pmd_none = srmmu_pmd_none;
2498 pmd_bad = srmmu_pmd_bad;
2499 pmd_present = srmmu_pmd_present;
2500 pmd_clear = srmmu_pmd_clear;
2501
2502 pgd_none = srmmu_pgd_none;
2503 pgd_bad = srmmu_pgd_bad;
2504 pgd_present = srmmu_pgd_present;
2505 pgd_clear = srmmu_pgd_clear;
2506
2507 mk_pte = srmmu_mk_pte;
2508 pgd_set = srmmu_pgd_set;
2509 mk_pte_io = srmmu_mk_pte_io;
2510 pte_modify = srmmu_pte_modify;
2511 pgd_offset = srmmu_pgd_offset;
2512 pmd_offset = srmmu_pmd_offset;
2513 pte_offset = srmmu_pte_offset;
2514 pte_free_kernel = srmmu_pte_free_kernel;
2515 pmd_free_kernel = srmmu_pmd_free_kernel;
2516 pte_alloc_kernel = srmmu_pte_alloc_kernel;
2517 pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
2518 pte_free = srmmu_pte_free;
2519 pte_alloc = srmmu_pte_alloc;
2520 pmd_free = srmmu_pmd_free;
2521 pmd_alloc = srmmu_pmd_alloc;
2522 pgd_free = srmmu_pgd_free;
2523 pgd_alloc = srmmu_pgd_alloc;
2524
2525 pte_write = srmmu_pte_write;
2526 pte_dirty = srmmu_pte_dirty;
2527 pte_young = srmmu_pte_young;
2528 pte_wrprotect = srmmu_pte_wrprotect;
2529 pte_mkclean = srmmu_pte_mkclean;
2530 pte_mkold = srmmu_pte_mkold;
2531 pte_mkwrite = srmmu_pte_mkwrite;
2532 pte_mkdirty = srmmu_pte_mkdirty;
2533 pte_mkyoung = srmmu_pte_mkyoung;
2534 update_mmu_cache = srmmu_update_mmu_cache;
2535 mmu_exit_hook = srmmu_exit_hook;
2536 mmu_flush_hook = srmmu_flush_hook;
2537 mmu_lockarea = srmmu_lockarea;
2538 mmu_unlockarea = srmmu_unlockarea;
2539
2540 mmu_get_scsi_one = srmmu_get_scsi_one;
2541 mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
2542 mmu_release_scsi_one = srmmu_release_scsi_one;
2543 mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
2544
2545 mmu_info = srmmu_mmu_info;
2546 mmu_v2p = srmmu_v2p;
2547 mmu_p2v = srmmu_p2v;
2548
2549 /* Task struct and kernel stack allocation/freeing. */
2550 alloc_kernel_stack = srmmu_alloc_kernel_stack;
2551 alloc_task_struct = srmmu_alloc_task_struct;
2552 free_kernel_stack = srmmu_free_kernel_stack;
2553 free_task_struct = srmmu_free_task_struct;
2554
2555 quick_kernel_fault = srmmu_quick_kernel_fault;
2556
2557 /* SRMMU-specific entry points. */
2558 ctxd_set = srmmu_ctxd_set;
2559 pmd_set = srmmu_pmd_set;
2560
2561 get_srmmu_type();
2562 patch_window_trap_handlers();
2563
2564 #ifdef __SMP__
2565
2566 /* Save the chip routines as the local flushes, then swap in the SMP cross-call wrappers. */
2567 local_flush_cache_all = flush_cache_all;
2568 local_flush_cache_mm = flush_cache_mm;
2569 local_flush_cache_range = flush_cache_range;
2570 local_flush_cache_page = flush_cache_page;
2571 local_flush_tlb_all = flush_tlb_all;
2572 local_flush_tlb_mm = flush_tlb_mm;
2573 local_flush_tlb_range = flush_tlb_range;
2574 local_flush_tlb_page = flush_tlb_page;
2575 local_flush_page_to_ram = flush_page_to_ram;
2576 local_flush_page_for_dma = flush_page_for_dma;
2577 local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
2578 local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
2579
2580 flush_cache_all = smp_flush_cache_all;
2581 flush_cache_mm = smp_flush_cache_mm;
2582 flush_cache_range = smp_flush_cache_range;
2583 flush_cache_page = smp_flush_cache_page;
2584 flush_tlb_all = smp_flush_tlb_all;
2585 flush_tlb_mm = smp_flush_tlb_mm;
2586 flush_tlb_range = smp_flush_tlb_range;
2587 flush_tlb_page = smp_flush_tlb_page;
2588 flush_page_to_ram = smp_flush_page_to_ram;
2589 flush_page_for_dma = smp_flush_page_for_dma;
2590 flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
2591 flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
2592 #endif
2593 }