This source file includes the following definitions.
- srmmu_v2p
- srmmu_p2v
- srmmu_swap
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_vmalloc_start
- srmmu_pgd_page
- srmmu_pmd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_clear
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_clear
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_clear
- srmmu_pte_write
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_wrprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_mkwrite
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_mk_pte
- srmmu_mk_pte_io
- srmmu_ctxd_set
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_uncache_page
- srmmu_recache_page
- srmmu_getpage
- srmmu_putpage
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- srmmu_set_pte
- srmmu_quick_kernel_fault
- alloc_context
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_lockarea
- srmmu_unlockarea
- srmmu_alloc_task_struct
- srmmu_alloc_kernel_stack
- srmmu_free_task_struct
- srmmu_free_kernel_stack
- tsunami_flush_cache_all
- tsunami_flush_cache_mm
- tsunami_flush_cache_range
- tsunami_flush_cache_page
- tsunami_flush_cache_page_to_uncache
- tsunami_flush_page_to_ram
- tsunami_flush_page_for_dma
- tsunami_flush_tlb_all
- tsunami_flush_tlb_mm
- tsunami_flush_tlb_range
- tsunami_flush_tlb_page
- tsunami_flush_tlb_page_for_cbit
- swift_flush_cache_all
- swift_flush_cache_mm
- swift_flush_cache_range
- swift_flush_cache_page
- swift_flush_page_to_ram
- swift_flush_page_for_dma
- swift_flush_cache_page_to_uncache
- swift_flush_tlb_all
- swift_flush_tlb_mm
- swift_flush_tlb_range
- swift_flush_tlb_page
- swift_flush_tlb_page_for_cbit
- viking_flush_cache_all
- viking_flush_cache_mm
- viking_flush_cache_range
- viking_flush_cache_page
- viking_flush_page_to_ram
- viking_flush_page_for_dma
- viking_mxcc_flush_page
- viking_no_mxcc_flush_page
- viking_flush_tlb_all
- viking_flush_tlb_mm
- viking_flush_tlb_range
- viking_flush_tlb_page
- viking_flush_tlb_page_for_cbit
- cypress_flush_tlb_all
- cypress_flush_tlb_mm
- cypress_flush_tlb_range
- cypress_flush_tlb_page
- hypersparc_flush_cache_all
- hypersparc_flush_cache_mm
- hypersparc_flush_cache_range
- hypersparc_flush_cache_page
- hypersparc_flush_page_to_ram
- hypersparc_flush_page_for_dma
- hypersparc_flush_cache_page_to_uncache
- hypersparc_flush_tlb_all
- hypersparc_flush_tlb_mm
- hypersparc_flush_tlb_range
- hypersparc_flush_tlb_page
- hypersparc_flush_tlb_page_for_cbit
- hypersparc_ctxd_set
- hypersparc_update_rootmmu_dir
- hypersparc_set_pte
- hypersparc_switch_to_context
- srmmu_map_dvma_pages_for_iommu
- srmmu_uncache_iommu_page_table
- iommu_init
- srmmu_get_scsi_one
- srmmu_get_scsi_sgl
- srmmu_release_scsi_one
- srmmu_release_scsi_sgl
- srmmu_early_paddr
- srmmu_early_pgd_set
- srmmu_early_pmd_set
- srmmu_early_pgd_page
- srmmu_early_pmd_page
- srmmu_early_pmd_offset
- srmmu_early_pte_offset
- srmmu_init_alloc
- srmmu_allocate_ptable_skeleton
- srmmu_inherit_prom_mappings
- srmmu_map_dvma_pages_for_cpu
- srmmu_map_kernel
- srmmu_paging_init
- srmmu_mmu_info
- srmmu_update_mmu_cache
- srmmu_exit_hook
- srmmu_flush_hook
- hypersparc_exit_hook
- hypersparc_flush_hook
- srmmu_is_bad
- poke_hypersparc
- init_hypersparc
- poke_cypress
- init_cypress_common
- init_cypress_604
- init_cypress_605
- poke_swift
- init_swift
- poke_tsunami
- init_tsunami
- poke_viking
- init_viking
- get_srmmu_type
- patch_window_trap_handlers
- smp_flush_page_for_dma
- smp_flush_cache_page_to_uncache
- smp_flush_tlb_page_for_cbit
- ld_mmu_srmmu
1
2
3
4
5
6
7
8
9 #include <linux/config.h>
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12
13 #include <asm/page.h>
14 #include <asm/pgtable.h>
15 #include <asm/io.h>
16 #include <asm/kdebug.h>
17 #include <asm/vaddrs.h>
18 #include <asm/traps.h>
19 #include <asm/smp.h>
20 #include <asm/mbus.h>
21 #include <asm/cache.h>
22 #include <asm/oplib.h>
23 #include <asm/sbus.h>
24 #include <asm/iommu.h>
25 #include <asm/asi.h>
26 #include <asm/msi.h>
27
28
29 #include <asm/viking.h>
30 #include <asm/mxcc.h>
31 #include <asm/ross.h>
32 #include <asm/tsunami.h>
33 #include <asm/swift.h>
34
35 enum mbus_module srmmu_modtype;
36 unsigned int hwbug_bitmask;
37 int hyper_cache_size;
38 int hyper_line_size;
39
40 #ifdef __SMP__
41 extern void smp_capture(void);
42 extern void smp_release(void);
43 #else
44 #define smp_capture()
45 #define smp_release()
46 #endif
47
48 static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
49 static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
50
51 static void (*flush_page_for_dma)(unsigned long page);
52 static void (*flush_cache_page_to_uncache)(unsigned long page);
53 static void (*flush_tlb_page_for_cbit)(unsigned long page);
54 #ifdef __SMP__
55 static void (*local_flush_page_for_dma)(unsigned long page);
56 static void (*local_flush_cache_page_to_uncache)(unsigned long page);
57 static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
58 #endif
59
60 static struct srmmu_stats {
61 int invall;
62 int invpg;
63 int invrnge;
64 int invmm;
65 } module_stats;
66
67 static char *srmmu_name;
68
69 ctxd_t *srmmu_ctx_table_phys;
70 ctxd_t *srmmu_context_table;
71
72 static struct srmmu_trans {
73 unsigned long vbase;
74 unsigned long pbase;
75 int size;
76 } srmmu_map[SPARC_PHYS_BANKS];
77
78 static int can_cache_ptables = 0;
79 static int viking_mxcc_present = 0;
80
81
82
83
84
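/* srmmu_v2p()/srmmu_p2v() translate between kernel virtual and physical
 * addresses by walking the srmmu_map[] bank table; 0xffffffff is returned
 * when no bank covers the address. */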
85 static inline unsigned long srmmu_v2p(unsigned long vaddr)
86 {
87 int i;
88
89 for(i=0; srmmu_map[i].size != 0; i++) {
90 if(srmmu_map[i].vbase <= vaddr &&
91 (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
92 return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
93 }
94 return 0xffffffffUL;
95 }
96
97 static inline unsigned long srmmu_p2v(unsigned long paddr)
98 {
99 int i;
100
101 for(i=0; srmmu_map[i].size != 0; i++) {
102 if(srmmu_map[i].pbase <= paddr &&
103 (srmmu_map[i].pbase + srmmu_map[i].size > paddr))
104 return (paddr - srmmu_map[i].pbase) + srmmu_map[i].vbase;
105 }
106 return 0xffffffffUL;
107 }
108
109
110
111
112
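/* Atomically exchange a word in memory using the SPARC 'swap' instruction.
 * Page table entries are installed through srmmu_set_entry()/this helper,
 * so an update is a single atomic store. */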
113 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
114 {
115 #if CONFIG_AP1000
116
117 if (!(value&0xf0000000))
118 value |= 0x80000000;
119 if (value == 0x80000000) value = 0;
120 #endif
121 __asm__ __volatile__("swap [%2], %0\n\t" :
122 "=&r" (value) :
123 "0" (value), "r" (addr));
124 return value;
125 }
126
127
128 #define srmmu_set_entry(ptr, newentry) \
129 srmmu_swap((unsigned long *) (ptr), (newentry))
130
131
132
133 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
134 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
135
136 static unsigned long srmmu_vmalloc_start(void)
137 {
138 return SRMMU_VMALLOC_START;
139 }
140
141 static unsigned long srmmu_pgd_page(pgd_t pgd)
142 { return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
143
144 static unsigned long srmmu_pmd_page(pmd_t pmd)
145 { return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
146
147 static unsigned long srmmu_pte_page(pte_t pte)
148 { return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
149
150 static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
151 static int srmmu_pte_present(pte_t pte)
152 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
153
154 static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
155
156 static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
157 static int srmmu_pmd_bad(pmd_t pmd)
158 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
159
160 static int srmmu_pmd_present(pmd_t pmd)
161 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
162
163 static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
164
165 static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
166 static int srmmu_pgd_bad(pgd_t pgd)
167 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
168
169 static int srmmu_pgd_present(pgd_t pgd)
170 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
171
172 static void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
173
174 static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
175 static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
176 static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
177
178 static pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
179 static pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
180 static pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~SRMMU_REF; return pte; }
181 static pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) |= SRMMU_WRITE; return pte; }
182 static pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= SRMMU_DIRTY; return pte; }
183 static pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= SRMMU_REF; return pte; }
184
185
186
187
188
189 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
190 { pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
191
192 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
193 {
194 pte_t pte;
195 pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
196 return pte;
197 }
198
199 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
200 {
201 srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
202 }
203
204 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
205 {
206 srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
207 }
208
209 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
210 {
211 srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
212 }
213
214 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
215 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
216
217
218 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
219 {
220 return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
221 }
222
223
224 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
225 {
226 return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
227 }
228
229
230 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
231 {
232 return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
233 }
234
235
236 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
237 {
238 if(tsk->mm->context != NO_CONTEXT)
239 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
240 }
241
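/* When page tables may not be cached (can_cache_ptables == 0), pages handed
 * out for them are made uncacheable: srmmu_uncache_page() clears the
 * SRMMU_CACHE bit in the kernel mapping and srmmu_recache_page() restores it
 * before the page is freed. */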
242 static inline void srmmu_uncache_page(unsigned long addr)
243 {
244 pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
245 pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
246 pte_t *ptep = srmmu_pte_offset(pmdp, addr);
247
248 flush_cache_page_to_uncache(addr);
249 set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
250 flush_tlb_page_for_cbit(addr);
251 }
252
253 static inline void srmmu_recache_page(unsigned long addr)
254 {
255 pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
256 pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
257 pte_t *ptep = srmmu_pte_offset(pmdp, addr);
258
259 set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
260 flush_tlb_page_for_cbit(addr);
261 }
262
263 static inline unsigned long srmmu_getpage(void)
264 {
265 unsigned long page = get_free_page(GFP_KERNEL);
266
267 if (can_cache_ptables)
268 return page;
269
270 if(page)
271 srmmu_uncache_page(page);
272 return page;
273 }
274
275 static inline void srmmu_putpage(unsigned long page)
276 {
277 if (!can_cache_ptables)
278 srmmu_recache_page(page);
279 free_page(page);
280 }
281
282
283 #define NEW_PGD() (pgd_t *) srmmu_getpage()
284 #define NEW_PMD() (pmd_t *) srmmu_getpage()
285 #define NEW_PTE() (pte_t *) srmmu_getpage()
286 #define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
287 #define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
288 #define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
289
290
291
292
293
294
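/* Page table allocation and freeing.  Each pgd/pmd/pte table occupies one
 * page obtained from srmmu_getpage(), so it is automatically uncached on
 * modules that cannot cache page tables. */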
295 static void srmmu_pte_free_kernel(pte_t *pte)
296 {
297 FREE_PTE(pte);
298 }
299
300 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
301 {
302 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
303 if(srmmu_pmd_none(*pmd)) {
304 pte_t *page = NEW_PTE();
305 if(srmmu_pmd_none(*pmd)) {
306 if(page) {
307 pmd_set(pmd, page);
308 return page + address;
309 }
310 pmd_set(pmd, BAD_PAGETABLE);
311 return NULL;
312 }
313 FREE_PTE(page);
314 }
315 if(srmmu_pmd_bad(*pmd)) {
316 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
317 pmd_set(pmd, BAD_PAGETABLE);
318 return NULL;
319 }
320 return (pte_t *) srmmu_pmd_page(*pmd) + address;
321 }
322
323 static void srmmu_pmd_free_kernel(pmd_t *pmd)
324 {
325 FREE_PMD(pmd);
326 }
327
328 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
329 {
330 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
331 if(srmmu_pgd_none(*pgd)) {
332 pmd_t *page = NEW_PMD();
333 if(srmmu_pgd_none(*pgd)) {
334 if(page) {
335 pgd_set(pgd, page);
336 return page + address;
337 }
338 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
339 return NULL;
340 }
341 FREE_PMD(page);
342 }
343 if(srmmu_pgd_bad(*pgd)) {
344 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
345 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
346 return NULL;
347 }
348 return (pmd_t *) pgd_page(*pgd) + address;
349 }
350
351 static void srmmu_pte_free(pte_t *pte)
352 {
353 FREE_PTE(pte);
354 }
355
356 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
357 {
358 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
359 if(srmmu_pmd_none(*pmd)) {
360 pte_t *page = NEW_PTE();
361 if(srmmu_pmd_none(*pmd)) {
362 if(page) {
363 pmd_set(pmd, page);
364 return page + address;
365 }
366 pmd_set(pmd, BAD_PAGETABLE);
367 return NULL;
368 }
369 FREE_PTE(page);
370 }
371 if(srmmu_pmd_bad(*pmd)) {
372 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
373 pmd_set(pmd, BAD_PAGETABLE);
374 return NULL;
375 }
376 return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
377 }
378
379
380 static void srmmu_pmd_free(pmd_t * pmd)
381 {
382 FREE_PMD(pmd);
383 }
384
385 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
386 {
387 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
388 if(srmmu_pgd_none(*pgd)) {
389 pmd_t *page = NEW_PMD();
390 if(srmmu_pgd_none(*pgd)) {
391 if(page) {
392 pgd_set(pgd, page);
393 return page + address;
394 }
395 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
396 return NULL;
397 }
398 FREE_PMD(page);
399 }
400 if(srmmu_pgd_bad(*pgd)) {
401 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
402 pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
403 return NULL;
404 }
405 return (pmd_t *) srmmu_pgd_page(*pgd) + address;
406 }
407
408 static void srmmu_pgd_free(pgd_t *pgd)
409 {
410 FREE_PGD(pgd);
411 }
412
413 static pgd_t *srmmu_pgd_alloc(void)
414 {
415 return NEW_PGD();
416 }
417
418 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
419 {
420 srmmu_set_entry(ptep, pte_val(pteval));
421 }
422
423 static void srmmu_quick_kernel_fault(unsigned long address)
424 {
425 printk("Penguin faults at address %08lx\n", address);
426 panic("Srmmu bolixed...");
427 }
428
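/* Allocate an MMU context for 'mm'.  If the free list is empty, an in-use
 * context (other than the current task's) is stolen after flushing its
 * cache and TLB. */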
429 static inline void alloc_context(struct mm_struct *mm)
430 {
431 struct ctx_list *ctxp;
432
433 ctxp = ctx_free.next;
434 if(ctxp != &ctx_free) {
435 remove_from_ctx_list(ctxp);
436 add_to_used_ctxlist(ctxp);
437 mm->context = ctxp->ctx_number;
438 ctxp->ctx_mm = mm;
439 return;
440 }
441 ctxp = ctx_used.next;
442 if(ctxp->ctx_mm == current->mm)
443 ctxp = ctxp->next;
444 if(ctxp == &ctx_used)
445 panic("out of mmu contexts");
446 flush_cache_mm(ctxp->ctx_mm);
447 flush_tlb_mm(ctxp->ctx_mm);
448 remove_from_ctx_list(ctxp);
449 add_to_used_ctxlist(ctxp);
450 ctxp->ctx_mm->context = NO_CONTEXT;
451 ctxp->ctx_mm = mm;
452 mm->context = ctxp->ctx_number;
453 }
454
455 static void srmmu_switch_to_context(struct task_struct *tsk)
456 {
457
458
459
460
461
462 if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
463 (tsk->flags & PF_EXITING))
464 return;
465 if(tsk->mm->context == NO_CONTEXT) {
466 alloc_context(tsk->mm);
467 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
468 }
469 srmmu_set_context(tsk->mm->context);
470 }
471
472
473 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
474 {
475 pgd_t *pgdp;
476 pmd_t *pmdp;
477 pte_t *ptep;
478 unsigned long tmp;
479
480 physaddr &= PAGE_MASK;
481 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
482 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
483 ptep = srmmu_pte_offset(pmdp, virt_addr);
484 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
485
486
487
488
489
490 tmp |= (bus_type << 28);
491 if(rdonly)
492 tmp |= SRMMU_PRIV_RDONLY;
493 else
494 tmp |= SRMMU_PRIV;
495 flush_page_to_ram(virt_addr);
496 srmmu_set_entry(ptep, tmp);
497 flush_tlb_all();
498 }
499
500 static char *srmmu_lockarea(char *vaddr, unsigned long len)
501 {
502 return vaddr;
503 }
504
505 static void srmmu_unlockarea(char *vaddr, unsigned long len)
506 {
507 }
508
509
510
511
512
513
514
515
516
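/* Task structures take a single page; kernel stacks are four pages
 * (an order-2 allocation). */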
517 struct task_struct *srmmu_alloc_task_struct(void)
518 {
519 unsigned long page;
520
521 page = get_free_page(GFP_KERNEL);
522 if(!page)
523 return (struct task_struct *) 0;
524 return (struct task_struct *) page;
525 }
526
527 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
528 {
529 unsigned long pages;
530
531 pages = __get_free_pages(GFP_KERNEL, 2, 0);
532 if(!pages)
533 return 0;
534 memset((void *) pages, 0, (PAGE_SIZE << 2));
535 return pages;
536 }
537
538 static void srmmu_free_task_struct(struct task_struct *tsk)
539 {
540 free_page((unsigned long) tsk);
541 }
542
543 static void srmmu_free_kernel_stack(unsigned long stack)
544 {
545 free_pages(stack, 2);
546 }
547
548
549
550
551
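/* TI Tsunami routines: there is no selective cache flush, so every cache
 * operation dumps the whole instruction and data caches. */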
552 static void tsunami_flush_cache_all(void)
553 {
554 flush_user_windows();
555 tsunami_flush_icache();
556 tsunami_flush_dcache();
557 }
558
559 static void tsunami_flush_cache_mm(struct mm_struct *mm)
560 {
561 #ifndef __SMP__
562 if(mm->context != NO_CONTEXT) {
563 #endif
564 flush_user_windows();
565 tsunami_flush_icache();
566 tsunami_flush_dcache();
567 #ifndef __SMP__
568 }
569 #endif
570 }
571
572 static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
573 {
574 #ifndef __SMP__
575 if(mm->context != NO_CONTEXT) {
576 #endif
577 flush_user_windows();
578 tsunami_flush_icache();
579 tsunami_flush_dcache();
580 #ifndef __SMP__
581 }
582 #endif
583 }
584
585 static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
586 {
587 #ifndef __SMP__
588 struct mm_struct *mm = vma->vm_mm;
589 if(mm->context != NO_CONTEXT) {
590 #endif
591 flush_user_windows();
592 tsunami_flush_icache();
593 tsunami_flush_dcache();
594 #ifndef __SMP__
595 }
596 #endif
597 }
598
599 static void tsunami_flush_cache_page_to_uncache(unsigned long page)
600 {
601 tsunami_flush_dcache();
602 }
603
604
605 static void tsunami_flush_page_to_ram(unsigned long page)
606 {
607 tsunami_flush_icache();
608 tsunami_flush_dcache();
609 }
610
611
612 static void tsunami_flush_page_for_dma(unsigned long page)
613 {
614 tsunami_flush_icache();
615 tsunami_flush_dcache();
616 }
617
618
619
620
621
622
623
624
625
626 #define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
627 nop(); nop(); nop(); nop(); nop(); } while(0)
628
629 static void tsunami_flush_tlb_all(void)
630 {
631 module_stats.invall++;
632 srmmu_flush_whole_tlb();
633 TSUNAMI_SUCKS;
634 }
635
636 static void tsunami_flush_tlb_mm(struct mm_struct *mm)
637 {
638 module_stats.invmm++;
639 #ifndef __SMP__
640 if(mm->context != NO_CONTEXT) {
641 #endif
642 srmmu_flush_whole_tlb();
643 TSUNAMI_SUCKS;
644 #ifndef __SMP__
645 }
646 #endif
647 }
648
649 static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
650 {
651 module_stats.invrnge++;
652 #ifndef __SMP__
653 if(mm->context != NO_CONTEXT) {
654 #endif
655 srmmu_flush_whole_tlb();
656 TSUNAMI_SUCKS;
657 #ifndef __SMP__
658 }
659 #endif
660 }
661
662 static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
663 {
664 int octx;
665 struct mm_struct *mm = vma->vm_mm;
666
667 #ifndef __SMP__
668 if(mm->context != NO_CONTEXT) {
669 #endif
670 octx = srmmu_get_context();
671
672 srmmu_set_context(mm->context);
673 srmmu_flush_tlb_page(page);
674 TSUNAMI_SUCKS;
675 srmmu_set_context(octx);
676 #ifndef __SMP__
677 }
678 #endif
679 module_stats.invpg++;
680 }
681
682 static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
683 {
684 srmmu_flush_tlb_page(page);
685 }
686
687
688
689
690
691
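/* Fujitsu Swift routines: most cache flushes fall back to
 * swift_idflash_clear(), which flash-clears the combined I/D caches. */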
692 static void swift_flush_cache_all(void)
693 {
694 flush_user_windows();
695 swift_idflash_clear();
696 }
697
698 static void swift_flush_cache_mm(struct mm_struct *mm)
699 {
700 #ifndef __SMP__
701 if(mm->context != NO_CONTEXT) {
702 #endif
703 flush_user_windows();
704 swift_idflash_clear();
705 #ifndef __SMP__
706 }
707 #endif
708 }
709
710 static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
711 {
712 #ifndef __SMP__
713 if(mm->context != NO_CONTEXT) {
714 #endif
715 flush_user_windows();
716 swift_idflash_clear();
717 #ifndef __SMP__
718 }
719 #endif
720 }
721
722 static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
723 {
724 #ifndef __SMP__
725 struct mm_struct *mm = vma->vm_mm;
726 if(mm->context != NO_CONTEXT) {
727 #endif
728 flush_user_windows();
729 if(vma->vm_flags & VM_EXEC)
730 swift_flush_icache();
731 swift_flush_dcache();
732 #ifndef __SMP__
733 }
734 #endif
735 }
736
737
738 static void swift_flush_page_to_ram(unsigned long page)
739 {
740 }
741
742
743 static void swift_flush_page_for_dma(unsigned long page)
744 {
745 swift_flush_dcache();
746 }
747
748 static void swift_flush_cache_page_to_uncache(unsigned long page)
749 {
750 swift_flush_dcache();
751 }
752
753 static void swift_flush_tlb_all(void)
754 {
755 module_stats.invall++;
756 srmmu_flush_whole_tlb();
757 }
758
759 static void swift_flush_tlb_mm(struct mm_struct *mm)
760 {
761 module_stats.invmm++;
762 #ifndef __SMP__
763 if(mm->context != NO_CONTEXT)
764 #endif
765 srmmu_flush_whole_tlb();
766 }
767
768 static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
769 {
770 module_stats.invrnge++;
771 #ifndef __SMP__
772 if(mm->context != NO_CONTEXT)
773 #endif
774 srmmu_flush_whole_tlb();
775 }
776
777 static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
778 {
779 #ifndef __SMP__
780 struct mm_struct *mm = vma->vm_mm;
781 if(mm->context != NO_CONTEXT)
782 #endif
783 srmmu_flush_whole_tlb();
784 module_stats.invpg++;
785 }
786
787 static void swift_flush_tlb_page_for_cbit(unsigned long page)
788 {
789 srmmu_flush_whole_tlb();
790 }
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
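/* TI Viking (SuperSPARC) routines.  The cache flush entry points only need
 * to flush register windows (or do nothing at all); viking_mxcc_flush_page()
 * and viking_no_mxcc_flush_page() provide the page flush for systems with
 * and without the MXCC external cache controller. */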
809 static void viking_flush_cache_all(void)
810 {
811 }
812
813 static void viking_flush_cache_mm(struct mm_struct *mm)
814 {
815 #ifndef __SMP__
816 if(mm->context != NO_CONTEXT) {
817 #endif
818 flush_user_windows();
819 #ifndef __SMP__
820 }
821 #endif
822 }
823
824 static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
825 {
826 #ifndef __SMP__
827 if(mm->context != NO_CONTEXT) {
828 #endif
829 flush_user_windows();
830 #ifndef __SMP__
831 }
832 #endif
833 }
834
835 static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
836 {
837 #ifndef __SMP__
838 struct mm_struct *mm = vma->vm_mm;
839 if(mm->context != NO_CONTEXT) {
840 #endif
841 flush_user_windows();
842 #ifndef __SMP__
843 }
844 #endif
845 }
846
847
848 static void viking_flush_page_to_ram(unsigned long page)
849 {
850 }
851
852
853 static void viking_flush_page_for_dma(unsigned long page)
854 {
855 }
856
857 static void viking_mxcc_flush_page(unsigned long page)
858 {
859 unsigned long ppage = srmmu_hwprobe(page);
860 unsigned long paddr0, paddr1;
861
862 if (!ppage)
863 return;
864
865 paddr0 = (ppage >> 28) | 0x10;
866 paddr1 = (ppage << 4) & PAGE_MASK;
867
868
869
870
871
872
873 __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
874 "or %%g0, %1, %%g3\n"
875 "1:\n\t"
876 "stda %%g2, [%2] %5\n\t"
877 "stda %%g2, [%3] %5\n\t"
878 "add %%g3, %4, %%g3\n\t"
879 "btst 0xfff, %%g3\n\t"
880 "bne 1b\n\t"
881 "nop\n\t" : :
882 "r" (paddr0), "r" (paddr1),
883 "r" (MXCC_SRCSTREAM),
884 "r" (MXCC_DESSTREAM),
885 "r" (MXCC_STREAM_SIZE),
886 "i" (ASI_M_MXCC) : "g2", "g3");
887
888
889
890
891
892
893
894
895
896 }
897
898 static void viking_no_mxcc_flush_page(unsigned long page)
899 {
900 unsigned long ppage = srmmu_hwprobe(page) >> 8;
901 int set, block;
902 unsigned long ptag[2];
903 unsigned long vaddr;
904 int i;
905
906 if (!ppage)
907 return;
908
909 for (set = 0; set < 128; set++) {
910 for (block = 0; block < 4; block++) {
911
912 viking_get_dcache_ptag(set, block, ptag);
913
914 if (ptag[1] != ppage)
915 continue;
916 if (!(ptag[0] & VIKING_PTAG_VALID))
917 continue;
918 if (!(ptag[0] & VIKING_PTAG_DIRTY))
919 continue;
920
921
922
923
924
925
926
927 vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
928 for (i = 0; i < 8; i++) {
929 __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
930 "r" (vaddr) : "g2");
931 vaddr += PAGE_SIZE;
932 }
933
934
935 break;
936 }
937 }
938 }
939
940 static void viking_flush_tlb_all(void)
941 {
942 module_stats.invall++;
943 srmmu_flush_whole_tlb();
944 }
945
946 static void viking_flush_tlb_mm(struct mm_struct *mm)
947 {
948 int octx;
949 module_stats.invmm++;
950
951 #ifndef __SMP__
952 if(mm->context != NO_CONTEXT) {
953 #endif
954 octx = srmmu_get_context();
955 srmmu_set_context(mm->context);
956 srmmu_flush_tlb_ctx();
957 srmmu_set_context(octx);
958 #ifndef __SMP__
959 }
960 #endif
961 }
962
963 static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
964 {
965 int octx;
966 module_stats.invrnge++;
967
968 #ifndef __SMP__
969 if(mm->context != NO_CONTEXT) {
970 #endif
971 octx = srmmu_get_context();
972 srmmu_set_context(mm->context);
973 start &= SRMMU_PMD_MASK;
974 while(start < end) {
975 srmmu_flush_tlb_segment(start);
976 start += SRMMU_PMD_SIZE;
977 }
978 srmmu_set_context(octx);
979 #ifndef __SMP__
980 }
981 #endif
982 }
983
984 static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
985 {
986 int octx;
987 struct mm_struct *mm = vma->vm_mm;
988
989 module_stats.invpg++;
990 #ifndef __SMP__
991 if(mm->context != NO_CONTEXT) {
992 #endif
993 octx = srmmu_get_context();
994 srmmu_set_context(mm->context);
995 srmmu_flush_tlb_page(page);
996 srmmu_set_context(octx);
997 #ifndef __SMP__
998 }
999 #endif
1000 }
1001
1002 static void viking_flush_tlb_page_for_cbit(unsigned long page)
1003 {
1004 srmmu_flush_tlb_page(page);
1005 }
1006
1007
1008
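/* Cypress TLB routines; only the TLB side is handled here, the cache entry
 * points for Cypress are not set up in this file. */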
1009 static void cypress_flush_tlb_all(void)
1010 {
1011 module_stats.invall++;
1012 srmmu_flush_whole_tlb();
1013 }
1014
1015 static void cypress_flush_tlb_mm(struct mm_struct *mm)
1016 {
1017 int octx;
1018
1019 module_stats.invmm++;
1020 #ifndef __SMP__
1021 if(mm->context != NO_CONTEXT) {
1022 #endif
1023 octx = srmmu_get_context();
1024 srmmu_set_context(mm->context);
1025 srmmu_flush_tlb_ctx();
1026 srmmu_set_context(octx);
1027 #ifndef __SMP__
1028 }
1029 #endif
1030 }
1031
1032 static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1033 {
1034 int octx;
1035
1036 module_stats.invrnge++;
1037 #ifndef __SMP__
1038 if(mm->context != NO_CONTEXT) {
1039 #endif
1040 octx = srmmu_get_context();
1041 srmmu_set_context(mm->context);
1042 start &= SRMMU_PMD_MASK;
1043 while(start < end) {
1044 srmmu_flush_tlb_segment(start);
1045 start += SRMMU_PMD_SIZE;
1046 }
1047 srmmu_set_context(octx);
1048 #ifndef __SMP__
1049 }
1050 #endif
1051 }
1052
1053 static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1054 {
1055 int octx;
1056 struct mm_struct *mm = vma->vm_mm;
1057
1058 module_stats.invpg++;
1059 #ifndef __SMP__
1060 if(mm->context != NO_CONTEXT) {
1061 #endif
1062 octx = srmmu_get_context();
1063 srmmu_set_context(mm->context);
1064 srmmu_flush_tlb_page(page);
1065 srmmu_set_context(octx);
1066 #ifndef __SMP__
1067 }
1068 #endif
1069 }
1070
1071
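/* HyperSparc routines: cache flushes use the unconditional combined-cache
 * and whole-icache flushes; the page-level operations probe the MMU first
 * and read the fault status register afterwards (into 'clear'). */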
1072 static void hypersparc_flush_cache_all(void)
1073 {
1074 flush_user_windows();
1075 hyper_flush_unconditional_combined();
1076 hyper_flush_whole_icache();
1077 }
1078
1079 static void hypersparc_flush_cache_mm(struct mm_struct *mm)
1080 {
1081 #ifndef __SMP__
1082 if(mm->context != NO_CONTEXT) {
1083 #endif
1084 flush_user_windows();
1085 hyper_flush_unconditional_combined();
1086 hyper_flush_whole_icache();
1087 #ifndef __SMP__
1088 }
1089 #endif
1090 }
1091
1092 static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1093 {
1094 #ifndef __SMP__
1095 if(mm->context != NO_CONTEXT) {
1096 #endif
1097 flush_user_windows();
1098 hyper_flush_unconditional_combined();
1099 hyper_flush_whole_icache();
1100 #ifndef __SMP__
1101 }
1102 #endif
1103 }
1104
1105
1106
1107
1108 static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1109 {
1110 struct mm_struct *mm = vma->vm_mm;
1111 volatile unsigned long clear;
1112 int octx;
1113
1114 #ifndef __SMP__
1115 if(mm->context != NO_CONTEXT) {
1116 #endif
1117 octx = srmmu_get_context();
1118 flush_user_windows();
1119 srmmu_set_context(mm->context);
1120 hyper_flush_whole_icache();
1121 if(!srmmu_hwprobe(page))
1122 goto no_mapping;
1123 hyper_flush_cache_page(page);
1124 no_mapping:
1125 clear = srmmu_get_fstatus();
1126 srmmu_set_context(octx);
1127 #ifndef __SMP__
1128 }
1129 #endif
1130 }
1131
1132
1133 static void hypersparc_flush_page_to_ram(unsigned long page)
1134 {
1135 volatile unsigned long clear;
1136
1137 if(srmmu_hwprobe(page))
1138 hyper_flush_cache_page(page);
1139 clear = srmmu_get_fstatus();
1140 }
1141
1142
1143 static void hypersparc_flush_page_for_dma(unsigned long page)
1144 {
1145 volatile unsigned long clear;
1146
1147 if(srmmu_hwprobe(page))
1148 hyper_flush_cache_page(page);
1149 clear = srmmu_get_fstatus();
1150 }
1151
1152 static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
1153 {
1154 volatile unsigned long clear;
1155
1156 if(srmmu_hwprobe(page))
1157 hyper_flush_cache_page(page);
1158 clear = srmmu_get_fstatus();
1159 }
1160
1161 static void hypersparc_flush_tlb_all(void)
1162 {
1163 module_stats.invall++;
1164 srmmu_flush_whole_tlb();
1165 }
1166
1167 static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
1168 {
1169 int octx;
1170
1171 module_stats.invmm++;
1172 #ifndef __SMP__
1173 if(mm->context != NO_CONTEXT) {
1174 #endif
1175
1176 octx = srmmu_get_context();
1177 srmmu_set_context(mm->context);
1178 srmmu_flush_tlb_ctx();
1179 srmmu_set_context(octx);
1180
1181 #ifndef __SMP__
1182 }
1183 #endif
1184 }
1185
1186 static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
1187 {
1188 int octx;
1189
1190 module_stats.invrnge++;
1191 #ifndef __SMP__
1192 if(mm->context != NO_CONTEXT) {
1193 #endif
1194
1195 octx = srmmu_get_context();
1196 srmmu_set_context(mm->context);
1197 start &= SRMMU_PMD_MASK;
1198 while(start < end) {
1199 srmmu_flush_tlb_segment(start);
1200 start += SRMMU_PMD_SIZE;
1201 }
1202 srmmu_set_context(octx);
1203
1204 #ifndef __SMP__
1205 }
1206 #endif
1207 }
1208
1209 static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1210 {
1211 struct mm_struct *mm = vma->vm_mm;
1212 int octx;
1213
1214 module_stats.invpg++;
1215 #ifndef __SMP__
1216 if(mm->context != NO_CONTEXT) {
1217 #endif
1218
1219 octx = srmmu_get_context();
1220 srmmu_set_context(mm->context);
1221 srmmu_flush_tlb_page(page);
1222 srmmu_set_context(octx);
1223
1224 #ifndef __SMP__
1225 }
1226 #endif
1227 }
1228
1229 static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
1230 {
1231 srmmu_flush_tlb_page(page);
1232 }
1233
1234 static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
1235 {
1236 hyper_flush_whole_icache();
1237 srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
1238 }
1239
1240 static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
1241 {
1242 if(tsk->mm->context != NO_CONTEXT) {
1243 hyper_flush_whole_icache();
1244 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
1245 }
1246 }
1247
1248 static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
1249 {
1250
1251 __asm__ __volatile__("rd %%psr, %%g1\n\t"
1252 "wr %%g1, %4, %%psr\n\t"
1253 "nop; nop; nop;\n\t"
1254 "swap [%0], %1\n\t"
1255 "wr %%g1, 0x0, %%psr\n\t"
1256 "nop; nop; nop;\n\t" :
1257 "=r" (ptep), "=r" (pteval) :
1258 "0" (ptep), "1" (pteval), "i" (PSR_ET) :
1259 "g1");
1260 }
1261
1262 static void hypersparc_switch_to_context(struct task_struct *tsk)
1263 {
1264
1265
1266
1267
1268
1269 hyper_flush_whole_icache();
1270 if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
1271 (tsk->flags & PF_EXITING))
1272 return;
1273 if(tsk->mm->context == NO_CONTEXT) {
1274 alloc_context(tsk->mm);
1275 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
1276 }
1277 srmmu_set_context(tsk->mm->context);
1278 }
1279
1280
1281
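/* SBUS IOMMU support: DVMA addresses from iommu->start (0xfc000000) upward
 * are translated through a table of ioptes, which is built here and kept
 * uncached. */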
1282 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1283 static unsigned long first_dvma_page, last_dvma_page;
1284
1285 #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
1286 #define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
1287
1288 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
1289 {
1290 unsigned long first = first_dvma_page;
1291 unsigned long last = last_dvma_page;
1292 iopte_t *iopte;
1293
1294 iopte = iommu->page_table;
1295 iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
1296 while(first <= last) {
1297 iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
1298 first += PAGE_SIZE;
1299 }
1300 }
1301
1302 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
1303 {
1304 pgd_t *pgdp;
1305 pmd_t *pmdp;
1306 pte_t *ptep;
1307 unsigned long end = start + size;
1308
1309 while(start < end) {
1310 pgdp = srmmu_pgd_offset(init_task.mm, start);
1311 pmdp = srmmu_pmd_offset(pgdp, start);
1312 ptep = srmmu_pte_offset(pmdp, start);
1313 pte_val(*ptep) &= ~SRMMU_CACHE;
1314 start += PAGE_SIZE;
1315 }
1316 }
1317
1318 unsigned long iommu_init(int iommund, unsigned long memory_start,
1319 unsigned long memory_end, struct linux_sbus *sbus)
1320 {
1321 int impl, vers, ptsize;
1322 unsigned long tmp;
1323 struct iommu_struct *iommu;
1324 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
1325
1326 memory_start = LONG_ALIGN(memory_start);
1327 iommu = (struct iommu_struct *) memory_start;
1328 memory_start += sizeof(struct iommu_struct);
1329 prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
1330 iommu->regs = (struct iommu_regs *)
1331 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
1332 "IOMMU registers", iommu_promregs[0].which_io, 0x0);
1333 if(!iommu->regs)
1334 panic("Cannot map IOMMU registers.");
1335 impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
1336 vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
1337 tmp = iommu->regs->control;
1338 tmp &= ~(IOMMU_CTRL_RNGE);
1339 tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
1340 iommu->regs->control = tmp;
1341 iommu_invalidate(iommu->regs);
1342 iommu->plow = iommu->start = 0xfc000000;
1343 iommu->end = 0xffffffff;
1344
1345
1346 ptsize = iommu->end - iommu->start + 1;
1347 ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
1348
1349
1350 memory_start = PAGE_ALIGN(memory_start);
1351 memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
1352 iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
1353 memory_start += ptsize;
1354
1355
1356 flush_cache_all();
1357 srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
1358 flush_tlb_all();
1359 memset(iommu->page_table, 0, ptsize);
1360 srmmu_map_dvma_pages_for_iommu(iommu);
1361 iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
1362 iommu_invalidate(iommu->regs);
1363
1364 sbus->iommu = iommu;
1365 printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
1366 impl, vers, iommu->page_table, ptsize);
1367 return memory_start;
1368 }
1369
1370 static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1371 {
1372 struct iommu_struct *iommu = sbus->iommu;
1373 unsigned long page = (unsigned long) vaddr;
1374 unsigned long start, end, offset;
1375 iopte_t *iopte;
1376
1377 offset = page & ~PAGE_MASK;
1378 page &= PAGE_MASK;
1379
1380 start = iommu->plow;
1381 end = KADB_DEBUGGER_BEGVM;
1382 iopte = iommu->lowest;
1383 while(start < end) {
1384 if(!(iopte_val(*iopte) & IOPTE_VALID))
1385 break;
1386 iopte++;
1387 start += PAGE_SIZE;
1388 }
1389
1390 flush_page_for_dma(page);
1391 vaddr = (char *) (start | offset);
1392 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1393 iommu_invalidate_page(iommu->regs, start);
1394 iommu->lowest = iopte + 1;
1395 iommu->plow = start + PAGE_SIZE;
1396
1397 return vaddr;
1398 }
1399
1400 static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1401 {
1402 struct iommu_struct *iommu = sbus->iommu;
1403 unsigned long page, start, end, offset;
1404 iopte_t *iopte = iommu->lowest;
1405
1406 start = iommu->plow;
1407 end = KADB_DEBUGGER_BEGVM;
1408 while(sz >= 0) {
1409 page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
1410 offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
1411 while(start < end) {
1412 if(!(iopte_val(*iopte) & IOPTE_VALID))
1413 break;
1414 iopte++;
1415 start += PAGE_SIZE;
1416 }
1417 if(start == KADB_DEBUGGER_BEGVM)
1418 panic("Wheee, iomapping overflow.");
1419 flush_page_for_dma(page);
1420 sg[sz].alt_addr = (char *) (start | offset);
1421 iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
1422 iommu_invalidate_page(iommu->regs, start);
1423 iopte++;
1424 start += PAGE_SIZE;
1425 sz--;
1426 }
1427 iommu->lowest = iopte;
1428 iommu->plow = start;
1429 }
1430
1431 static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
1432 {
1433 struct iommu_struct *iommu = sbus->iommu;
1434 unsigned long page = (unsigned long) vaddr;
1435 iopte_t *iopte;
1436
1437 if(len > PAGE_SIZE)
1438 panic("Can only handle page sized IOMMU mappings.");
1439 page &= PAGE_MASK;
1440 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1441 iopte_val(*iopte) = 0;
1442 iommu_invalidate_page(iommu->regs, page);
1443 if(iopte < iommu->lowest) {
1444 iommu->lowest = iopte;
1445 iommu->plow = page;
1446 }
1447 }
1448
1449 static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
1450 {
1451 struct iommu_struct *iommu = sbus->iommu;
1452 unsigned long page;
1453 iopte_t *iopte;
1454
1455 while(sz >= 0) {
1456 page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
1457 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
1458 iopte_val(*iopte) = 0;
1459 iommu_invalidate_page(iommu->regs, page);
1460 if(iopte < iommu->lowest) {
1461 iommu->lowest = iopte;
1462 iommu->plow = page;
1463 }
1464 sg[sz].alt_addr = 0;
1465 sz--;
1466 }
1467 }
1468
1469 static unsigned long mempool;
1470
1471
1472
1473
1474
1475
1476
1477 static unsigned long kbpage;
1478
1479
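/* The srmmu_early_*() helpers are used during early boot, while the kernel
 * still runs on the PROM's mappings: virtual/physical translation uses the
 * fixed offset kbpage instead of the srmmu_map[] table. */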
1480 static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
1481 {
1482 return ((vaddr - PAGE_OFFSET) + kbpage);
1483 }
1484
1485 static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
1486 {
1487 srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
1488 }
1489
1490 static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
1491 {
1492 srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
1493 }
1494
1495 static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
1496 {
1497 return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1498 }
1499
1500 static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
1501 {
1502 return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
1503 }
1504
1505 static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
1506 {
1507 return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
1508 }
1509
1510 static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
1511 {
1512 return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
1513 }
1514
1515
1516
1517
1518 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
1519 {
1520 unsigned long mask = size - 1;
1521 unsigned long ret;
1522
1523 if(!size)
1524 return 0x0;
1525 if(size & mask) {
1526 prom_printf("panic: srmmu_init_alloc botch\n");
1527 prom_halt();
1528 }
1529 ret = (*kbrk + mask) & ~mask;
1530 *kbrk = ret + size;
1531 memset((void*) ret, 0, size);
1532 return (void*) ret;
1533 }
1534
1535 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
1536 {
1537 pgd_t *pgdp;
1538 pmd_t *pmdp;
1539 pte_t *ptep;
1540
1541 while(start < end) {
1542 pgdp = srmmu_pgd_offset(init_task.mm, start);
1543 if(srmmu_pgd_none(*pgdp)) {
1544 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1545 srmmu_early_pgd_set(pgdp, pmdp);
1546 }
1547 pmdp = srmmu_early_pmd_offset(pgdp, start);
1548 if(srmmu_pmd_none(*pmdp)) {
1549 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1550 srmmu_early_pmd_set(pmdp, ptep);
1551 }
1552 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
1553 }
1554 }
1555
1556
1557
1558
1559
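/* Copy the PROM's existing translations into the kernel page tables by
 * probing each address with srmmu_hwprobe(); large PROM mappings are kept
 * at the pgd (what == 2) or pmd (what == 1) level rather than split into
 * individual pages. */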
1560 void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
1561 {
1562 pgd_t *pgdp;
1563 pmd_t *pmdp;
1564 pte_t *ptep;
1565 int what = 0;
1566 unsigned long prompte;
1567
1568 while(start <= end) {
1569 if (start == 0)
1570 break;
1571 if(start == 0xfef00000)
1572 start = KADB_DEBUGGER_BEGVM;
1573 if(!(prompte = srmmu_hwprobe(start))) {
1574 start += PAGE_SIZE;
1575 continue;
1576 }
1577
1578
1579 what = 0;
1580
1581 if(!(start & ~(SRMMU_PMD_MASK))) {
1582 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
1583 what = 1;
1584 }
1585
1586 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1587 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1588 prompte)
1589 what = 2;
1590 }
1591
1592 pgdp = srmmu_pgd_offset(init_task.mm, start);
1593 if(what == 2) {
1594 pgd_val(*pgdp) = prompte;
1595 start += SRMMU_PGDIR_SIZE;
1596 continue;
1597 }
1598 if(srmmu_pgd_none(*pgdp)) {
1599 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1600 srmmu_early_pgd_set(pgdp, pmdp);
1601 }
1602 pmdp = srmmu_early_pmd_offset(pgdp, start);
1603 if(what == 1) {
1604 pmd_val(*pmdp) = prompte;
1605 start += SRMMU_PMD_SIZE;
1606 continue;
1607 }
1608 if(srmmu_pmd_none(*pmdp)) {
1609 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1610 srmmu_early_pmd_set(pmdp, ptep);
1611 }
1612 ptep = srmmu_early_pte_offset(pmdp, start);
1613 pte_val(*ptep) = prompte;
1614 start += PAGE_SIZE;
1615 }
1616 }
1617
1618 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1619 {
1620 unsigned long start;
1621 pgprot_t dvma_prot;
1622 pgd_t *pgdp;
1623 pmd_t *pmdp;
1624 pte_t *ptep;
1625
1626 start = DVMA_VADDR;
1627 if (viking_mxcc_present)
1628 dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
1629 else
1630 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1631 while(first <= last) {
1632 pgdp = srmmu_pgd_offset(init_task.mm, start);
1633 pmdp = srmmu_pmd_offset(pgdp, start);
1634 ptep = srmmu_pte_offset(pmdp, start);
1635
1636 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1637
1638 first += PAGE_SIZE;
1639 start += PAGE_SIZE;
1640 }
1641
1642
1643 if (!viking_mxcc_present) {
1644 first = first_dvma_page;
1645 last = last_dvma_page;
1646 while(first <= last) {
1647 pgdp = srmmu_pgd_offset(init_task.mm, first);
1648 pmdp = srmmu_pmd_offset(pgdp, first);
1649 ptep = srmmu_pte_offset(pmdp, first);
1650 pte_val(*ptep) &= ~SRMMU_CACHE;
1651 first += PAGE_SIZE;
1652 }
1653 }
1654 }
1655
1656 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1657 {
1658 unsigned long last_page;
1659 int srmmu_bank, phys_bank, i;
1660 pgd_t *pgdp;
1661 pmd_t *pmdp;
1662 pte_t *ptep;
1663
1664 end = PAGE_ALIGN(end);
1665
1666 if(start == (KERNBASE + PAGE_SIZE)) {
1667 unsigned long pte;
1668 unsigned long tmp;
1669
1670 pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
1671 pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
1672 ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
1673
1674
1675 tmp = kbpage;
1676 pte = (tmp) >> 4;
1677 pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1678 pte_val(*ptep) = pte;
1679 }
1680
1681
1682 last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1683 while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
1684 unsigned long tmp;
1685
1686 pgdp = srmmu_pgd_offset(init_task.mm, start);
1687 pmdp = srmmu_early_pmd_offset(pgdp, start);
1688 ptep = srmmu_early_pte_offset(pmdp, start);
1689 tmp = srmmu_hwprobe(start);
1690 tmp &= ~(0xff);
1691 tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
1692 pte_val(*ptep) = tmp;
1693 start += PAGE_SIZE;
1694 tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
1695
1696
1697 if(tmp != last_page + PAGE_SIZE)
1698 break;
1699 last_page = tmp;
1700 }
1701
1702
1703
1704
1705 for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
1706 if(kbpage >= sp_banks[phys_bank].base_addr &&
1707 (kbpage <
1708 (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
1709 break;
1710 }
1711 srmmu_bank = 0;
1712 srmmu_map[srmmu_bank].vbase = KERNBASE;
1713 srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
1714 srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
1715 if(kbpage != sp_banks[phys_bank].base_addr) {
1716 prom_printf("Detected PenguinPages, getting out of here.\n");
1717 prom_halt();
1718 #if 0
1719 srmmu_map[srmmu_bank].pbase = kbpage;
1720 srmmu_map[srmmu_bank].size -=
1721 (kbpage - sp_banks[phys_bank].base_addr);
1722 #endif
1723 }
1724
1725
1726
1727 while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
1728 unsigned long pteval;
1729
1730 pgdp = srmmu_pgd_offset(init_task.mm, start);
1731 pmdp = srmmu_early_pmd_offset(pgdp, start);
1732 ptep = srmmu_early_pte_offset(pmdp, start);
1733
1734 pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
1735 pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1736 pte_val(*ptep) = pteval;
1737 start += PAGE_SIZE;
1738 }
1739
1740
1741 sp_banks[phys_bank].base_addr |= 1;
1742 srmmu_bank++;
1743
1744
1745 while(start < end) {
1746 unsigned long baddr;
1747 int btg;
1748
1749
1750 for(i=0; sp_banks[i].num_bytes != 0; i++)
1751 if(!(sp_banks[i].base_addr & 1))
1752 break;
1753 if(sp_banks[i].num_bytes == 0)
1754 break;
1755
1756
1757 srmmu_map[srmmu_bank].vbase = start;
1758 srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
1759 srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
1760 srmmu_bank++;
1761
1762 btg = sp_banks[i].num_bytes;
1763 baddr = sp_banks[i].base_addr;
1764 while(btg) {
1765 pgdp = srmmu_pgd_offset(init_task.mm, start);
1766 pmdp = srmmu_early_pmd_offset(pgdp, start);
1767 ptep = srmmu_early_pte_offset(pmdp, start);
1768 pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
1769 pte_val(*ptep) |= (baddr >> 4);
1770
1771 baddr += PAGE_SIZE;
1772 start += PAGE_SIZE;
1773 btg -= PAGE_SIZE;
1774 }
1775 sp_banks[i].base_addr |= 1;
1776 }
1777 if(start < end) {
1778 prom_printf("weird, didn't use all of physical memory... ");
1779 prom_halt();
1780 }
1781 for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
1782 sp_banks[phys_bank].base_addr &= ~1;
1783 #if 0
1784 for(i = 0; srmmu_map[i].size != 0; i++) {
1785 prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
1786 i, srmmu_map[i].vbase,
1787 srmmu_map[i].pbase, srmmu_map[i].size);
1788 }
1789 prom_getchar();
1790 for(i = 0; sp_banks[i].num_bytes != 0; i++) {
1791 prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
1792 i,
1793 sp_banks[i].base_addr,
1794 sp_banks[i].num_bytes);
1795 }
1796 prom_getchar();
1797 prom_halt();
1798 #endif
1799 }
1800
1801
1802 extern unsigned long free_area_init(unsigned long, unsigned long);
1803 extern unsigned long sparc_context_init(unsigned long, int);
1804
1805 extern int physmem_mapped_contig;
1806 extern int linux_num_cpus;
1807
1808 void (*poke_srmmu)(void);
1809
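/* Boot-time MMU setup: determine the number of contexts from the PROM,
 * build the kernel page tables, inherit the PROM and DVMA mappings, uncache
 * the page tables if required, and finally point the MMU at the new context
 * table. */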
1810 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1811 {
1812 unsigned long ptables_start, first_mapped_page;
1813 int i, cpunode;
1814 char node_str[128];
1815 pgd_t *pgdp;
1816 pmd_t *pmdp;
1817 pte_t *ptep;
1818
1819 physmem_mapped_contig = 0;
1820
1821 #if CONFIG_AP1000
1822 printk("Forcing num_contexts to 1024\n");
1823 num_contexts = 1024;
1824 #else
1825
1826 cpunode = prom_getchild(prom_root_node);
1827 num_contexts = 0;
1828 while((cpunode = prom_getsibling(cpunode)) != 0) {
1829 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1830 if(!strcmp(node_str, "cpu")) {
1831 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1832 break;
1833 }
1834 }
1835 #endif
1836 if(!num_contexts) {
1837 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1838 prom_halt();
1839 }
1840
1841 ptables_start = mempool = PAGE_ALIGN(start_mem);
1842 memset(swapper_pg_dir, 0, PAGE_SIZE);
1843 first_mapped_page = KERNBASE;
1844 kbpage = srmmu_hwprobe(KERNBASE);
1845 if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
1846 kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
1847 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1848 kbpage -= PAGE_SIZE;
1849 first_mapped_page += PAGE_SIZE;
1850 } else
1851 kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
1852
1853 srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
1854 #if CONFIG_SUN_IO
1855 srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1856 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1857 #endif
1858
1859
1860 mempool = PAGE_ALIGN(mempool);
1861 first_dvma_page = mempool;
1862 last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1863 mempool = last_dvma_page + PAGE_SIZE;
1864
1865 #if CONFIG_AP1000
1866 ap_inherit_mappings();
1867 #else
1868 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1869 #endif
1870 srmmu_map_kernel(first_mapped_page, end_mem);
1871 #if CONFIG_SUN_IO
1872 srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1873 #endif
1874 srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1875 srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
1876 for(i = 0; i < num_contexts; i++)
1877 ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1878
1879 start_mem = PAGE_ALIGN(mempool);
1880
1881
1882 if(!can_cache_ptables) {
1883 for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
1884 pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
1885 pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
1886 ptep = srmmu_early_pte_offset(pmdp, ptables_start);
1887 pte_val(*ptep) &= ~SRMMU_CACHE;
1888 }
1889
1890 pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
1891 pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
1892 ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
1893 pte_val(*ptep) &= ~SRMMU_CACHE;
1894 }
1895
1896 flush_cache_all();
1897 srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
1898 flush_tlb_all();
1899 poke_srmmu();
1900
1901 start_mem = sparc_context_init(start_mem, num_contexts);
1902 start_mem = free_area_init(start_mem, end_mem);
1903
1904 return PAGE_ALIGN(start_mem);
1905 }
1906
1907 static char srmmuinfo[512];
1908
1909 static char *srmmu_mmu_info(void)
1910 {
1911 sprintf(srmmuinfo, "MMU type\t: %s\n"
1912 "invall\t\t: %d\n"
1913 "invmm\t\t: %d\n"
1914 "invrnge\t\t: %d\n"
1915 "invpg\t\t: %d\n"
1916 "contexts\t: %d\n"
1917 "big_chunks\t: %d\n"
1918 "little_chunks\t: %d\n",
1919 srmmu_name,
1920 module_stats.invall,
1921 module_stats.invmm,
1922 module_stats.invrnge,
1923 module_stats.invpg,
1924 num_contexts,
1925 #if 0
1926 num_big_chunks,
1927 num_little_chunks
1928 #else
1929 0, 0
1930 #endif
1931 );
1932 return srmmuinfo;
1933 }
1934
1935 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1936 {
1937 }
1938
1939 static void srmmu_exit_hook(void)
1940 {
1941 struct ctx_list *ctx_old;
1942 struct mm_struct *mm = current->mm;
1943
1944 if(mm->context != NO_CONTEXT) {
1945 flush_cache_mm(mm);
1946 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1947 flush_tlb_mm(mm);
1948 ctx_old = ctx_list_pool + mm->context;
1949 remove_from_ctx_list(ctx_old);
1950 add_to_free_ctxlist(ctx_old);
1951 mm->context = NO_CONTEXT;
1952 }
1953 }
1954
1955 static void srmmu_flush_hook(void)
1956 {
1957 if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1958 alloc_context(current->mm);
1959 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1960 srmmu_set_context(current->mm->context);
1961 }
1962 }
1963
1964 static void hypersparc_exit_hook(void)
1965 {
1966 struct ctx_list *ctx_old;
1967 struct mm_struct *mm = current->mm;
1968
1969 if(mm->context != NO_CONTEXT) {
1970
1971
1972
1973
1974
1975 flush_cache_mm(mm);
1976 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1977 flush_tlb_mm(mm);
1978 ctx_old = ctx_list_pool + mm->context;
1979 remove_from_ctx_list(ctx_old);
1980 add_to_free_ctxlist(ctx_old);
1981 mm->context = NO_CONTEXT;
1982 }
1983 }
1984
1985 static void hypersparc_flush_hook(void)
1986 {
1987 if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1988 alloc_context(current->mm);
1989 flush_cache_mm(current->mm);
1990 ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1991 srmmu_set_context(current->mm->context);
1992 }
1993 }
1994
1995
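/* Per-module setup: each init_*() routine below fills in the flush function
 * pointers for its MMU module and sets poke_srmmu to the routine that
 * enables that module's caches. */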
1996 void srmmu_is_bad(void)
1997 {
1998 prom_printf("Could not determine SRMMU chip type.\n");
1999 prom_halt();
2000 }
2001
2002 void poke_hypersparc(void)
2003 {
2004 volatile unsigned long clear;
2005 unsigned long mreg = srmmu_get_mmureg();
2006
2007 hyper_flush_unconditional_combined();
2008
2009 mreg &= ~(HYPERSPARC_CWENABLE);
2010 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
2011 mreg |= (HYPERSPARC_CMODE);
2012
2013 srmmu_set_mmureg(mreg);
2014 hyper_clear_all_tags();
2015
2016 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
2017 hyper_flush_whole_icache();
2018 clear = srmmu_get_faddr();
2019 clear = srmmu_get_fstatus();
2020 }
2021
2022 void init_hypersparc(void)
2023 {
2024 unsigned long mreg = srmmu_get_mmureg();
2025
2026 srmmu_name = "ROSS HyperSparc";
2027 can_cache_ptables = 0;
2028 if(mreg & HYPERSPARC_CSIZE) {
2029 hyper_cache_size = (256 * 1024);
2030 hyper_line_size = 64;
2031 } else {
2032 hyper_cache_size = (128 * 1024);
2033 hyper_line_size = 32;
2034 }
2035
2036 flush_cache_all = hypersparc_flush_cache_all;
2037 flush_cache_mm = hypersparc_flush_cache_mm;
2038 flush_cache_range = hypersparc_flush_cache_range;
2039 flush_cache_page = hypersparc_flush_cache_page;
2040
2041 flush_tlb_all = hypersparc_flush_tlb_all;
2042 flush_tlb_mm = hypersparc_flush_tlb_mm;
2043 flush_tlb_range = hypersparc_flush_tlb_range;
2044 flush_tlb_page = hypersparc_flush_tlb_page;
2045
2046 flush_page_to_ram = hypersparc_flush_page_to_ram;
2047 flush_page_for_dma = hypersparc_flush_page_for_dma;
2048 flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
2049 flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
2050
2051 ctxd_set = hypersparc_ctxd_set;
2052 switch_to_context = hypersparc_switch_to_context;
2053 mmu_exit_hook = hypersparc_exit_hook;
2054 mmu_flush_hook = hypersparc_flush_hook;
2055 sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
2056 set_pte = hypersparc_set_pte;
2057 poke_srmmu = poke_hypersparc;
2058 }
2059
2060 void poke_cypress(void)
2061 {
2062 unsigned long mreg = srmmu_get_mmureg();
2063
2064 mreg &= ~CYPRESS_CMODE;
2065 mreg |= CYPRESS_CENABLE;
2066 srmmu_set_mmureg(mreg);
2067 }
2068
2069 void init_cypress_common(void)
2070 {
2071 can_cache_ptables = 0;
2072 flush_tlb_all = cypress_flush_tlb_all;
2073 flush_tlb_mm = cypress_flush_tlb_mm;
2074 flush_tlb_page = cypress_flush_tlb_page;
2075 flush_tlb_range = cypress_flush_tlb_range;
2076 poke_srmmu = poke_cypress;
2077
2078 /* Note: no Cypress-specific cache flush routines are installed
2079  * yet; only the TLB flushes above are chip-specific so far. */
2080 }
2081
2082 void init_cypress_604(void)
2083 {
2084 srmmu_name = "ROSS Cypress-604(UP)";
2085 srmmu_modtype = Cypress;
2086 init_cypress_common();
2087 }
2088
2089 void init_cypress_605(unsigned long mrev)
2090 {
2091 srmmu_name = "ROSS Cypress-605(MP)";
2092 if(mrev == 0xe) {
2093 srmmu_modtype = Cypress_vE;
2094 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
2095 } else {
2096 if(mrev == 0xd) {
2097 srmmu_modtype = Cypress_vD;
2098 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
2099 } else {
2100 srmmu_modtype = Cypress;
2101 }
2102 }
2103 init_cypress_common();
2104 }
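/*
 * The HWBUG_* bits accumulated in hwbug_bitmask above are only
 * recorded here; code elsewhere in the port is expected to test them,
 * roughly as
 *
 *	if(hwbug_bitmask & HWBUG_COPYBACK_BROKEN)
 *		... apply the workaround ...
 *
 * so the module revision is picked apart once at init time instead of
 * on every hot path.
 */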
2105
2106 void poke_swift(void)
2107 {
2108 unsigned long mreg = srmmu_get_mmureg();
2109
2110 /* Clear the on-chip instruction and data caches before enabling them. */
2111 swift_idflash_clear();
2112 mreg |= (SWIFT_IE | SWIFT_DE);
2113
2114 /* The Swift branch-folding logic is unreliable: at trap time it
2115  * can mis-execute the branch in the trap code and treat a trap
2116  * from user mode as if it came from kernel mode, with fatal
2117  * results, so branch folding (SWIFT_BF) is kept disabled below.
2118  */
2119
2120
2121
2122 mreg &= ~(SWIFT_BF);
2123 srmmu_set_mmureg(mreg);
2124 }
2125
2126 #define SWIFT_MASKID_ADDR 0x10003018
2127 void init_swift(void)
2128 {
2129 unsigned long swift_rev;
2130
2131 __asm__ __volatile__("lda [%1] %2, %0\n\t"
2132 "srl %0, 0x18, %0\n\t" :
2133 "=r" (swift_rev) :
2134 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
2135 srmmu_name = "Fujitsu Swift";
2136 switch(swift_rev) {
2137 case 0x11:
2138 case 0x20:
2139 case 0x23:
2140 case 0x30:
2141 srmmu_modtype = Swift_lots_o_bugs;
2142 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
2143 /* On these revisions a page fault can cause the MMU to clear
2144  * ACC bits in kernel PTEs, leaving kernel pages accessible
2145  * from user mode, and the cacheable-bit handling in kernel
2146  * PTEs is unreliable as well (hence the two HWBUG flags set
2147  * above).  Mapping the kernel with large, pgd-level PTEs
2148  * appears to avoid tripping the ACC bug, which keeps the
2149  * workaround cost down.
2150  */
2151
2152
2153
2154
2155
2156
2157
2158
2159 break;
2160 case 0x25:
2161 case 0x31:
2162 srmmu_modtype = Swift_bad_c;
2163 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
2164 /* Only the cacheable-bit handling of kernel PTEs is suspect
2165  * on these revisions (HWBUG_KERN_CBITBROKEN); the ACC bits
2166  * are handled correctly.
2167  */
2168 break;
2169 default:
2170 srmmu_modtype = Swift_ok;
2171 break;
2172 }
2173
2174 flush_cache_all = swift_flush_cache_all;
2175 flush_cache_mm = swift_flush_cache_mm;
2176 flush_cache_page = swift_flush_cache_page;
2177 flush_cache_range = swift_flush_cache_range;
2178
2179 flush_tlb_all = swift_flush_tlb_all;
2180 flush_tlb_mm = swift_flush_tlb_mm;
2181 flush_tlb_page = swift_flush_tlb_page;
2182 flush_tlb_range = swift_flush_tlb_range;
2183
2184 flush_page_to_ram = swift_flush_page_to_ram;
2185 flush_page_for_dma = swift_flush_page_for_dma;
2186 flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
2187 flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
2188
2189
2190
2191
2192
2193
2194
2195 poke_srmmu = poke_swift;
2196 }
2197
2198 void poke_tsunami(void)
2199 {
2200 unsigned long mreg = srmmu_get_mmureg();
2201
2202 tsunami_flush_icache();
2203 tsunami_flush_dcache();
2204 mreg &= ~TSUNAMI_ITD;
2205 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
2206 srmmu_set_mmureg(mreg);
2207 }
2208
2209 void init_tsunami(void)
2210 {
2211 /* Tsunami needs no bug workarounds: no HWBUG bits are set,
2212  * and its page tables may live in cacheable memory.
2213  */
2214
2215
2216 srmmu_name = "TI Tsunami";
2217 srmmu_modtype = Tsunami;
2218 can_cache_ptables = 1;
2219
2220 flush_cache_all = tsunami_flush_cache_all;
2221 flush_cache_mm = tsunami_flush_cache_mm;
2222 flush_cache_page = tsunami_flush_cache_page;
2223 flush_cache_range = tsunami_flush_cache_range;
2224
2225 flush_tlb_all = tsunami_flush_tlb_all;
2226 flush_tlb_mm = tsunami_flush_tlb_mm;
2227 flush_tlb_page = tsunami_flush_tlb_page;
2228 flush_tlb_range = tsunami_flush_tlb_range;
2229
2230 flush_page_to_ram = tsunami_flush_page_to_ram;
2231 flush_page_for_dma = tsunami_flush_page_for_dma;
2232 flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
2233 flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
2234
2235 poke_srmmu = poke_tsunami;
2236 }
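/*
 * can_cache_ptables records whether page tables may live in cacheable
 * memory on this chip.  Of the configurations handled in this file
 * only Tsunami (above) and Viking-with-MXCC (below) allow it,
 * presumably because on the other chips the hardware table walk could
 * not be trusted to see cached PTE updates.
 */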
2237
2238 void poke_viking(void)
2239 {
2240 unsigned long mreg = srmmu_get_mmureg();
2241 static int smp_catch = 0;
2242
2243 if(viking_mxcc_present) {
2244 unsigned long mxcc_control;
2245
2246 __asm__ __volatile__("set -1, %%g2\n\t"
2247 "set -1, %%g3\n\t"
2248 "stda %%g2, [%1] %2\n\t"
2249 "lda [%3] %2, %0\n\t" :
2250 "=r" (mxcc_control) :
2251 "r" (MXCC_EREG), "i" (ASI_M_MXCC),
2252 "r" (MXCC_CREG) : "g2", "g3");
2253 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
2254 mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
2255 mreg &= ~(VIKING_PCENABLE);
2256 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
2257 "r" (mxcc_control), "r" (MXCC_CREG),
2258 "i" (ASI_M_MXCC));
2259 srmmu_set_mmureg(mreg);
2260 mreg |= VIKING_TCENABLE;
2261 } else {
2262 unsigned long bpreg;
2263
2264 mreg &= ~(VIKING_TCENABLE);
2265 if(smp_catch++) {
2266 /* On every CPU after the first, mixed-command mode must be
2267  * disabled in the breakpoint action register as well.
2268  */
2269 bpreg = viking_get_bpreg();
2270 bpreg &= ~(VIKING_ACTION_MIX);
2271 viking_set_bpreg(bpreg);
2272 /* Put the MSI back into synchronous mode in case the PROM
2273  * left it configured some other way. */
2274 msi_set_sync();
2275 }
2276 }
2277
2278 viking_unlock_icache();
2279 viking_flush_icache();
2280 #if 0
2281 viking_unlock_dcache();
2282 viking_flush_dcache();
2283 #endif
2284 mreg |= VIKING_SPENABLE;
2285 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
2286 mreg |= VIKING_SBENABLE;
2287 mreg &= ~(VIKING_ACENABLE);
2288 #if CONFIG_AP1000
2289 mreg &= ~(VIKING_SBENABLE);
2290 #endif
2291 #ifdef __SMP__
2292 mreg &= ~(VIKING_SBENABLE);
2293 #endif
2294 srmmu_set_mmureg(mreg);
2295 }
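/*
 * Reading poke_viking(): the stda stores a 64-bit pattern of all ones
 * to the MXCC error register, which presumably clears any latched
 * error state, and the MXCC control register is then
 * read-modify-written to turn on the features named by the CTL_* bits.
 * On the non-MXCC path, every CPU after the first also drops
 * mixed-command mode and forces the MSI state, mirroring what
 * init_viking() already did on the boot CPU.
 */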
2296
2297 void init_viking(void)
2298 {
2299 unsigned long mreg = srmmu_get_mmureg();
2300
2301 /* VIKING_MMODE set means the chip runs in MBus mode, i.e.
2302  * without an external MXCC cache controller in front of it. */
2303 if(mreg & VIKING_MMODE) {
2304 unsigned long bpreg;
2305
2306 srmmu_name = "TI Viking";
2307 viking_mxcc_present = 0;
2308 can_cache_ptables = 0;
2309
2310 bpreg = viking_get_bpreg();
2311 bpreg &= ~(VIKING_ACTION_MIX);
2312 viking_set_bpreg(bpreg);
2313
2314 msi_set_sync();
2315
2316 flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
2317 } else {
2318 srmmu_name = "TI Viking/MXCC";
2319 viking_mxcc_present = 1;
2320 can_cache_ptables = 1;
2321 flush_cache_page_to_uncache = viking_mxcc_flush_page;
2322 }
2323
2324 flush_cache_all = viking_flush_cache_all;
2325 flush_cache_mm = viking_flush_cache_mm;
2326 flush_cache_page = viking_flush_cache_page;
2327 flush_cache_range = viking_flush_cache_range;
2328
2329 flush_tlb_all = viking_flush_tlb_all;
2330 flush_tlb_mm = viking_flush_tlb_mm;
2331 flush_tlb_page = viking_flush_tlb_page;
2332 flush_tlb_range = viking_flush_tlb_range;
2333
2334 flush_page_to_ram = viking_flush_page_to_ram;
2335 flush_page_for_dma = viking_flush_page_for_dma;
2336 flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
2337
2338 poke_srmmu = poke_viking;
2339 }
2340
2341 /* Probe the module and PSR version fields to pick the right per-chip init routine. */
2342 static void get_srmmu_type(void)
2343 {
2344 unsigned long mreg, psr;
2345 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
2346
2347 srmmu_modtype = SRMMU_INVAL_MOD;
2348 hwbug_bitmask = 0;
2349
2350 mreg = srmmu_get_mmureg(); psr = get_psr();
2351 mod_typ = (mreg & 0xf0000000) >> 28;
2352 mod_rev = (mreg & 0x0f000000) >> 24;
2353 psr_typ = (psr >> 28) & 0xf;
2354 psr_vers = (psr >> 24) & 0xf;
2355
2356 /* First, check for the ROSS parts: HyperSparc or Cypress. */
2357 if(mod_typ == 1) {
2358 switch(mod_rev) {
2359 case 7:
2360 /* UP or MP HyperSparc */
2361 init_hypersparc();
2362 break;
2363 case 0:
2364 /* Uniprocessor Cypress-604 */
2365 init_cypress_604();
2366 break;
2367 case 13:
2368 case 14:
2369 case 15:
2370 /* Multiprocessor Cypress-605 */
2371 init_cypress_605(mod_rev);
2372 break;
2373 default:
2374 srmmu_is_bad();
2375 break;
2376 }
2377 return;
2378 }
2379
2380 /* Next, check for a Fujitsu Swift. */
2381 if(psr_typ == 0 && psr_vers == 4) {
2382 init_swift();
2383 return;
2384 }
2385
2386 /* Now the TI Viking family. */
2387 if(psr_typ == 4 &&
2388 ((psr_vers == 0) ||
2389 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
2390 init_viking();
2391 return;
2392 }
2393
2394 /* Finally, the TI Tsunami. */
2395 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
2396 init_tsunami();
2397 return;
2398 }
2399
2400 /* Nothing matched; report the failure and halt. */
2401 srmmu_is_bad();
2402 }
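/*
 * Summary of the probe above.  mod_typ/mod_rev come from the top byte
 * of the MMU control register, psr_typ/psr_vers from the top byte of
 * the PSR:
 *
 *	mod_typ 1, mod_rev 7                 ROSS HyperSparc
 *	mod_typ 1, mod_rev 0                 ROSS Cypress-604 (UP)
 *	mod_typ 1, mod_rev 13/14/15          ROSS Cypress-605 (MP)
 *	psr_typ 0, psr_vers 4                Fujitsu Swift
 *	psr_typ 4, psr_vers 0                TI Viking
 *	psr_typ 4, psr_vers 1, module 0/0    TI Viking
 *	psr_typ 4, psr_vers 1, module != 0   TI Tsunami
 *	anything else                        srmmu_is_bad()
 */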
2403
2404 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
2405 tsetup_mmu_patchme, rtrap_mmu_patchme;
2406
2407 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
2408 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
2409
2410 #ifdef __SMP__
2411 extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
2412 #endif
2413
2414 extern unsigned long srmmu_fault;
2415
2416 #define PATCH_BRANCH(insn, dest) do { \
2417 iaddr = &(insn); \
2418 daddr = &(dest); \
2419 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
2420 } while(0)
2421
2422 static void patch_window_trap_handlers(void)
2423 {
2424 unsigned long *iaddr, *daddr;
2425
2426 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
2427 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
2428 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
2429 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
2430 #ifdef __SMP__
2431 PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
2432 #endif
2433 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
2434 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
2435 PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
2436 }
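/*
 * SPARC_BRANCH() is defined elsewhere; a sketch of what it presumably
 * produces is a "ba" (branch always) instruction whose signed 22-bit
 * displacement is the word distance from the patched slot to the
 * handler, i.e. something along the lines of
 *
 *	0x10800000 | ((((dest) - (iaddr)) >> 2) & 0x3fffff)
 *
 * Patching the instructions in place keeps the window overflow,
 * underflow and trap-return fast paths free of indirect jumps.
 */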
2437
2438 #ifdef __SMP__
2439 /* SMP cross-call wrappers for the per-page flush operations. */
2440 static void smp_flush_page_for_dma(unsigned long page)
2441 {
2442 xc1((smpfunc_t) local_flush_page_for_dma, page);
2443 }
2444
2445 static void smp_flush_cache_page_to_uncache(unsigned long page)
2446 {
2447 xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
2448 }
2449
2450 static void smp_flush_tlb_page_for_cbit(unsigned long page)
2451 {
2452 xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
2453 }
2454 #endif
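/*
 * The smp_flush_* wrappers above push a per-page flush through the
 * cross-call machinery (xc1()); the function actually invoked on each
 * processor is whatever chip-specific routine ld_mmu_srmmu() below
 * saves in the corresponding local_flush_* pointer.
 */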
2455
2456 /* Load up routines and constants for the SRMMU (sun4m) MMU. */
2457 void ld_mmu_srmmu(void)
2458 {
2459 /* First the constants... */
2460 pmd_shift = SRMMU_PMD_SHIFT;
2461 pmd_size = SRMMU_PMD_SIZE;
2462 pmd_mask = SRMMU_PMD_MASK;
2463 pgdir_shift = SRMMU_PGDIR_SHIFT;
2464 pgdir_size = SRMMU_PGDIR_SIZE;
2465 pgdir_mask = SRMMU_PGDIR_MASK;
2466
2467 ptrs_per_pte = SRMMU_PTRS_PER_PTE;
2468 ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
2469 ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
2470
2471 page_none = SRMMU_PAGE_NONE;
2472 page_shared = SRMMU_PAGE_SHARED;
2473 page_copy = SRMMU_PAGE_COPY;
2474 page_readonly = SRMMU_PAGE_RDONLY;
2475 page_kernel = SRMMU_PAGE_KERNEL;
2476 pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
2477
2478 /* Now the function pointers. */
2479 set_pte = srmmu_set_pte;
2480 switch_to_context = srmmu_switch_to_context;
2481 pmd_align = srmmu_pmd_align;
2482 pgdir_align = srmmu_pgdir_align;
2483 vmalloc_start = srmmu_vmalloc_start;
2484
2485 pte_page = srmmu_pte_page;
2486 pmd_page = srmmu_pmd_page;
2487 pgd_page = srmmu_pgd_page;
2488
2489 sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
2490
2491 pte_none = srmmu_pte_none;
2492 pte_present = srmmu_pte_present;
2493 pte_clear = srmmu_pte_clear;
2494
2495 pmd_none = srmmu_pmd_none;
2496 pmd_bad = srmmu_pmd_bad;
2497 pmd_present = srmmu_pmd_present;
2498 pmd_clear = srmmu_pmd_clear;
2499
2500 pgd_none = srmmu_pgd_none;
2501 pgd_bad = srmmu_pgd_bad;
2502 pgd_present = srmmu_pgd_present;
2503 pgd_clear = srmmu_pgd_clear;
2504
2505 mk_pte = srmmu_mk_pte;
2506 pgd_set = srmmu_pgd_set;
2507 mk_pte_io = srmmu_mk_pte_io;
2508 pte_modify = srmmu_pte_modify;
2509 pgd_offset = srmmu_pgd_offset;
2510 pmd_offset = srmmu_pmd_offset;
2511 pte_offset = srmmu_pte_offset;
2512 pte_free_kernel = srmmu_pte_free_kernel;
2513 pmd_free_kernel = srmmu_pmd_free_kernel;
2514 pte_alloc_kernel = srmmu_pte_alloc_kernel;
2515 pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
2516 pte_free = srmmu_pte_free;
2517 pte_alloc = srmmu_pte_alloc;
2518 pmd_free = srmmu_pmd_free;
2519 pmd_alloc = srmmu_pmd_alloc;
2520 pgd_free = srmmu_pgd_free;
2521 pgd_alloc = srmmu_pgd_alloc;
2522
2523 pte_write = srmmu_pte_write;
2524 pte_dirty = srmmu_pte_dirty;
2525 pte_young = srmmu_pte_young;
2526 pte_wrprotect = srmmu_pte_wrprotect;
2527 pte_mkclean = srmmu_pte_mkclean;
2528 pte_mkold = srmmu_pte_mkold;
2529 pte_mkwrite = srmmu_pte_mkwrite;
2530 pte_mkdirty = srmmu_pte_mkdirty;
2531 pte_mkyoung = srmmu_pte_mkyoung;
2532 update_mmu_cache = srmmu_update_mmu_cache;
2533 mmu_exit_hook = srmmu_exit_hook;
2534 mmu_flush_hook = srmmu_flush_hook;
2535 mmu_lockarea = srmmu_lockarea;
2536 mmu_unlockarea = srmmu_unlockarea;
2537
2538 mmu_get_scsi_one = srmmu_get_scsi_one;
2539 mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
2540 mmu_release_scsi_one = srmmu_release_scsi_one;
2541 mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
2542
2543 mmu_info = srmmu_mmu_info;
2544 mmu_v2p = srmmu_v2p;
2545 mmu_p2v = srmmu_p2v;
2546
2547 /* Task struct and kernel stack allocation/freeing. */
2548 alloc_kernel_stack = srmmu_alloc_kernel_stack;
2549 alloc_task_struct = srmmu_alloc_task_struct;
2550 free_kernel_stack = srmmu_free_kernel_stack;
2551 free_task_struct = srmmu_free_task_struct;
2552
2553 quick_kernel_fault = srmmu_quick_kernel_fault;
2554 /* Generic SRMMU defaults; get_srmmu_type() below may override
2555  * these (HyperSparc, for instance, installs its own ctxd_set). */
2556 ctxd_set = srmmu_ctxd_set;
2557 pmd_set = srmmu_pmd_set;
2558
2559 get_srmmu_type();
2560 patch_window_trap_handlers();
2561
2562 #ifdef __SMP__
2563 /* Save the chip routines chosen above as the "local" flushes,
2564  * then route the global entry points through the SMP wrappers. */
2565 local_flush_cache_all = flush_cache_all;
2566 local_flush_cache_mm = flush_cache_mm;
2567 local_flush_cache_range = flush_cache_range;
2568 local_flush_cache_page = flush_cache_page;
2569 local_flush_tlb_all = flush_tlb_all;
2570 local_flush_tlb_mm = flush_tlb_mm;
2571 local_flush_tlb_range = flush_tlb_range;
2572 local_flush_tlb_page = flush_tlb_page;
2573 local_flush_page_to_ram = flush_page_to_ram;
2574 local_flush_page_for_dma = flush_page_for_dma;
2575 local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
2576 local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
2577
2578 flush_cache_all = smp_flush_cache_all;
2579 flush_cache_mm = smp_flush_cache_mm;
2580 flush_cache_range = smp_flush_cache_range;
2581 flush_cache_page = smp_flush_cache_page;
2582 flush_tlb_all = smp_flush_tlb_all;
2583 flush_tlb_mm = smp_flush_tlb_mm;
2584 flush_tlb_range = smp_flush_tlb_range;
2585 flush_tlb_page = smp_flush_tlb_page;
2586 flush_page_to_ram = smp_flush_page_to_ram;
2587 flush_page_for_dma = smp_flush_page_for_dma;
2588 flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
2589 flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
2590 #endif
2591 }
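/*
 * A minimal usage sketch (illustrative only; the call sites live
 * outside this file): the boot code is expected to call
 * ld_mmu_srmmu() once, before anything goes through the pointers
 * installed above, roughly
 *
 *	ld_mmu_srmmu();
 *	...
 *	srmmu_paging_init();	(builds the kernel page tables)
 *
 * By the time ld_mmu_srmmu() returns, get_srmmu_type() has specialised
 * the flush routines for the detected chip and, on SMP, every flush is
 * routed through the cross-call wrappers installed at the end.
 */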