This source file includes the following definitions:
- srmmu_swap
- gensrmmu_read_physical
- msparc_read_physical
- gensrmmu_write_physical
- msparc_write_physical
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_vmalloc_start
- srmmu_pgd_page
- srmmu_pmd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_inuse
- srmmu_pte_clear
- srmmu_pte_reuse
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_inuse
- srmmu_pmd_clear
- srmmu_pmd_reuse
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_inuse
- srmmu_pgd_clear
- srmmu_pgd_reuse
- srmmu_pte_write
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_wrprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_mkwrite
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_mk_pte
- srmmu_mk_pte_io
- srmmu_ctxd_set
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- tsunami_invalidate_all
- tsunami_invalidate_mm
- tsunami_invalidate_range
- tsunami_invalidate_page
- swift_invalidate_all
- swift_invalidate_mm
- swift_invalidate_range
- swift_invalidate_page
- viking_invalidate_all
- viking_invalidate_mm
- viking_invalidate_range
- viking_invalidate_page
- cypress_invalidate_all
- cypress_invalidate_mm
- cypress_invalidate_range
- cypress_invalidate_page
- hypersparc_invalidate_all
- hypersparc_invalidate_mm
- hypersparc_invalidate_range
- hypersparc_invalidate_page
- srmmu_set_pte
- srmmu_quick_kernel_fault
- alloc_context
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_lockarea
- srmmu_unlockarea
- srmmu_map_dvma_pages_for_iommu
- srmmu_uncache_iommu_page_table
- iommu_init
- srmmu_get_scsi_buffer
- srmmu_release_scsi_buffer
- srmmu_alloc_task_struct
- srmmu_alloc_kernel_stack
- srmmu_free_task_struct
- srmmu_free_kernel_stack
- srmmu_init_alloc
- srmmu_allocate_ptable_skeleton
- srmmu_inherit_prom_mappings
- srmmu_map_dvma_pages_for_cpu
- srmmu_map_kernel
- srmmu_paging_init
- srmmu_test_wp
- srmmu_mmu_info
- srmmu_update_mmu_cache
- srmmu_exit_hook
- srmmu_flush_hook
- srmmu_is_bad
- init_hypersparc
- init_cypress_common
- init_cypress_604
- init_cypress_605
- init_swift
- init_tsunami
- init_viking
- get_srmmu_type
- patch_window_trap_handlers
- ld_mmu_srmmu
1
2
3
4
5
6
7
8 #include <linux/kernel.h>
9
10 #include <asm/page.h>
11 #include <asm/pgtable.h>
12 #include <asm/io.h>
13 #include <asm/kdebug.h>
14 #include <asm/vaddrs.h>
15 #include <asm/traps.h>
16 #include <asm/mp.h>
17 #include <asm/mbus.h>
18 #include <asm/cache.h>
19 #include <asm/oplib.h>
20 #include <asm/sbus.h>
21 #include <asm/iommu.h>
22
23
24 #include <asm/viking.h>
25 #include <asm/ross.h>
26 #include <asm/tsunami.h>
27 #include <asm/swift.h>
28
29 enum mbus_module srmmu_modtype;
30 unsigned int hwbug_bitmask;
31
32 int hyper_cache_size;
33
34 ctxd_t *srmmu_context_table;
35
36
37
38
39
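/*
 * Atomically exchange *addr with value using the SPARC "swap" instruction.
 * srmmu_set_entry() below relies on this so that page table entries are
 * updated with a single atomic memory operation.
 */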
40 static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
41 {
42 __asm__ __volatile__("swap [%1], %0\n\t" :
43 "=&r" (value), "=&r" (addr) :
44 "0" (value), "1" (addr));
45 return value;
46 }
47
48
49 #define srmmu_set_entry(ptr, newentry) \
50 srmmu_swap((unsigned long *) (ptr), (newentry))
51
52
53
54
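/*
 * Physical memory accessors.  The generic versions load and store straight
 * through the MMU bypass ASI (ASI_M_BYPASS).  The msparc_* variants
 * additionally set the VIKING_ACENABLE bit in the MMU control register for
 * the duration of the bypass access, with interrupts disabled, and restore
 * the old register value afterwards.
 */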
55 unsigned long (*srmmu_read_physical)(unsigned long paddr);
56 void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
57
58 static unsigned long gensrmmu_read_physical(unsigned long paddr)
59 {
60 unsigned long word;
61
62 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
63 "=r" (word) :
64 "r" (paddr), "i" (ASI_M_BYPASS) :
65 "memory");
66 return word;
67 }
68
69 static unsigned long msparc_read_physical(unsigned long paddr)
70 {
71 unsigned long word, flags;
72
73 save_flags(flags); cli();
74 __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
75 "or %%g1, %4, %%g2\n\t"
76 "sta %%g2, [%%g0] %3\n\t"
77 "lda [%1] %2, %0\n\t"
78 "sta %%g1, [%%g0] %3\n\t" :
79 "=r" (word) :
80 "r" (paddr), "i" (ASI_M_BYPASS),
81 "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
82 "g1", "g2", "memory");
83 restore_flags(flags);
84 return word;
85 }
86
87 static void gensrmmu_write_physical(unsigned long paddr, unsigned long word)
88 {
89 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
90 "r" (word), "r" (paddr), "i" (ASI_M_BYPASS) :
91 "memory");
92 }
93
94 static void msparc_write_physical(unsigned long paddr, unsigned long word)
95 {
96 unsigned long flags;
97
98 save_flags(flags); cli();
99 __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
100 "or %%g1, %4, %%g2\n\t"
101 "sta %%g2, [%%g0] %3\n\t"
102 "sta %0, [%1] %2\n\t"
103 "sta %%g1, [%%g0] %3\n\t" : :
104 "r" (word), "r" (paddr), "i" (ASI_M_BYPASS),
105 "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
106 "g1", "g2", "memory");
107 restore_flags(flags);
108 }
109
110 static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
111 static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
112
113 static unsigned long srmmu_vmalloc_start(void)
114 {
115 return SRMMU_VMALLOC_START;
116 }
117
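/*
 * SRMMU table entries hold the physical address of the next level shifted
 * right by four bits.  These helpers convert an entry back into the kernel
 * virtual address of the page or page table it points to.
 */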
118 static unsigned long srmmu_pgd_page(pgd_t pgd)
119 { return PAGE_OFFSET + ((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
120
121 static unsigned long srmmu_pmd_page(pmd_t pmd)
122 { return PAGE_OFFSET + ((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
123
124 static unsigned long srmmu_pte_page(pte_t pte)
125 { return PAGE_OFFSET + ((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
126
127 static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
128 static int srmmu_pte_present(pte_t pte)
129 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
130
131 static int srmmu_pte_inuse(pte_t *ptep)
132 { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
133
134 static void srmmu_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
135 static void srmmu_pte_reuse(pte_t *ptep)
136 {
137 if(!mem_map[MAP_NR(ptep)].reserved)
138 mem_map[MAP_NR(ptep)].count++;
139 }
140
141 static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
142 static int srmmu_pmd_bad(pmd_t pmd)
143 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
144
145 static int srmmu_pmd_present(pmd_t pmd)
146 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
147
148 static int srmmu_pmd_inuse(pmd_t *pmdp)
149 { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
150
151 static void srmmu_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
152 static void srmmu_pmd_reuse(pmd_t * pmdp)
153 {
154 if (!mem_map[MAP_NR(pmdp)].reserved)
155 mem_map[MAP_NR(pmdp)].count++;
156 }
157
158 static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
159 static int srmmu_pgd_bad(pgd_t pgd)
160 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
161
162 static int srmmu_pgd_present(pgd_t pgd)
163 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
164
165 static int srmmu_pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
166 static void srmmu_pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
167 static void srmmu_pgd_reuse(pgd_t *pgdp)
168 {
169 if (!mem_map[MAP_NR(pgdp)].reserved)
170 mem_map[MAP_NR(pgdp)].count++;
171 }
172
173 static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
174 static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
175 static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
176
177 static pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
178 static pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
179 static pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~SRMMU_REF; return pte; }
180 static pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) |= SRMMU_WRITE; return pte; }
181 static pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= SRMMU_DIRTY; return pte; }
182 static pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= SRMMU_REF; return pte; }
183
184
185
186
187
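/*
 * Build page table entries: the page address is shifted right by four bits
 * and combined with the protection bits.  srmmu_mk_pte() takes a kernel
 * virtual address (PAGE_OFFSET is subtracted); srmmu_mk_pte_io() shifts the
 * address exactly as given.
 */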
188 static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
189 { pte_t pte; pte_val(pte) = ((page - PAGE_OFFSET) >> 4) | pgprot_val(pgprot); return pte; }
190
191 static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot)
192 { pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
193
194 static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
195 { srmmu_set_entry(ctxp, (SRMMU_ET_PTD | ((((unsigned long) pgdp) - PAGE_OFFSET) >> 4))); }
196
197 static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
198 { srmmu_set_entry(pgdp, (SRMMU_ET_PTD | ((((unsigned long) pmdp) - PAGE_OFFSET) >> 4))); }
199
200 static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
201 { srmmu_set_entry(pmdp, (SRMMU_ET_PTD | ((((unsigned long) ptep) - PAGE_OFFSET) >> 4))); }
202
203 static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
204 { pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
205
206
207 static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
208 {
209 return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
210 }
211
212
213 static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
214 {
215 return (pmd_t *) pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
216 }
217
218
219 static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
220 {
221 return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
222 }
223
224
225 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
226 {
227 if(tsk->mm->context != NO_CONTEXT)
228 srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
229 }
230
231
232
233
234
235
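/*
 * Kernel page table allocation.  The *_kernel variants mark freshly
 * allocated table pages as reserved in mem_map; on allocation failure the
 * directory entry is pointed at BAD_PAGETABLE instead.
 */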
236 static void srmmu_pte_free_kernel(pte_t *pte)
237 {
238 mem_map[MAP_NR(pte)].reserved = 0;
239 free_page((unsigned long) pte);
240 }
241
242 static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
243 {
244 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
245 if(srmmu_pmd_none(*pmd)) {
246 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
247 if(srmmu_pmd_none(*pmd)) {
248 if(page) {
249 srmmu_pmd_set(pmd, page);
250 mem_map[MAP_NR(page)].reserved = 1;
251 return page + address;
252 }
253 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
254 return NULL;
255 }
256 free_page((unsigned long) page);
257 }
258 if(srmmu_pmd_bad(*pmd)) {
259 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
260 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
261 return NULL;
262 }
263 return (pte_t *) srmmu_pmd_page(*pmd) + address;
264 }
265
266 static void srmmu_pmd_free_kernel(pmd_t *pmd)
267 {
268 mem_map[MAP_NR(pmd)].reserved = 0;
269 free_page((unsigned long) pmd);
270 }
271
272 static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
273 {
274 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
275 if(srmmu_pgd_none(*pgd)) {
276 pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
277 if(srmmu_pgd_none(*pgd)) {
278 if(page) {
279 srmmu_pgd_set(pgd, page);
280 mem_map[MAP_NR(page)].reserved = 1;
281 return page + address;
282 }
283 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
284 return NULL;
285 }
286 free_page((unsigned long) page);
287 }
288 if(srmmu_pgd_bad(*pgd)) {
289 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
290 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
291 return NULL;
292 }
293 return (pmd_t *) pgd_page(*pgd) + address;
294 }
295
296 static void srmmu_pte_free(pte_t *pte)
297 {
298 free_page((unsigned long) pte);
299 }
300
301 static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
302 {
303 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
304 if(srmmu_pmd_none(*pmd)) {
305 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
306 if(srmmu_pmd_none(*pmd)) {
307 if(page) {
308 srmmu_pmd_set(pmd, page);
309 return page + address;
310 }
311 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
312 return NULL;
313 }
314 free_page((unsigned long) page);
315 }
316 if(srmmu_pmd_bad(*pmd)) {
317 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
318 srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
319 return NULL;
320 }
321 return (pte_t *) pmd_page(*pmd) + address;
322 }
323
324
325 static void srmmu_pmd_free(pmd_t * pmd)
326 {
327 free_page((unsigned long) pmd);
328 }
329
330 static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
331 {
332 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
333 if(srmmu_pgd_none(*pgd)) {
334 pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
335 if(srmmu_pgd_none(*pgd)) {
336 if(page) {
337 srmmu_pgd_set(pgd, page);
338 return page + address;
339 }
340 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
341 return NULL;
342 }
343 free_page((unsigned long) page);
344 }
345 if(srmmu_pgd_bad(*pgd)) {
346 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
347 srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
348 return NULL;
349 }
350 return (pmd_t *) srmmu_pgd_page(*pgd) + address;
351 }
352
353 static void srmmu_pgd_free(pgd_t *pgd)
354 {
355 free_page((unsigned long) pgd);
356 }
357
358 static pgd_t *srmmu_pgd_alloc(void)
359 {
360 return (pgd_t *) get_free_page(GFP_KERNEL);
361 }
362
363
364
365
366
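/*
 * Tsunami cache/TLB invalidation.  There are no fine-grained flushes here:
 * the mm, range and page hooks all fall back to a complete icache/dcache
 * invalidation plus a whole-TLB flush.
 */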
367 inline void tsunami_invalidate_all(void)
368 {
369 tsunami_invalidate_icache();
370 tsunami_invalidate_dcache();
371 srmmu_flush_whole_tlb();
372 }
373 static void tsunami_invalidate_mm(struct mm_struct *mm)
374 {
375 tsunami_invalidate_all();
376 }
377
378 static void tsunami_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
379 {
380 tsunami_invalidate_all();
381 }
382
383
384 static void tsunami_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
385 {
386 tsunami_invalidate_all();
387 }
388
389
390
391
392
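/*
 * Swift cache/TLB invalidation.  The mm/range/page hooks switch to the
 * target context with interrupts disabled, flush the relevant cache
 * context, region or page together with the matching TLB entries, then
 * restore the previous context.
 */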
393 static inline void swift_invalidate_all(void)
394 {
395 unsigned long addr = 0;
396
397
398 for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16) {
399 swift_inv_insn_tag(addr);
400 swift_inv_data_tag(addr);
401 }
402 srmmu_flush_whole_tlb();
403 }
404
405 static void swift_invalidate_mm(struct mm_struct *mm)
406 {
407 unsigned long flags;
408 int cc, ncc = mm->context;
409
410 if(ncc == NO_CONTEXT)
411 return;
412
413
414 save_flags(flags); cli();
415 cc = srmmu_get_context();
416 if(cc != ncc)
417 srmmu_set_context(ncc);
418
419 swift_flush_context();
420 srmmu_flush_tlb_ctx();
421
422 if(cc != ncc)
423 srmmu_set_context(cc);
424 restore_flags(flags);
425 }
426
427 static void swift_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
428 {
429 unsigned long flags, addr;
430 int cc, ncc = mm->context;
431
432 if(ncc == NO_CONTEXT)
433 return;
434
435 save_flags(flags); cli();
436 cc = srmmu_get_context();
437 if(cc != ncc)
438 srmmu_set_context(ncc);
439
440
441 addr = start & SRMMU_PGDIR_MASK;
442 while(addr < end) {
443 swift_flush_region(addr);
444 srmmu_flush_tlb_region(addr);
445 addr += SRMMU_PGDIR_SIZE;
446 }
447
448 if(cc != ncc)
449 srmmu_set_context(cc);
450 restore_flags(flags);
451 }
452
453 static void swift_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
454 {
455 unsigned long flags;
456 int cc, ncc = vmp->vm_mm->context;
457
458 if(ncc == NO_CONTEXT)
459 return;
460
461 save_flags(flags); cli();
462 cc = srmmu_get_context();
463 if(cc != ncc)
464 srmmu_set_context(ncc);
465
466 swift_flush_page(page);
467 srmmu_flush_tlb_page(page);
468
469 if(cc != ncc)
470 srmmu_set_context(cc);
471 restore_flags(flags);
472 }
473
474
475
476
477
478
479
480
481
482
483
484
485
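/*
 * Viking cache/TLB invalidation.  Like Swift, the hooks temporarily switch
 * to the target context, but Viking always flushes its entire icache and
 * dcache and only narrows the TLB flush to a context, region or page.
 */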
486 static inline void viking_invalidate_all(void)
487 {
488 viking_flush_icache();
489 viking_flush_dcache();
490 srmmu_flush_whole_tlb();
491 }
492 static void viking_invalidate_mm(struct mm_struct *mm)
493 {
494 unsigned long flags;
495 int cc, ncc = mm->context;
496
497 if(ncc == NO_CONTEXT)
498 return;
499
500 save_flags(flags); cli();
501 cc = srmmu_get_context();
502 if(cc != ncc)
503 srmmu_set_context(ncc);
504
505 viking_flush_icache();
506 viking_flush_dcache();
507 srmmu_flush_tlb_ctx();
508
509 if(cc != ncc)
510 srmmu_set_context(cc);
511 restore_flags(flags);
512 }
513
514 static void viking_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
515 {
516 unsigned long flags, addr;
517 int cc, ncc = mm->context;
518
519 if(ncc == NO_CONTEXT)
520 return;
521
522 save_flags(flags); cli();
523 cc = srmmu_get_context();
524 if(cc != ncc)
525 srmmu_set_context(ncc);
526
527
528 viking_flush_icache();
529 viking_flush_dcache();
530 addr = start & SRMMU_PGDIR_MASK;
531 while(addr < end) {
532 srmmu_flush_tlb_region(addr);
533 addr += SRMMU_PGDIR_SIZE;
534 }
535
536 if(cc != ncc)
537 srmmu_set_context(cc);
538 restore_flags(flags);
539 }
540 static void viking_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
541 {
542 unsigned long flags;
543 int cc, ncc = vmp->vm_mm->context;
544
545 if(ncc == NO_CONTEXT)
546 return;
547
548 save_flags(flags); cli();
549 cc = srmmu_get_context();
550 if(cc != ncc)
551 srmmu_set_context(ncc);
552
553 viking_flush_icache();
554 viking_flush_dcache();
555 srmmu_flush_tlb_page(page);
556
557 if(cc != ncc)
558 srmmu_set_context(cc);
559 restore_flags(flags);
560 }
561
562
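/*
 * Cypress cache/TLB invalidation.  cypress_invalidate_all() only flushes
 * the whole TLB; the mm/range/page hooks flush the corresponding cache
 * context, region or page and then the whole TLB.
 */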
563 static inline void cypress_invalidate_all(void)
564 {
565 srmmu_flush_whole_tlb();
566 }
567 static void cypress_invalidate_mm(struct mm_struct *mm)
568 {
569 unsigned long flags;
570 int cc, ncc = mm->context;
571
572 if(ncc == NO_CONTEXT)
573 return;
574
575
576 save_flags(flags); cli();
577 cc = srmmu_get_context();
578 if(cc != ncc)
579 srmmu_set_context(ncc);
580
581 cypress_flush_context();
582 srmmu_flush_whole_tlb();
583
584 if(cc != ncc)
585 srmmu_set_context(cc);
586 restore_flags(flags);
587 }
588 static void cypress_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
589 {
590 unsigned long flags, addr;
591 int cc, ncc = mm->context;
592
593 if(ncc == NO_CONTEXT)
594 return;
595
596 save_flags(flags); cli();
597 cc = srmmu_get_context();
598 if(cc != ncc)
599 srmmu_set_context(ncc);
600
601
602 addr = start & SRMMU_PGDIR_MASK;
603 while(addr < end) {
604 cypress_flush_region(addr);
605 addr += SRMMU_PGDIR_SIZE;
606 }
607 srmmu_flush_whole_tlb();
608
609 if(cc != ncc)
610 srmmu_set_context(cc);
611 restore_flags(flags);
612 }
613
614 static void cypress_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
615 {
616 unsigned long flags;
617 int cc, ncc = vmp->vm_mm->context;
618
619 if(ncc == NO_CONTEXT)
620 return;
621
622 save_flags(flags); cli();
623 cc = srmmu_get_context();
624 if(cc != ncc)
625 srmmu_set_context(ncc);
626
627 swift_flush_page(page);
628 srmmu_flush_whole_tlb();
629
630 if(cc != ncc)
631 srmmu_set_context(cc);
632 restore_flags(flags);
633 }
634
635
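/*
 * HyperSparc invalidation.  Only the "all" case is implemented; it flushes
 * the whole icache and TLB.  The mm, range and page hooks are still empty.
 */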
636 static inline void hypersparc_invalidate_all(void)
637 {
638
639 hyper_flush_whole_icache();
640 srmmu_flush_whole_tlb();
641 }
642
643 static void hypersparc_invalidate_mm(struct mm_struct *mm)
644 {
645
646 }
647
648 static void hypersparc_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
649 {
650
651 }
652
653 static void hypersparc_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
654 {
655
656 }
657
658 static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
659 {
660 srmmu_set_entry(ptep, pte_val(pteval));
661 }
662
663 static void srmmu_quick_kernel_fault(unsigned long address)
664 {
665 printk("SRMMU: quick_kernel_fault called for %08lx\n", address);
666 panic("Srmmu bolixed...");
667 }
668
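/*
 * MMU context allocation.  Take a context from the free list if one is
 * available; otherwise steal an entry from the used list (skipping the
 * current mm), mark its old owner as having NO_CONTEXT, and hand the
 * context number to the new mm.
 */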
669 static inline void alloc_context(struct mm_struct *mm)
670 {
671 struct ctx_list *ctxp;
672
673 ctxp = ctx_free.next;
674 if(ctxp != &ctx_free) {
675 remove_from_ctx_list(ctxp);
676 add_to_used_ctxlist(ctxp);
677 mm->context = ctxp->ctx_number;
678 ctxp->ctx_mm = mm;
679 return;
680 }
681 ctxp = ctx_used.next;
682 if(ctxp->ctx_mm == current->mm)
683 ctxp = ctxp->next;
684 if(ctxp == &ctx_used)
685 panic("out of mmu contexts");
686 remove_from_ctx_list(ctxp);
687 add_to_used_ctxlist(ctxp);
688 ctxp->ctx_mm->context = NO_CONTEXT;
689 ctxp->ctx_mm = mm;
690 mm->context = ctxp->ctx_number;
691 }
692
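/*
 * Switch the hardware MMU context to that of tsk, allocating a context and
 * installing the task's pgd in the context table on first use.  Kernel
 * threads and exiting tasks are left alone.
 */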
693 static void srmmu_switch_to_context(struct task_struct *tsk)
694 {
695
696
697
698
699
700 if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
701 (tsk->flags & PF_EXITING))
702 return;
703 if(tsk->mm->context == NO_CONTEXT) {
704 alloc_context(tsk->mm);
705 srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
706 }
707 srmmu_set_context(tsk->mm->context);
708 }
709
710
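/*
 * Map one physical page into the kernel page tables for I/O.  The bus type
 * is placed in the top four bits of the PTE and the mapping is made either
 * privileged read-only or fully privileged.
 */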
711 void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
712 {
713 pgd_t *pgdp;
714 pmd_t *pmdp;
715 pte_t *ptep;
716 unsigned long tmp;
717
718 physaddr &= PAGE_MASK;
719 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
720 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
721 ptep = srmmu_pte_offset(pmdp, virt_addr);
722 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
723
724
725
726
727
728 tmp |= (bus_type << 28);
729 if(rdonly)
730 tmp |= SRMMU_PRIV_RDONLY;
731 else
732 tmp |= SRMMU_PRIV;
733 srmmu_set_entry(ptep, tmp);
734 invalidate_all();
735 }
736
737 static char *srmmu_lockarea(char *vaddr, unsigned long len)
738 {
739 return vaddr;
740 }
741
742 static void srmmu_unlockarea(char *vaddr, unsigned long len)
743 {
744 }
745
746
747
748 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
749 static unsigned long first_dvma_page, last_dvma_page;
750
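/*
 * Enter the DVMA pages allocated at boot into the IOMMU page table so that
 * devices see them starting at DVMA_VADDR; each IOPTE gets the physical
 * page number plus the write and valid bits.
 */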
751 static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
752 {
753 unsigned long first = first_dvma_page;
754 unsigned long last = last_dvma_page;
755 iopte_t *iopte;
756
757 iopte = iommu->page_table;
758 iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
759 while(first <= last) {
760 iopte_val(*iopte++) = ((((first - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
761 (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
762 first += PAGE_SIZE;
763 }
764 }
765
766 void srmmu_uncache_iommu_page_table(unsigned long start, int size)
767 {
768 pgd_t *pgdp;
769 pmd_t *pmdp;
770 pte_t *ptep;
771 unsigned long end = start + size;
772
773 while(start < end) {
774 pgdp = srmmu_pgd_offset(init_task.mm, start);
775 pmdp = srmmu_pmd_offset(pgdp, start);
776 ptep = srmmu_pte_offset(pmdp, start);
777 pte_val(*ptep) &= ~SRMMU_CACHE;
778 start += PAGE_SIZE;
779 }
780 }
781
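/*
 * Set up the SBus IOMMU: map its registers, enable a 64MB DVMA range,
 * allocate and clear the IOMMU page table (aligned to its own size), enter
 * the DVMA pages, make the table uncacheable, and point the hardware at it.
 */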
782 unsigned long iommu_init(int iommund, unsigned long memory_start,
783 unsigned long memory_end, struct linux_sbus *sbus)
784 {
785 int impl, vers, ptsize;
786 unsigned long tmp;
787 struct iommu_struct *iommu;
788 struct linux_prom_registers iommu_promregs[PROMREG_MAX];
789
790 memory_start = LONG_ALIGN(memory_start);
791 iommu = (struct iommu_struct *) memory_start;
792 memory_start += sizeof(struct iommu_struct);
793 prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
794 iommu->regs = (struct iommu_regs *)
795 sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
796 "IOMMU registers", iommu_promregs[0].which_io, 0x0);
797 if(!iommu->regs)
798 panic("Cannot map IOMMU registers.");
799 impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
800 vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
801 tmp = iommu->regs->control;
802 tmp &= ~(IOMMU_CTRL_RNGE);
803 tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
804 iommu->regs->control = tmp;
805 iommu_invalidate(iommu->regs);
806 iommu->start = 0xfc000000;
807 iommu->end = 0xffffffff;
808
809
810 ptsize = iommu->end - iommu->start + 1;
811 ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
812
813
814 memory_start = PAGE_ALIGN(memory_start);
815 memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
816 iommu->page_table = (iopte_t *) memory_start;
817 memory_start += ptsize;
818
819
820 memset(iommu->page_table, 0, ptsize);
821 srmmu_map_dvma_pages_for_iommu(iommu);
822 iommu->regs->base = (((unsigned long) iommu->page_table) - PAGE_OFFSET) >> 4;
823 srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
824 iommu_invalidate(iommu->regs);
825 invalidate_all();
826
827 sbus->iommu = iommu;
828 printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
829 impl, vers, iommu->page_table, ptsize);
830 return memory_start;
831 }
832
833
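/*
 * Map an (at most page-sized) buffer for a SCSI device: scan the IOMMU page
 * table linearly for the first invalid entry, point it at the buffer's
 * physical page, and return the corresponding DVMA address.
 */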
834 static char *srmmu_get_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
835 {
836 struct iommu_struct *iommu = sbus->iommu;
837 unsigned long page = (unsigned long) vaddr;
838 unsigned long start, end, offset;
839 iopte_t *iopte;
840
841 if(len > PAGE_SIZE)
842 panic("Can only handle page-sized IOMMU mappings.");
843 offset = page & ~PAGE_MASK;
844 page &= PAGE_MASK;
845
846 start = iommu->start;
847 end = KADB_DEBUGGER_BEGVM;
848 iopte = iommu->page_table;
849 while(start < end) {
850 if(!(iopte_val(*iopte) & IOPTE_VALID))
851 break;
852 iopte++;
853 start += PAGE_SIZE;
854 }
855 if(start == KADB_DEBUGGER_BEGVM)
856 panic("Could not find a free IOMMU entry in get_scsi_buffer.");
857
858 vaddr = (char *) (start | offset);
859 iopte_val(*iopte) = ((((page - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
860 (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
861 iommu_invalidate(iommu->regs);
862 invalidate_all();
863
864 return vaddr;
865 }
866
867 static void srmmu_release_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
868 {
869 struct iommu_struct *iommu = sbus->iommu;
870 unsigned long page = (unsigned long) vaddr;
871 iopte_t *iopte;
872
873 if(len > PAGE_SIZE)
874 panic("Can only handle page-sized IOMMU mappings.");
875 page &= PAGE_MASK;
876 iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
877 iopte_val(*iopte) = 0;
878 iommu_invalidate(iommu->regs);
879 invalidate_all();
880 }
881
882
883
884
885
886
887
888
889
890
891 struct task_struct *srmmu_alloc_task_struct(void)
892 {
893 unsigned long page;
894
895 page = get_free_page(GFP_KERNEL);
896 if(!page)
897 return (struct task_struct *) 0;
898 return (struct task_struct *) page;
899 }
900
901 unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
902 {
903 unsigned long pages;
904
905 pages = __get_free_pages(GFP_KERNEL, 1, 0);
906 if(!pages)
907 return 0;
908 memset((void *) pages, 0, (PAGE_SIZE << 1));
909 return pages;
910 }
911
912 static void srmmu_free_task_struct(struct task_struct *tsk)
913 {
914 free_page((unsigned long) tsk);
915 }
916
917 static void srmmu_free_kernel_stack(unsigned long stack)
918 {
919 free_pages(stack, 1);
920 }
921
922 static unsigned long mempool;
923
924
925
926
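/*
 * Boot-time allocator: carve "size" bytes (which must be a power of two)
 * off the kernel break pointer, aligned to "size", and zero them.
 */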
927 static void *srmmu_init_alloc(unsigned long *kbrk, unsigned size)
928 {
929 register unsigned mask = size - 1;
930 register unsigned long ret;
931
932 if(size==0) return 0x0;
933 if(size & mask) {
934 prom_printf("panic: srmmu_init_alloc botch\n");
935 prom_halt();
936 }
937 ret = (*kbrk + mask) & ~mask;
938 *kbrk = ret + size;
939 memset((void*) ret, 0, size);
940 return (void*) ret;
941 }
942
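/*
 * Build empty pgd/pmd levels covering [start, end) so that individual PTEs
 * can be installed in that range later.
 */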
943 static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
944 {
945 pgd_t *pgdp;
946 pmd_t *pmdp;
947 pte_t *ptep;
948
949 while(start < end) {
950 pgdp = srmmu_pgd_offset(init_task.mm, start);
951 if(srmmu_pgd_none(*pgdp)) {
952 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
953 srmmu_pgd_set(pgdp, pmdp);
954 }
955 pmdp = srmmu_pmd_offset(pgdp, start);
956 if(srmmu_pmd_none(*pmdp)) {
957 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
958 srmmu_pmd_set(pmdp, ptep);
959 }
960 start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
961 }
962 }
963
964
965
966
967
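/*
 * Copy the PROM's existing translations for KADB_DEBUGGER_BEGVM up to
 * LINUX_OPPROM_ENDVM into our own tables, probing each page with
 * srmmu_hwprobe() and keeping any valid PTE that is found.
 */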
968 static inline void srmmu_inherit_prom_mappings(void)
969 {
970 pgd_t *pgdp;
971 pmd_t *pmdp;
972 pte_t *ptep;
973 unsigned long start, end;
974 unsigned long prompte;
975
976 start = KADB_DEBUGGER_BEGVM;
977 end = LINUX_OPPROM_ENDVM;
978 while(start < end) {
979
980 prompte = srmmu_hwprobe(start);
981
982 if((prompte & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
983 pgdp = srmmu_pgd_offset(init_task.mm, start);
984 if(srmmu_pgd_none(*pgdp)) {
985 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
986 srmmu_pgd_set(pgdp, pmdp);
987 }
988 pmdp = srmmu_pmd_offset(pgdp, start);
989 if(srmmu_pmd_none(*pmdp)) {
990 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
991 srmmu_pmd_set(pmdp, ptep);
992 }
993 ptep = srmmu_pte_offset(pmdp, start);
994 pte_val(*ptep) = prompte;
995 }
996 start += PAGE_SIZE;
997 }
998 }
999
1000 static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
1001 {
1002 unsigned long start;
1003 pgprot_t dvma_prot;
1004 pgd_t *pgdp;
1005 pmd_t *pmdp;
1006 pte_t *ptep;
1007
1008 start = DVMA_VADDR;
1009 dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
1010 while(first <= last) {
1011 pgdp = srmmu_pgd_offset(init_task.mm, start);
1012 pmdp = srmmu_pmd_offset(pgdp, start);
1013 ptep = srmmu_pte_offset(pmdp, start);
1014
1015
1016 srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
1017
1018 first += PAGE_SIZE;
1019 start += PAGE_SIZE;
1020 }
1021 }
1022
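/*
 * Map the kernel's linear address range: for each page, build any missing
 * table levels and install a SRMMU_PAGE_KERNEL pte for it.
 */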
1023 static void srmmu_map_kernel(unsigned long start, unsigned long end)
1024 {
1025 pgd_t *pgdp;
1026 pmd_t *pmdp;
1027 pte_t *ptep;
1028
1029 end = (PAGE_ALIGN(end) + PAGE_SIZE);
1030 while(start < end) {
1031 pgdp = srmmu_pgd_offset(init_task.mm, start);
1032 if(srmmu_pgd_none(*pgdp)) {
1033 pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
1034 srmmu_pgd_set(pgdp, pmdp);
1035 }
1036 pmdp = srmmu_pmd_offset(pgdp, start);
1037 if(srmmu_pmd_none(*pmdp)) {
1038 ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
1039 srmmu_pmd_set(pmdp, ptep);
1040 }
1041 ptep = srmmu_pte_offset(pmdp, start);
1042 *ptep = srmmu_mk_pte(start, SRMMU_PAGE_KERNEL);
1043 start += PAGE_SIZE;
1044 }
1045 }
1046
1047
1048 extern unsigned long free_area_init(unsigned long, unsigned long);
1049 extern unsigned long sparc_context_init(unsigned long, int);
1050
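/*
 * Set up paging: map the kernel image and the I/O and DVMA areas, inherit
 * the PROM mappings, build the context table and point the hardware at it,
 * then hand the remaining memory to the context and free-area initialisers.
 */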
1051 unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
1052 {
1053 int i, cpunode;
1054 char node_str[128];
1055
1056
1057 cpunode = prom_getchild(prom_root_node);
1058 num_contexts = 0;
1059 while((cpunode = prom_getsibling(cpunode)) != 0) {
1060 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1061 if(!strcmp(node_str, "cpu")) {
1062 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1063 break;
1064 }
1065 }
1066 if(!num_contexts) {
1067 prom_printf("Something is wrong, can't find cpu node in paging_init.\n");
1068 prom_halt();
1069 }
1070
1071 prom_printf("Number of MMU contexts %d\n", num_contexts);
1072 mempool = start_mem;
1073 memset(swapper_pg_dir, 0, PAGE_SIZE);
1074 srmmu_map_kernel(KERNBASE, end_mem);
1075 srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
1076 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
1077 mempool = PAGE_ALIGN(mempool);
1078 first_dvma_page = mempool;
1079 last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
1080 mempool = last_dvma_page + PAGE_SIZE;
1081 srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
1082
1083 srmmu_inherit_prom_mappings();
1084 srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
1085 for(i = 0; i < num_contexts; i++)
1086 srmmu_ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
1087
1088 prom_printf("Taking over MMU from PROM.\n");
1089 srmmu_flush_whole_tlb();
1090 srmmu_set_ctable_ptr(((unsigned)srmmu_context_table) - PAGE_OFFSET);
1091 srmmu_flush_whole_tlb();
1092
1093 start_mem = PAGE_ALIGN(mempool);
1094 start_mem = sparc_context_init(start_mem, num_contexts);
1095 start_mem = free_area_init(start_mem, end_mem);
1096
1097 prom_printf("survived...\n");
1098 return PAGE_ALIGN(start_mem);
1099 }
1100
1101
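/*
 * Test whether hardware write protection works: attempt a store to virtual
 * address zero; if wp_works_ok is still negative afterwards (i.e. the fault
 * handler never set it), write protection is assumed not to work.  The pgd
 * entry for address zero is cleared afterwards.
 */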
1102 void srmmu_test_wp(void)
1103 {
1104 pgd_t *pgdp;
1105
1106 wp_works_ok = -1;
1107
1108
1109
1110
1111
1112
1113 __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
1114 if (wp_works_ok < 0)
1115 wp_works_ok = 0;
1116
1117 pgdp = srmmu_pgd_offset(init_task.mm, 0x0);
1118 pgd_val(*pgdp) = 0x0;
1119 }
1120
1121 static char *srmmu_mmu_info(void)
1122 {
1123 return "";
1124 }
1125
1126 static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1127 {
1128 }
1129
1130 static void srmmu_exit_hook(void)
1131 {
1132 struct ctx_list *ctx_old;
1133 struct mm_struct *mm = current->mm;
1134
1135 if(mm->context != NO_CONTEXT) {
1136 srmmu_ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
1137 ctx_old = ctx_list_pool + mm->context;
1138 remove_from_ctx_list(ctx_old);
1139 add_to_free_ctxlist(ctx_old);
1140 mm->context = NO_CONTEXT;
1141 }
1142 }
1143
1144 static void
1145 srmmu_flush_hook(void)
1146 {
1147 if(current->tss.flags & SPARC_FLAG_KTHREAD) {
1148 alloc_context(current->mm);
1149 srmmu_ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
1150 srmmu_set_context(current->mm->context);
1151 }
1152 }
1153
1154
1155 void srmmu_is_bad(void)
1156 {
1157 prom_printf("Could not determine SRMMU chip type.\n");
1158 prom_halt();
1159 }
1160
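/*
 * Per-module initialisation.  Each init_* routine records the module type
 * and any known hardware bugs, enables the caches via the MMU control
 * register (flushing them first where needed), and installs its
 * invalidate hooks.
 */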
1161 void init_hypersparc(void)
1162 {
1163 unsigned long mreg = srmmu_get_mmureg();
1164
1165 prom_printf("HyperSparc MMU detected.\n");
1166 if(mreg & HYPERSPARC_CSIZE)
1167 hyper_cache_size = (256 * 1024);
1168 else
1169 hyper_cache_size = (128 * 1024);
1170
1171 srmmu_modtype = HyperSparc;
1172 hwbug_bitmask |= HWBUG_VACFLUSH_BITROT;
1173
1174 hyper_flush_whole_icache();
1175 hyper_flush_all_combined();
1176
1177
1178 mreg &= ~(HYPERSPARC_CWENABLE | HYPERSPARC_CMODE | HYPERSPARC_WBENABLE);
1179 mreg |= HYPERSPARC_CENABLE;
1180 srmmu_set_mmureg(mreg);
1181 put_ross_icr(get_ross_icr() | 0x3);
1182 invalidate_all = hypersparc_invalidate_all;
1183 invalidate_mm = hypersparc_invalidate_mm;
1184 invalidate_page = hypersparc_invalidate_page;
1185 invalidate_range = hypersparc_invalidate_range;
1186 }
1187
1188 void init_cypress_common(void)
1189 {
1190 unsigned long mreg = srmmu_get_mmureg();
1191
1192 mreg &= ~CYPRESS_CMODE;
1193 mreg |= CYPRESS_CENABLE;
1194 srmmu_set_mmureg(mreg);
1195 invalidate_all = cypress_invalidate_all;
1196 invalidate_mm = cypress_invalidate_mm;
1197 invalidate_page = cypress_invalidate_page;
1198 invalidate_range = cypress_invalidate_range;
1199 }
1200
1201 void init_cypress_604(void)
1202 {
1203 prom_printf("Cypress 604(UP) MMU detected.\n");
1204 srmmu_modtype = Cypress;
1205 init_cypress_common();
1206 }
1207
1208 void init_cypress_605(unsigned long mrev)
1209 {
1210 prom_printf("Cypress 605(MP) MMU detected.\n");
1211 if(mrev == 0xe) {
1212 srmmu_modtype = Cypress_vE;
1213 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1214 } else {
1215 if(mrev == 0xd) {
1216 srmmu_modtype = Cypress_vD;
1217 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1218 } else {
1219 srmmu_modtype = Cypress;
1220 }
1221 }
1222 init_cypress_common();
1223 }
1224
1225 #define SWIFT_REVISION_ADDR 0x10003000
1226 void init_swift(void)
1227 {
1228 unsigned long swift_rev, addr;
1229 unsigned long mreg = srmmu_get_mmureg();
1230
1231 prom_printf("Swift MMU detected.\n");
1232 __asm__ __volatile__("lda [%1] %2, %0\n\t"
1233 "srl %0, 0x18, %0\n\t" :
1234 "=r" (swift_rev) :
1235 "r" (SWIFT_REVISION_ADDR), "i" (0x20));
1236 switch(swift_rev) {
1237 case 0x11:
1238 case 0x20:
1239 case 0x23:
1240 case 0x30:
1241 srmmu_modtype = Swift_lots_o_bugs;
1242 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259 break;
1260 case 0x25:
1261 case 0x31:
1262 srmmu_modtype = Swift_bad_c;
1263 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1264
1265
1266
1267
1268 break;
1269 default:
1270 srmmu_modtype = Swift_ok;
1271 break;
1272 };
1273
1274 for(addr = 0; addr < (PAGE_SIZE * 4); addr += 16) {
1275 swift_inv_insn_tag(addr);
1276 swift_inv_data_tag(addr);
1277 }
1278 mreg |= (SWIFT_IE | SWIFT_DE);
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288 mreg &= ~(SWIFT_BF);
1289 srmmu_set_mmureg(mreg);
1290
1291 invalidate_all = swift_invalidate_all;
1292 invalidate_mm = swift_invalidate_mm;
1293 invalidate_page = swift_invalidate_page;
1294 invalidate_range = swift_invalidate_range;
1295
1296
1297
1298
1299 }
1300
1301 void init_tsunami(unsigned long mreg)
1302 {
1303
1304
1305
1306
1307
1308 prom_printf("Tsunami MMU detected.\n");
1309 srmmu_modtype = Tsunami;
1310 tsunami_invalidate_icache();
1311 tsunami_invalidate_dcache();
1312 mreg &= ~TSUNAMI_ITD;
1313 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1314 srmmu_set_mmureg(mreg);
1315 invalidate_all = tsunami_invalidate_all;
1316 invalidate_mm = tsunami_invalidate_mm;
1317 invalidate_page = tsunami_invalidate_page;
1318 invalidate_range = tsunami_invalidate_range;
1319 }
1320
1321 void init_viking(unsigned long psr_vers, unsigned long mod_rev)
1322 {
1323 unsigned long mreg = srmmu_get_mmureg();
1324
1325
1326
1327 prom_printf("Viking MMU detected.\n");
1328 if(!psr_vers && ! mod_rev) {
1329 srmmu_modtype = Viking_12;
1330 hwbug_bitmask |= (HWBUG_MODIFIED_BITROT | HWBUG_PC_BADFAULT_ADDR);
1331
1332
1333
1334
1335
1336
1337
1338 } else {
1339 if(psr_vers) {
1340 srmmu_modtype = Viking_2x;
1341 hwbug_bitmask |= HWBUG_PC_BADFAULT_ADDR;
1342 } else {
1343 if(mod_rev == 1) {
1344 srmmu_modtype = Viking_30;
1345 hwbug_bitmask |= HWBUG_PACINIT_BITROT;
1346
1347
1348
1349
1350
1351
1352 } else {
1353 if(mod_rev < 8)
1354 srmmu_modtype = Viking_35;
1355 else
1356 srmmu_modtype = Viking_new;
1357 }
1358 }
1359 }
1360
1361 viking_flush_icache();
1362 viking_flush_dcache();
1363 mreg |= (VIKING_DCENABLE | VIKING_ICENABLE | VIKING_SBENABLE |
1364 VIKING_TCENABLE | VIKING_DPENABLE);
1365 srmmu_set_mmureg(mreg);
1366 invalidate_all = viking_invalidate_all;
1367 invalidate_mm = viking_invalidate_mm;
1368 invalidate_page = viking_invalidate_page;
1369 invalidate_range = viking_invalidate_range;
1370 }
1371
1372
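/*
 * Identify the SRMMU module from the implementation/version fields of the
 * MMU control register and the processor PSR, then call the matching
 * per-module init routine (or srmmu_is_bad() if nothing matches).
 */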
1373 static void get_srmmu_type(void)
1374 {
1375 unsigned long mreg, psr;
1376 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1377
1378 srmmu_modtype = SRMMU_INVAL_MOD;
1379 hwbug_bitmask = 0;
1380
1381 mreg = srmmu_get_mmureg(); psr = get_psr();
1382 mod_typ = (mreg & 0xf0000000) >> 28;
1383 mod_rev = (mreg & 0x0f000000) >> 24;
1384 psr_typ = (psr >> 28) & 0xf;
1385 psr_vers = (psr >> 24) & 0xf;
1386
1387
1388 if(mod_typ == 1) {
1389 switch(mod_rev) {
1390 case 7:
1391
1392 init_hypersparc();
1393 break;
1394 case 0:
1395
1396 init_cypress_604();
1397 break;
1398 case 13:
1399 case 14:
1400 case 15:
1401
1402 init_cypress_605(mod_rev);
1403 break;
1404 default:
1405 srmmu_is_bad();
1406 break;
1407 };
1408 return;
1409 }
1410
1411
1412 if(psr_typ == 0 && psr_vers == 4) {
1413 init_swift();
1414 return;
1415 }
1416
1417
1418 if(psr_typ == 4 &&
1419 ((psr_vers == 0) ||
1420 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1421 init_viking(psr_vers, mod_rev);
1422 return;
1423 }
1424
1425
1426 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1427 init_tsunami(mreg);
1428 return;
1429 }
1430
1431
1432 srmmu_is_bad();
1433 }
1434
1435 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
1436 tsetup_mmu_patchme, rtrap_mmu_patchme;
1437
1438 extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
1439 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
1440
1441 extern unsigned long srmmu_fault;
1442
1443 #define PATCH_BRANCH(insn, dest) do { \
1444 iaddr = &(insn); \
1445 daddr = &(dest); \
1446 *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
1447 } while(0)
1448
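/*
 * Patch the window trap handlers and the text/data fault entries of the
 * trap table so that they branch to the SRMMU-specific stack check and
 * fault routines.
 */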
1449 static void patch_window_trap_handlers(void)
1450 {
1451 unsigned long *iaddr, *daddr;
1452
1453 PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
1454 PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
1455 PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
1456 PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
1457 PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
1458 PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
1459 }
1460
1461
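/*
 * Entry point: install the SRMMU constants and function pointers into the
 * generic Sparc MMU interface, probe the module type, choose the physical
 * access routines, and patch the trap handlers.
 */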
1462 void ld_mmu_srmmu(void)
1463 {
1464 prom_printf("Loading srmmu MMU routines\n");
1465
1466
1467 pmd_shift = SRMMU_PMD_SHIFT;
1468 pmd_size = SRMMU_PMD_SIZE;
1469 pmd_mask = SRMMU_PMD_MASK;
1470 pgdir_shift = SRMMU_PGDIR_SHIFT;
1471 pgdir_size = SRMMU_PGDIR_SIZE;
1472 pgdir_mask = SRMMU_PGDIR_MASK;
1473
1474 ptrs_per_pte = SRMMU_PTRS_PER_PTE;
1475 ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
1476 ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
1477
1478 page_none = SRMMU_PAGE_NONE;
1479 page_shared = SRMMU_PAGE_SHARED;
1480 page_copy = SRMMU_PAGE_COPY;
1481 page_readonly = SRMMU_PAGE_RDONLY;
1482 page_kernel = SRMMU_PAGE_KERNEL;
1483 pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
1484
1485
1486 set_pte = srmmu_set_pte;
1487 switch_to_context = srmmu_switch_to_context;
1488 pmd_align = srmmu_pmd_align;
1489 pgdir_align = srmmu_pgdir_align;
1490 vmalloc_start = srmmu_vmalloc_start;
1491
1492 pte_page = srmmu_pte_page;
1493 pmd_page = srmmu_pmd_page;
1494 pgd_page = srmmu_pgd_page;
1495
1496 sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
1497
1498 pte_none = srmmu_pte_none;
1499 pte_present = srmmu_pte_present;
1500 pte_inuse = srmmu_pte_inuse;
1501 pte_clear = srmmu_pte_clear;
1502 pte_reuse = srmmu_pte_reuse;
1503
1504 pmd_none = srmmu_pmd_none;
1505 pmd_bad = srmmu_pmd_bad;
1506 pmd_present = srmmu_pmd_present;
1507 pmd_inuse = srmmu_pmd_inuse;
1508 pmd_clear = srmmu_pmd_clear;
1509 pmd_reuse = srmmu_pmd_reuse;
1510
1511 pgd_none = srmmu_pgd_none;
1512 pgd_bad = srmmu_pgd_bad;
1513 pgd_present = srmmu_pgd_present;
1514 pgd_inuse = srmmu_pgd_inuse;
1515 pgd_clear = srmmu_pgd_clear;
1516 pgd_reuse = srmmu_pgd_reuse;
1517
1518 mk_pte = srmmu_mk_pte;
1519 pgd_set = srmmu_pgd_set;
1520 mk_pte_io = srmmu_mk_pte_io;
1521 pte_modify = srmmu_pte_modify;
1522 pgd_offset = srmmu_pgd_offset;
1523 pmd_offset = srmmu_pmd_offset;
1524 pte_offset = srmmu_pte_offset;
1525 pte_free_kernel = srmmu_pte_free_kernel;
1526 pmd_free_kernel = srmmu_pmd_free_kernel;
1527 pte_alloc_kernel = srmmu_pte_alloc_kernel;
1528 pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
1529 pte_free = srmmu_pte_free;
1530 pte_alloc = srmmu_pte_alloc;
1531 pmd_free = srmmu_pmd_free;
1532 pmd_alloc = srmmu_pmd_alloc;
1533 pgd_free = srmmu_pgd_free;
1534 pgd_alloc = srmmu_pgd_alloc;
1535
1536 pte_write = srmmu_pte_write;
1537 pte_dirty = srmmu_pte_dirty;
1538 pte_young = srmmu_pte_young;
1539 pte_wrprotect = srmmu_pte_wrprotect;
1540 pte_mkclean = srmmu_pte_mkclean;
1541 pte_mkold = srmmu_pte_mkold;
1542 pte_mkwrite = srmmu_pte_mkwrite;
1543 pte_mkdirty = srmmu_pte_mkdirty;
1544 pte_mkyoung = srmmu_pte_mkyoung;
1545 update_mmu_cache = srmmu_update_mmu_cache;
1546 mmu_exit_hook = srmmu_exit_hook;
1547 mmu_flush_hook = srmmu_flush_hook;
1548 mmu_lockarea = srmmu_lockarea;
1549 mmu_unlockarea = srmmu_unlockarea;
1550 mmu_get_scsi_buffer = srmmu_get_scsi_buffer;
1551 mmu_release_scsi_buffer = srmmu_release_scsi_buffer;
1552 mmu_info = srmmu_mmu_info;
1553
1554
1555 alloc_kernel_stack = srmmu_alloc_kernel_stack;
1556 alloc_task_struct = srmmu_alloc_task_struct;
1557 free_kernel_stack = srmmu_free_kernel_stack;
1558 free_task_struct = srmmu_free_task_struct;
1559
1560 quick_kernel_fault = srmmu_quick_kernel_fault;
1561
1562 get_srmmu_type();
1563 if(!(srmmu_get_mmureg() & 0x800)) {
1564 srmmu_read_physical = msparc_read_physical;
1565 srmmu_write_physical = msparc_write_physical;
1566 } else {
1567 srmmu_read_physical = gensrmmu_read_physical;
1568 srmmu_write_physical = gensrmmu_write_physical;
1569 }
1570 patch_window_trap_handlers();
1571 }