This source file includes the following definitions:
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_virt_to_phys
- srmmu_phys_to_virt
- srmmu_vmalloc_start
- srmmu_pmd_page
- srmmu_pgd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_inuse
- srmmu_pte_clear
- srmmu_pte_reuse
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_inuse
- srmmu_pmd_clear
- srmmu_pmd_reuse
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_inuse
- srmmu_pgd_clear
- srmmu_pgd_reuse
- srmmu_pte_read
- srmmu_pte_write
- srmmu_pte_exec
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_cow
- srmmu_pte_wrprotect
- srmmu_pte_rdprotect
- srmmu_pte_exprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_uncow
- srmmu_pte_mkwrite
- srmmu_pte_mkread
- srmmu_pte_mkexec
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_pte_mkcow
- srmmu_mk_pte
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- srmmu_invalidate
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_init_twalk
- srmmu_init_alloc
- srmmu_patch_fhandlers
- srmmu_paging_init
- srmmu_test_wp
- ld_mmu_srmmu
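
/*
 * srmmu.c: SRMMU (SPARC Reference MMU) specific routines for memory
 * management on sun4m-class machines.
 */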
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/mp.h>
#include <asm/cache.h>
#include <asm/oplib.h>

extern unsigned long free_area_init(unsigned long, unsigned long);

unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
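
/*
 * Physical memory on these machines may come in several discontiguous
 * banks (sp_banks[]).  Kernel virtual addresses pack those banks
 * linearly starting at PAGE_OFFSET, so translation in either direction
 * is a walk over the bank table; an address that falls in no bank is
 * fatal.
 */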
static inline unsigned int
srmmu_virt_to_phys(unsigned int vaddr)
{
        unsigned int paddr = 0;
        unsigned int voff = (vaddr - PAGE_OFFSET);
        int i;

        for(i=0; sp_banks[i].num_bytes != 0; i++) {
                if(voff < paddr + sp_banks[i].num_bytes)
                        return sp_banks[i].base_addr + voff - paddr;
                else
                        paddr += sp_banks[i].num_bytes;
        }

        printk("srmmu_virt_to_phys: SRMMU virt to phys translation failed, halting\n");
        halt();
}

static inline unsigned long
srmmu_phys_to_virt(unsigned long paddr)
{
        int i;
        unsigned long offset = PAGE_OFFSET;

        for (i=0; sp_banks[i].num_bytes != 0; i++) {
                if (paddr >= sp_banks[i].base_addr &&
                    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
                        return (paddr - sp_banks[i].base_addr) + offset;
                else
                        offset += sp_banks[i].num_bytes;
        }
        printk("srmmu_phys_to_virt: Could not make translation, halting...\n");
        halt();
}

unsigned long
srmmu_vmalloc_start(void)
{
        return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
}
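
/*
 * A page table descriptor (PTD) holds the physical address of the next
 * level table; a PTE holds the physical page number.  These helpers
 * shift the pointer field back up into a physical address and hand back
 * the kernel virtual address of the page it names.
 */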
unsigned long
srmmu_pmd_page(pmd_t pmd)
{
        unsigned long page;

        page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
        return srmmu_phys_to_virt(page);
}

unsigned long
srmmu_pgd_page(pgd_t pgd)
{
        unsigned long page;

        page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
        return srmmu_phys_to_virt(page);
}

unsigned long
srmmu_pte_page(pte_t pte)
{
        unsigned long page;

        page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
        printk("srmmu_pte_page: page = %08lx\n", page);
        return srmmu_phys_to_virt(page);
}

int srmmu_pte_none(pte_t pte)		{ return !pte_val(pte); }
int srmmu_pte_present(pte_t pte)	{ return pte_val(pte) & SRMMU_ET_PTE; }
int srmmu_pte_inuse(pte_t *ptep)	{ return mem_map[MAP_NR(ptep)] != 1; }
void srmmu_pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }
void srmmu_pte_reuse(pte_t *ptep)
{
        if(!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
                mem_map[MAP_NR(ptep)]++;
}

int srmmu_pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
int srmmu_pmd_bad(pmd_t pmd)
{
        return ((pmd_val(pmd) & SRMMU_ET_PTDBAD) == SRMMU_ET_PTDBAD) ||
                (srmmu_pmd_page(pmd) > high_memory);
}

int srmmu_pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & SRMMU_ET_PTD; }
int srmmu_pmd_inuse(pmd_t *pmdp)	{ return mem_map[MAP_NR(pmdp)] != 1; }
void srmmu_pmd_clear(pmd_t *pmdp)	{ pmd_val(*pmdp) = 0; }
void srmmu_pmd_reuse(pmd_t *pmdp)
{
        if (!(mem_map[MAP_NR(pmdp)] & MAP_PAGE_RESERVED))
                mem_map[MAP_NR(pmdp)]++;
}

int srmmu_pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
int srmmu_pgd_bad(pgd_t pgd)
{
        return ((pgd_val(pgd) & SRMMU_ET_PTDBAD) == SRMMU_ET_PTDBAD) ||
                (srmmu_pgd_page(pgd) > high_memory);
}

int srmmu_pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & SRMMU_ET_PTD; }
int srmmu_pgd_inuse(pgd_t *pgdp)	{ return mem_map[MAP_NR(pgdp)] != 1; }
void srmmu_pgd_clear(pgd_t *pgdp)	{ pgd_val(*pgdp) = 0; }
void srmmu_pgd_reuse(pgd_t *pgdp)
{
        if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
                mem_map[MAP_NR(pgdp)]++;
}
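
/*
 * The SRMMU keeps read, write and execute permission together in a
 * single access-permission (ACC) field of the PTE rather than as
 * independent bits.  That is why the predicates below test combined
 * _SRMMU_PAGE_* values, and why the modifiers that follow clear the
 * whole ACC field and install a new combined value each time.
 */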
int srmmu_pte_read(pte_t pte)	{ return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
int srmmu_pte_write(pte_t pte)	{ return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
int srmmu_pte_exec(pte_t pte)	{ return pte_val(pte) & _SRMMU_PAGE_EXEC; }
int srmmu_pte_dirty(pte_t pte)	{ return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
int srmmu_pte_young(pte_t pte)	{ return pte_val(pte) & _SRMMU_PAGE_REF; }
int srmmu_pte_cow(pte_t pte)	{ return pte_val(pte) & _SRMMU_PAGE_COW; }

pte_t srmmu_pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
pte_t srmmu_pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
pte_t srmmu_pte_exprotect(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
pte_t srmmu_pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
pte_t srmmu_pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
pte_t srmmu_pte_uncow(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
pte_t srmmu_pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
pte_t srmmu_pte_mkread(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
pte_t srmmu_pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
pte_t srmmu_pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
pte_t srmmu_pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
pte_t srmmu_pte_mkcow(pte_t pte)	{ pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
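
/*
 * Entry constructors.  srmmu_mk_pte() packs the physical page number of
 * a page-aligned kernel virtual address together with the protection
 * bits; srmmu_pgd_set()/srmmu_pmd_set() build page table descriptors
 * pointing at the next-level table, tagged SRMMU_ET_PTD.
 */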
pte_t
srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{
        pte_t pte;

        if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
        page = (srmmu_virt_to_phys(page) >> SRMMU_PTE_PPN_PADDR_SHIFT);
        pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
        pte_val(pte) |= pgprot_val(pgprot);
        return pte;
}

void
srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
        unsigned long page = (unsigned long) pmdp;

        page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);

        pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
}

void
srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        unsigned long page = (unsigned long) ptep;

        page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);

        pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
}

pte_t
srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
        return pte;
}
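
/*
 * Software table walk helpers: index the root table with the top bits
 * of the address, the middle table with the next bits, and the page
 * table proper with the page index.
 */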
pgd_t *
srmmu_pgd_offset(struct task_struct * tsk, unsigned long address)
{
        return ((pgd_t *) tsk->tss.pgd_ptr) +
                ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
}

pmd_t *
srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
        return ((pmd_t *) pgd_page(*dir)) +
                ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

pte_t *
srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
        return ((pte_t *) pmd_page(*dir)) +
                ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
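
/*
 * Every MMU context owns a slot in the hardware context table holding
 * the root pointer for that address space.  Installing a new page
 * directory for a task therefore means rewriting its context table
 * entry and flushing the TLB.
 */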
void
srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
{
        if(tsk->tss.context != -1) {
                pgd_t *ctable_ptr = 0;
                ctable_ptr = (pgd_t *) srmmu_phys_to_virt(srmmu_get_ctable_ptr());
                ctable_ptr += tsk->tss.context;
                srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);

                srmmu_flush_whole_tlb();
        }

        tsk->tss.pgd_ptr = (unsigned long) pgdir;

        return;
}
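
/*
 * Page table allocation.  The doubled pmd_none()/pgd_none() test is
 * deliberate: get_free_page(GFP_KERNEL) may sleep, so someone else can
 * install an entry in the meantime, in which case the fresh page is
 * given back.  Kernel page tables are marked MAP_PAGE_RESERVED so the
 * reference counting in the *_reuse() routines leaves them alone.
 */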
void
srmmu_pte_free_kernel(pte_t *pte)
{
        mem_map[MAP_NR(pte)] = 1;
        free_page((unsigned long) pte);
}

pte_t *
srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        pte_t *page;

        address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
        if (srmmu_pmd_none(*pmd)) {
                page = (pte_t *) get_free_page(GFP_KERNEL);
                if (srmmu_pmd_none(*pmd)) {
                        if (page) {
                                srmmu_pmd_set(pmd, page);
                                mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                                return page + address;
                        }
                        srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (srmmu_pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
                srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
                return NULL;
        }
        return (pte_t *) srmmu_pmd_page(*pmd) + address;
}

void
srmmu_pmd_free_kernel(pmd_t *pmd)
{
        mem_map[MAP_NR(pmd)] = 1;
        free_page((unsigned long) pmd);
}

pmd_t *
srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        pmd_t *page;

        address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
        if (srmmu_pgd_none(*pgd)) {
                page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (srmmu_pgd_none(*pgd)) {
                        if (page) {
                                srmmu_pgd_set(pgd, page);
                                mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                                return page + address;
                        }
                        srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (srmmu_pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
                srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
                return NULL;
        }
        return (pmd_t *) srmmu_pgd_page(*pgd) + address;
}

void
srmmu_pte_free(pte_t *pte)
{
        free_page((unsigned long) pte);
}

pte_t *
srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
        pte_t *page;

        address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
        if (srmmu_pmd_none(*pmd)) {
                page = (pte_t *) get_free_page(GFP_KERNEL);
                if (srmmu_pmd_none(*pmd)) {
                        if (page) {
                                srmmu_pmd_set(pmd, page);
                                mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                                return page + address;
                        }
                        srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (srmmu_pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
                return NULL;
        }
        return (pte_t *) srmmu_pmd_page(*pmd) + address;
}

void
srmmu_pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

pmd_t *
srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
        pmd_t *page;

        address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
        if (srmmu_pgd_none(*pgd)) {
                page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (srmmu_pgd_none(*pgd)) {
                        if (page) {
                                srmmu_pgd_set(pgd, page);
                                mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                                return page + address;
                        }
                        srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (srmmu_pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
                return NULL;
        }
        return (pmd_t *) srmmu_pgd_page(*pgd) + address;
}

void
srmmu_pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pgd_t *
srmmu_pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}
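
/*
 * TLB and context handling.  invalidate() just flushes the whole TLB;
 * switch_to_context() is still a stub that only logs the requested
 * context number.
 */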
void
srmmu_invalidate(void)
{
        srmmu_flush_whole_tlb();
        return;
}

void
srmmu_switch_to_context(int context)
{
        printk("switching to context %d\n", context);

        return;
}
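
/*
 * Map a physical I/O address at the given kernel virtual address.  The
 * bus type lands in the top nibble of the PTE, which presumably selects
 * the high bits of the SRMMU's extended physical address space, and the
 * cacheable bit is cleared since I/O registers must not be cached.
 */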
void
srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
                int bus_type, int rdonly)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = srmmu_pgd_offset(&init_task, virt_addr);
        pmdp = srmmu_pmd_offset(pgdp, virt_addr);
        ptep = srmmu_pte_offset(pmdp, virt_addr);
        pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;

        if(!rdonly)
                pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
        else
                pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);

        pte_val(*ptep) |= (bus_type << 28);
        pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK);
        srmmu_flush_whole_tlb();
        flush_ei_ctx(0x0);

        return;
}
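
/*
 * Walk the page tables the PROM set up, by hand, with MMU-bypass loads.
 * Returns the PTE found with its low two bits recycled as a level tag
 * (0x0 = level-1, 0x1 = level-2, SRMMU_ET_PTE = level-3 page), or 0 if
 * the walk hit an invalid entry.  A PTE at the context table level is
 * considered fatal.
 */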
unsigned int
srmmu_init_twalk(unsigned virt, int trace)
{
        unsigned int wh, root;

        root = (unsigned int) srmmu_get_ctable_ptr();
        if(trace) printk(":0x%x >> ", virt);

        if(trace) printk(" 0x%x :", root);
        wh = ldw_sun4m_bypass(root);
        if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
                if(trace) printk("\n");
                return 0;
        }
        if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
                wh &= ~SRMMU_PTE_ET_MASK;
                wh |= 0x3;
                if(trace) printk("\n");
                printk("AIEEE context table level pte prom mapping!\n");
                prom_halt();
                return 0;
        }

        if(trace) printk(" 0x%x .", wh);
        wh = ldw_sun4m_bypass(
                ((wh & SRMMU_PTD_PTP_MASK) << 4)
                + ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));

        if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
                if(trace) printk("\n");
                return 0;
        }
        if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
                wh &= ~SRMMU_PTE_ET_MASK;
                if(trace) printk("\n");
                return wh;
        }

        if(trace) printk(" 0x%x .", wh);
        wh = ldw_sun4m_bypass(
                ((wh & SRMMU_PTD_PTP_MASK) << 4)
                + ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
        if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
                if(trace) printk("\n");
                return 0;
        }
        if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
                wh &= ~SRMMU_PTE_ET_MASK;
                wh |= 0x1;
                if(trace) printk("\n");
                return wh;
        }

        if(trace) printk(" 0x%x .", wh);
        wh = ldw_sun4m_bypass(
                ((wh & SRMMU_PTD_PTP_MASK) << 4)
                + ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
        if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
                if(trace) printk("\n");
                return 0;
        }
        if(trace) printk(" 0x%x\n", wh);
        return wh;
}
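
/*
 * Dead-simple boot-time bump allocator: round the kernel break up to
 * the (power of two) size, zero the block, and advance the break.
 */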
static void *
srmmu_init_alloc(unsigned long *kbrk, unsigned size)
{
        register unsigned mask = size - 1;
        register unsigned long ret;

        if(size==0) return 0x0;
        if(size & mask) {
                printk("panic: srmmu_init_alloc botch\n");
                prom_halt();
        }
        ret = (*kbrk + mask) & ~mask;
        *kbrk = ret + size;
        memset((void*) ret, 0, size);
        return (void*) ret;
}

extern unsigned long srmmu_data_fault, srmmu_text_fault;
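
/*
 * Rewrite the text-fault and data-fault slots of the trap table so they
 * vector into the SRMMU fault handlers.  Each slot holds four
 * instructions: load a fault-type code into %l3, branch to the handler
 * (with the read of %psr sitting in the branch delay slot), then a nop.
 */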
void
srmmu_patch_fhandlers(void)
{
        sparc_ttable[SP_TRAP_TFLT].inst_one = SPARC_MOV_CONST_L3(0x1);
        sparc_ttable[SP_TRAP_TFLT].inst_two =
                SPARC_BRANCH((unsigned long) &srmmu_text_fault,
                             (unsigned long) &sparc_ttable[SP_TRAP_TFLT].inst_two);
        sparc_ttable[SP_TRAP_TFLT].inst_three = SPARC_RD_PSR_L0;
        sparc_ttable[SP_TRAP_TFLT].inst_four = SPARC_NOP;

        sparc_ttable[SP_TRAP_DFLT].inst_one = SPARC_MOV_CONST_L3(0x9);
        sparc_ttable[SP_TRAP_DFLT].inst_two =
                SPARC_BRANCH((unsigned long) &srmmu_data_fault,
                             (unsigned long) &sparc_ttable[SP_TRAP_DFLT].inst_two);
        sparc_ttable[SP_TRAP_DFLT].inst_three = SPARC_RD_PSR_L0;
        sparc_ttable[SP_TRAP_DFLT].inst_four = SPARC_NOP;

        return;
}
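
/*
 * Boot-time allocation break, and lnx_root: the hardware context table,
 * one root pointer per MMU context (all aimed at swapper_pg_dir until
 * tasks get contexts of their own).
 */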
static unsigned long mempool;

pgd_t *lnx_root;

extern char start[];
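
/*
 * Build the kernel page tables and take over translation from the PROM:
 * map the kernel image, pre-allocate tables for the I/O and per-cpu
 * areas, inherit the PROM/kadb debugger mappings, then point the MMU at
 * our own context table and flush the TLB.
 */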
unsigned long
srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long vaddr;
        int i;

        pte_t *ptep = 0;
        pmd_t *pmdp = 0;
        pgd_t *pgdp = 0;

        mempool = start_mem;
        lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));

        memset(swapper_pg_dir, 0, PAGE_SIZE);

        pmdp = (pmd_t *) swapper_pg_dir;
        for(i = 0; i < num_contexts; i++)
                srmmu_pgd_set(&lnx_root[i], pmdp);
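
        /* Map the kernel image, page by page, at its virtual home. */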
        for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
                pgdp = srmmu_pgd_offset(&init_task, vaddr);
                if(srmmu_pgd_none(*pgdp)) {
                        pmdp = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
                        srmmu_pgd_set(pgdp, pmdp);
                }

                pmdp = srmmu_pmd_offset(pgdp, vaddr);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PTE*sizeof(pte_t));
                        srmmu_pmd_set(pmdp, ptep);
                }

                ptep = srmmu_pte_offset(pmdp, vaddr);
                *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
        }
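
        /*
         * Pre-allocate page tables covering the I/O remapping area;
         * srmmu_mapioaddr() fills in the actual entries later.
         */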
        for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
            vaddr += SRMMU_PMD_SIZE) {
                pgdp = srmmu_pgd_offset(&init_task, vaddr);
                if(srmmu_pgd_none(*pgdp)) {
                        pmdp = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
                        srmmu_pgd_set(pgdp, pmdp);
                }
                pmdp = srmmu_pmd_offset(pgdp, vaddr);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PTE*sizeof(pte_t));
                        srmmu_pmd_set(pmdp, ptep);
                }
        }
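
        /*
         * Populate the per-cpu area: the first page of each entry maps
         * the kernel's start, the remaining three get fresh pages.
         */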
        printk("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
               (PERCPU_VADDR + PERCPU_LEN));
        for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
            vaddr += PERCPU_ENTSIZE) {
                pgdp = srmmu_pgd_offset(&init_task, vaddr);
                if(srmmu_pgd_none(*pgdp)) {
                        pmdp = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
                        srmmu_pgd_set(pgdp, pmdp);
                }
                pmdp = srmmu_pmd_offset(pgdp, vaddr);
                if(srmmu_pmd_none(*pmdp)) {
                        ptep = srmmu_init_alloc(&mempool,
                                        SRMMU_PTRS_PER_PTE*sizeof(pte_t));
                        srmmu_pmd_set(pmdp, ptep);
                }
                ptep = srmmu_pte_offset(pmdp, vaddr);

                *ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);

                *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
                                       SRMMU_PAGE_KERNEL);

                *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
                                       SRMMU_PAGE_KERNEL);

                *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
                                     SRMMU_PAGE_KERNEL);
        }
        percpu_table = (struct sparc_percpu *) PERCPU_VADDR;
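
        /*
         * Inherit the PROM/kadb debugger mappings so the debugger keeps
         * working once we own the MMU; the level tag returned by
         * srmmu_init_twalk() says at which table level each entry goes.
         */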
        for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
                unsigned int prom_pte;

                prom_pte = srmmu_init_twalk(vaddr, 0);

                if(prom_pte) {
                        pgdp = srmmu_pgd_offset(&init_task, vaddr);
                        if((prom_pte&0x3) == 0x0) {
                                prom_pte &= ~0x3;
                                prom_pte |= SRMMU_ET_PTE;
                                pgd_val(*pgdp) = prom_pte;
                                vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
                                continue;
                        }
                        if(srmmu_pgd_none(*pgdp)) {
                                pmdp = srmmu_init_alloc(&mempool,
                                                SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
                                srmmu_pgd_set(pgdp, pmdp);
                        }

                        pmdp = srmmu_pmd_offset(pgdp, vaddr);
                        if((prom_pte&0x3) == 0x1) {
                                prom_pte &= ~0x3;
                                prom_pte |= SRMMU_ET_PTE;
                                /* A level-2 mapping belongs in the pmd slot. */
                                pmd_val(*pmdp) = prom_pte;
                                vaddr = SRMMU_PMD_ALIGN(vaddr+1);
                                continue;
                        }
                        if(srmmu_pmd_none(*pmdp)) {
                                ptep = srmmu_init_alloc(&mempool,
                                                SRMMU_PTRS_PER_PTE*sizeof(pte_t));
                                srmmu_pmd_set(pmdp, ptep);
                        }

                        ptep = srmmu_pte_offset(pmdp, vaddr);
                        pte_val(*ptep) = prom_pte;
                }
                vaddr += PAGE_SIZE;
        }

        printk("Taking over MMU from PROM.\n");

        srmmu_set_ctable_ptr(srmmu_virt_to_phys((unsigned)lnx_root));

        srmmu_flush_whole_tlb();

        start_mem = PAGE_ALIGN(mempool);
        start_mem = free_area_init(start_mem, end_mem);
        start_mem = PAGE_ALIGN(start_mem);

#if 0
        printk("Testing context switches...\n");
        for(i=0; i<num_contexts; i++)
                srmmu_set_context(i);
        printk("done...\n");
        srmmu_set_context(0);
#endif

        printk("survived...\n");
        return start_mem;
}
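
/*
 * Probe whether the MMU enforces write protection against supervisor
 * stores: write to virtual address zero and see whether the fault
 * handler got a chance to adjust wp_works_ok; afterwards tear the
 * page-zero mapping back down.
 */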
void
srmmu_test_wp(void)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;

        wp_works_ok = -1;

        __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
        if (wp_works_ok < 0)
                wp_works_ok = 0;

        pgdp = srmmu_pgd_offset(&init_task, 0x0);
        pgd_val(*pgdp) = 0x0;

        return;
}
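
/*
 * Publish the SRMMU implementations through the generic Sparc MMU
 * function pointers and constants, keeping the rest of the kernel
 * MMU-model independent.
 */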
void
ld_mmu_srmmu(void)
{
        printk("Loading srmmu MMU routines\n");

        pmd_shift = SRMMU_PMD_SHIFT;
        pmd_size = SRMMU_PMD_SIZE;
        pmd_mask = SRMMU_PMD_MASK;
        pgdir_shift = SRMMU_PGDIR_SHIFT;
        pgdir_size = SRMMU_PGDIR_SIZE;
        pgdir_mask = SRMMU_PGDIR_MASK;

        ptrs_per_pte = SRMMU_PTRS_PER_PTE;
        ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
        ptrs_per_pgd = SRMMU_PTRS_PER_PGD;

        page_none = SRMMU_PAGE_NONE;
        page_shared = SRMMU_PAGE_SHARED;
        page_copy = SRMMU_PAGE_COPY;
        page_readonly = SRMMU_PAGE_READONLY;
        page_kernel = SRMMU_PAGE_KERNEL;
        page_invalid = SRMMU_PAGE_INVALID;

        invalidate = srmmu_invalidate;
        switch_to_context = srmmu_switch_to_context;
        pmd_align = srmmu_pmd_align;
        pgdir_align = srmmu_pgdir_align;
        vmalloc_start = srmmu_vmalloc_start;

        pte_page = srmmu_pte_page;
        pmd_page = srmmu_pmd_page;
        pgd_page = srmmu_pgd_page;

        sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;

        pte_none = srmmu_pte_none;
        pte_present = srmmu_pte_present;
        pte_inuse = srmmu_pte_inuse;
        pte_clear = srmmu_pte_clear;
        pte_reuse = srmmu_pte_reuse;

        pmd_none = srmmu_pmd_none;
        pmd_bad = srmmu_pmd_bad;
        pmd_present = srmmu_pmd_present;
        pmd_inuse = srmmu_pmd_inuse;
        pmd_clear = srmmu_pmd_clear;
        pmd_reuse = srmmu_pmd_reuse;

        pgd_none = srmmu_pgd_none;
        pgd_bad = srmmu_pgd_bad;
        pgd_present = srmmu_pgd_present;
        pgd_inuse = srmmu_pgd_inuse;
        pgd_clear = srmmu_pgd_clear;
        pgd_reuse = srmmu_pgd_reuse;

        mk_pte = srmmu_mk_pte;
        pgd_set = srmmu_pgd_set;
        pte_modify = srmmu_pte_modify;
        pgd_offset = srmmu_pgd_offset;
        pmd_offset = srmmu_pmd_offset;
        pte_offset = srmmu_pte_offset;
        pte_free_kernel = srmmu_pte_free_kernel;
        pmd_free_kernel = srmmu_pmd_free_kernel;
        pte_alloc_kernel = srmmu_pte_alloc_kernel;
        pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
        pte_free = srmmu_pte_free;
        pte_alloc = srmmu_pte_alloc;
        pmd_free = srmmu_pmd_free;
        pmd_alloc = srmmu_pmd_alloc;
        pgd_free = srmmu_pgd_free;
        pgd_alloc = srmmu_pgd_alloc;

        pte_read = srmmu_pte_read;
        pte_write = srmmu_pte_write;
        pte_exec = srmmu_pte_exec;
        pte_dirty = srmmu_pte_dirty;
        pte_young = srmmu_pte_young;
        pte_cow = srmmu_pte_cow;
        pte_wrprotect = srmmu_pte_wrprotect;
        pte_rdprotect = srmmu_pte_rdprotect;
        pte_exprotect = srmmu_pte_exprotect;
        pte_mkclean = srmmu_pte_mkclean;
        pte_mkold = srmmu_pte_mkold;
        pte_uncow = srmmu_pte_uncow;
        pte_mkwrite = srmmu_pte_mkwrite;
        pte_mkread = srmmu_pte_mkread;
        pte_mkexec = srmmu_pte_mkexec;
        pte_mkdirty = srmmu_pte_mkdirty;
        pte_mkyoung = srmmu_pte_mkyoung;
        pte_mkcow = srmmu_pte_mkcow;

        return;
}