This source file includes the following definitions:
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_virt_to_phys
- srmmu_phys_to_virt
- srmmu_vmalloc_start
- srmmu_pmd_page
- srmmu_pgd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_inuse
- srmmu_pte_clear
- srmmu_pte_reuse
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_inuse
- srmmu_pmd_clear
- srmmu_pmd_reuse
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_inuse
- srmmu_pgd_clear
- srmmu_pgd_reuse
- srmmu_pte_read
- srmmu_pte_write
- srmmu_pte_exec
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_cow
- srmmu_pte_wrprotect
- srmmu_pte_rdprotect
- srmmu_pte_exprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_uncow
- srmmu_pte_mkwrite
- srmmu_pte_mkread
- srmmu_pte_mkexec
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_pte_mkcow
- srmmu_mk_pte
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- srmmu_invalidate
- srmmu_set_pte
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_init_twalk
- srmmu_init_alloc
- srmmu_patch_fhandlers
- srmmu_paging_init
- srmmu_test_wp
- ld_mmu_srmmu
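
/* srmmu.c:  SRMMU (SPARC Reference MMU) specific routines for memory
 *           management.
 */
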
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/mp.h>
#include <asm/cache.h>
#include <asm/oplib.h>

extern unsigned long free_area_init(unsigned long, unsigned long);

unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
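
/* Physical memory on sun4m machines need not be one contiguous chunk;
 * the PROM describes it as a table of banks (sp_banks[]).  The two
 * helpers below translate between kernel virtual addresses, which are
 * contiguous from PAGE_OFFSET, and the possibly scattered physical
 * addresses behind them, by walking that table.
 *
 * For example, with two hypothetical banks { base 0x00000000, 16MB }
 * and { base 0x80000000, 16MB }, a virtual address 17MB past
 * PAGE_OFFSET falls past the first bank and resolves to physical
 * 0x80100000, i.e. 1MB into the second bank.
 */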
static inline unsigned int
srmmu_virt_to_phys(unsigned int vaddr)
{
	unsigned int paddr = 0;
	unsigned int voff = (vaddr - PAGE_OFFSET);
	int i;

	for(i=0; sp_banks[i].num_bytes != 0; i++) {
		if(voff < paddr + sp_banks[i].num_bytes) {
			return sp_banks[i].base_addr + voff - paddr;
		} else
			paddr += sp_banks[i].num_bytes;
	}

	printk("srmmu_virt_to_phys: SRMMU virt to phys translation failed, halting\n");
	halt();
}

static inline unsigned long
srmmu_phys_to_virt(unsigned long paddr)
{
	int i;
	unsigned long offset = PAGE_OFFSET;

	for (i=0; sp_banks[i].num_bytes != 0; i++)
	{
		if (paddr >= sp_banks[i].base_addr &&
		    paddr < (sp_banks[i].base_addr
			     + sp_banks[i].num_bytes)) {
			return (paddr - sp_banks[i].base_addr) + offset;
		} else
			offset += sp_banks[i].num_bytes;
	}
	printk("srmmu_phys_to_virt: Could not make translation, halting...\n");
	halt();
}

unsigned long
srmmu_vmalloc_start(void)
{
	return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
}
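
/* Table entries hold physical addresses, not virtual ones.  Per the
 * SPARC V8 reference MMU, a page table pointer (PTD) or page table
 * entry (PTE) carries the upper bits of a 36-bit physical address, so
 * the physical address is recovered as (entry & mask) << 4; the
 * hardcoded "<< 4" in srmmu_init_twalk() below matches this.  The
 * *_page() helpers undo that encoding and hand back the kernel
 * virtual address of the next-level table or page.
 */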
unsigned long
srmmu_pmd_page(pmd_t pmd)
{
	unsigned long page;

	page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
	return srmmu_phys_to_virt(page);
}

unsigned long
srmmu_pgd_page(pgd_t pgd)
{
	unsigned long page;

	page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
	return srmmu_phys_to_virt(page);
}

unsigned long
srmmu_pte_page(pte_t pte)
{
	unsigned long page;

	page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
	printk("srmmu_pte_page: page = %08lx\n", page);	/* XXX debug output */
	return srmmu_phys_to_virt(page);
}
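
/* Predicates and reference-count helpers for the three table levels.
 * The *_none() tests check for an empty slot, *_present() tests the
 * entry-type (ET) field, and *_inuse()/*_reuse() consult the mem_map[]
 * use count of the page holding the table so tables can be shared.
 */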
int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
int srmmu_pte_present(pte_t pte) { return pte_val(pte) & SRMMU_ET_PTE; }
int srmmu_pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)] != 1; }
void srmmu_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
void srmmu_pte_reuse(pte_t *ptep)
{
	if(!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(ptep)]++;
}

int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
int srmmu_pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
		(srmmu_pmd_page(pmd) > high_memory);
}

int srmmu_pmd_present(pmd_t pmd) { return pmd_val(pmd) & SRMMU_ET_PTD; }
int srmmu_pmd_inuse(pmd_t *pmdp) { return mem_map[MAP_NR(pmdp)] != 1; }
void srmmu_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
void srmmu_pmd_reuse(pmd_t * pmdp)
{
	if (!(mem_map[MAP_NR(pmdp)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(pmdp)]++;
}

int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
int srmmu_pgd_bad(pgd_t pgd)
{
	return ((pgd_val(pgd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
		(srmmu_pgd_page(pgd) > high_memory);
}
int srmmu_pgd_present(pgd_t pgd) { return pgd_val(pgd) & SRMMU_ET_PTD; }
int srmmu_pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)] != 1; }
void srmmu_pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
void srmmu_pgd_reuse(pgd_t *pgdp)
{
	if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(pgdp)]++;
}
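
/* Page protection on the SRMMU is not a set of independent
 * read/write/execute bits as on some other MMUs; a single 3-bit ACC
 * field in the PTE encodes the allowed combination for user and
 * supervisor together (per the SPARC V8 reference MMU: 0 r--, 1 rw-,
 * 2 r-x, 3 rwx, 4 --x, 5 r--/rw-, 6 ---/r-x, 7 ---/rwx).  That is
 * why the protection changers below clear the whole ACC field and
 * then OR in a new encoding rather than flipping individual bits.
 */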
int srmmu_pte_read(pte_t pte) { return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
int srmmu_pte_write(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
int srmmu_pte_exec(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_EXEC; }
int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
int srmmu_pte_young(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_REF; }
int srmmu_pte_cow(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_COW; }

pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
pte_t srmmu_pte_rdprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
pte_t srmmu_pte_exprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
pte_t srmmu_pte_uncow(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
pte_t srmmu_pte_mkread(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
pte_t srmmu_pte_mkexec(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
pte_t srmmu_pte_mkcow(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
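
/* Constructors for table entries.  srmmu_mk_pte() turns an aligned
 * kernel virtual page address plus a protection value into a PTE;
 * srmmu_pgd_set()/srmmu_pmd_set() make a directory slot point at the
 * next-level table, tagging it with the PTD entry type.  A sketch of
 * the PTE encoding, assuming the shift of 4 noted above:
 *
 *	paddr = srmmu_virt_to_phys(page);
 *	pte   = ((paddr >> 4) & SRMMU_PTE_PPN_MASK) | pgprot_val(prot);
 */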
pte_t
srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{
	pte_t pte;

	if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
	page = (srmmu_virt_to_phys(page) >> SRMMU_PTE_PPN_PADDR_SHIFT);
	pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
	pte_val(pte) |= pgprot_val(pgprot);
	return pte;
}

void
srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	unsigned long page = (unsigned long) pmdp;

	page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);

	pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
}

void
srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	unsigned long page = (unsigned long) ptep;

	page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);

	pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
}

pte_t
srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
	return pte;
}
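
/* The three-level table walk.  A 32-bit virtual address is split into
 * a level-1 (pgd), level-2 (pmd) and level-3 (pte) index plus the page
 * offset; on the standard SRMMU layout used here that is 8 + 6 + 6
 * index bits over 4K pages, i.e. 256 pgd entries and 64 entries at
 * each lower level.
 */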
pgd_t *
srmmu_pgd_offset(struct task_struct * tsk, unsigned long address)
{
	return ((pgd_t *) tsk->tss.pgd_ptr) +
		((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
}

pmd_t *
srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return ((pmd_t *) pgd_page(*dir)) +
		((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}

pte_t *
srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	return ((pte_t *) pmd_page(*dir)) +
		((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
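
/* Install a new page directory for a task.  If the task already owns a
 * hardware context, the matching context-table entry is repointed at
 * the new pgd and the whole TLB is flushed so no stale translations
 * survive the switch.
 */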
void
srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
{
	if(tsk->tss.context != -1) {
		pgd_t *ctable_ptr = 0;
		ctable_ptr = (pgd_t *) srmmu_phys_to_virt(srmmu_get_ctable_ptr());
		ctable_ptr += tsk->tss.context;
		srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);

		srmmu_flush_whole_tlb();
	}

	tsk->tss.pgd_ptr = (unsigned long) pgdir;

	return;
}
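
/* Page table allocators.  Note the check/allocate/re-check pattern:
 * get_free_page(GFP_KERNEL) may sleep, so after it returns the slot is
 * tested again in case somebody else installed a table while we were
 * waiting; if so, the freshly allocated page is given back.  Kernel
 * tables are additionally marked MAP_PAGE_RESERVED so they are never
 * treated as freeable user pages.
 */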
void
srmmu_pte_free_kernel(pte_t *pte)
{
	mem_map[MAP_NR(pte)] = 1;
	free_page((unsigned long) pte);
}

pte_t *
srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *page;

	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
	if (srmmu_pmd_none(*pmd)) {
		page = (pte_t *) get_free_page(GFP_KERNEL);
		if (srmmu_pmd_none(*pmd)) {
			if (page) {
				srmmu_pmd_set(pmd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (srmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
		srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
		return NULL;
	}
	return (pte_t *) srmmu_pmd_page(*pmd) + address;
}

void
srmmu_pmd_free_kernel(pmd_t *pmd)
{
	mem_map[MAP_NR(pmd)] = 1;
	free_page((unsigned long) pmd);
}

pmd_t *
srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	pmd_t *page;

	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
	if (srmmu_pgd_none(*pgd)) {
		page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (srmmu_pgd_none(*pgd)) {
			if (page) {
				srmmu_pgd_set(pgd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (srmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
		srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
		return NULL;
	}
	return (pmd_t *) srmmu_pgd_page(*pgd) + address;
}

void
srmmu_pte_free(pte_t *pte)
{
	free_page((unsigned long) pte);
}

pte_t *
srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
	pte_t *page;

	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
	if (srmmu_pmd_none(*pmd)) {
		page = (pte_t *) get_free_page(GFP_KERNEL);
		if (srmmu_pmd_none(*pmd)) {
			if (page) {
				srmmu_pmd_set(pmd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (srmmu_pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
		return NULL;
	}
	return (pte_t *) srmmu_pmd_page(*pmd) + address;
}

void
srmmu_pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

pmd_t *
srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
	pmd_t *page;

	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
	if (srmmu_pgd_none(*pgd)) {
		page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (srmmu_pgd_none(*pgd)) {
			if (page) {
				srmmu_pgd_set(pgd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (srmmu_pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
		return NULL;
	}
	return (pmd_t *) srmmu_pgd_page(*pgd) + address;
}

void
srmmu_pgd_free(pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

pgd_t *
srmmu_pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}
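
/* Generic MMU hooks.  TLB invalidation is done with the sledgehammer
 * (flush everything); set_pte simply stores the new value, with no
 * flushing attempted here.  srmmu_switch_to_context() is still a stub
 * that only logs its argument.
 */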
void
srmmu_invalidate(void)
{
	srmmu_flush_whole_tlb();
	return;
}

void
srmmu_set_pte(pte_t *ptep, pte_t entry)
{
	*ptep = entry;
}

void
srmmu_switch_to_context(int context)
{
	printk("switching to context %d\n", context);

	return;
}
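
/* Map a device register page for the kernel.  The PTE is built by
 * hand: bus_type lands in the top nibble of the physical page number
 * (the extra physical address bits that select the bus space on
 * sun4m), the ACC field grants supervisor-only access, and the
 * cacheable bit is cleared since device registers must not be cached.
 * A hypothetical call mapping a register page read-only might look
 * like:
 *
 *	srmmu_mapioaddr(0xf4000000, some_io_vaddr, 0xf, 1);
 */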
void
srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
		int bus_type, int rdonly)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = srmmu_pgd_offset(&init_task, virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;

	if(!rdonly)
		pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
	else
		pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);

	pte_val(*ptep) |= (bus_type << 28);
	pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK);
	srmmu_flush_whole_tlb();
	flush_ei_ctx(0x0);

	return;
}
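
/* Walk the page tables the PROM built, in software, using MMU-bypass
 * loads so we read the tables the hardware actually sees.  Returns 0
 * if the address is unmapped; otherwise the entry that terminated the
 * walk, with the low two bits recoding the level it was found at
 * (0: level-1 region entry, 1: level-2 segment entry, otherwise an
 * ordinary level-3 PTE).  A PTE sitting in the context table itself
 * is not something we can handle, so we halt loudly.
 */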
unsigned int
srmmu_init_twalk(unsigned virt, int trace)
{
	unsigned int wh, root;

	root = (unsigned int) srmmu_get_ctable_ptr();
	if(trace) printk(":0x%x >> ", virt);

	if(trace) printk(" 0x%x :", root);
	wh = ldw_sun4m_bypass(root);
	if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
		if(trace) printk("\n");
		return 0;
	}
	if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
		wh &= ~SRMMU_PTE_ET_MASK;
		wh |= 0x3;
		if(trace) printk("\n");
		printk("AIEEE context table level pte prom mapping!\n");
		prom_halt();
		return 0;
	}

	if(trace) printk(" 0x%x .", wh);
	wh = ldw_sun4m_bypass(
		((wh & SRMMU_PTD_PTP_MASK) << 4)
		+ ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));

	if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
		if(trace) printk("\n");
		return 0;
	}
	if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
		wh &= ~SRMMU_PTE_ET_MASK;
		if(trace) printk("\n");
		return wh;
	}

	if(trace) printk(" 0x%x .", wh);
	wh = ldw_sun4m_bypass(
		((wh & SRMMU_PTD_PTP_MASK) << 4)
		+ ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
	if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
		if(trace) printk("\n");
		return 0;
	}
	if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
		wh &= ~SRMMU_PTE_ET_MASK;
		wh |= 0x1;
		if(trace) printk("\n");
		return wh;
	}

	if(trace) printk(" 0x%x .", wh);
	wh = ldw_sun4m_bypass(
		((wh & SRMMU_PTD_PTP_MASK) << 4)
		+ ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
	if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
		if(trace) printk("\n");
		return 0;
	}
	if(trace) printk(" 0x%x\n", wh);
	return wh;
}
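
/* Boot-time bump allocator.  Hands out zeroed, size-aligned chunks
 * from the kernel break pointer; size must be a power of two.  For
 * instance, srmmu_init_alloc(&mempool, SRMMU_PTRS_PER_PTE*sizeof(pte_t))
 * returns a cleared, properly aligned level-3 page table.
 */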
static void *
srmmu_init_alloc(unsigned long *kbrk, unsigned size)
{
	register unsigned mask = size - 1;
	register unsigned long ret;

	if(size==0) return 0x0;
	if(size & mask) {
		printk("panic: srmmu_init_alloc botch\n");
		prom_halt();
	}
	ret = (*kbrk + mask) & ~mask;
	*kbrk = ret + size;
	memset((void*) ret, 0, size);
	return (void*) ret;
}

extern unsigned long srmmu_data_fault, srmmu_text_fault;
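
/* Patch the trap table so that the text-fault (tt 0x1) and data-fault
 * (tt 0x9) entries vector into the SRMMU fault handlers.  Each trap
 * slot is four instructions: load the trap type into %l3, branch to
 * the handler, read %psr into %l0, and a nop for the delay slot.
 */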
void
srmmu_patch_fhandlers(void)
{
	sparc_ttable[SP_TRAP_TFLT].inst_one = SPARC_MOV_CONST_L3(0x1);
	sparc_ttable[SP_TRAP_TFLT].inst_two =
		SPARC_BRANCH((unsigned long) &srmmu_text_fault,
			     (unsigned long) &sparc_ttable[SP_TRAP_TFLT].inst_two);
	sparc_ttable[SP_TRAP_TFLT].inst_three = SPARC_RD_PSR_L0;
	sparc_ttable[SP_TRAP_TFLT].inst_four = SPARC_NOP;

	sparc_ttable[SP_TRAP_DFLT].inst_one = SPARC_MOV_CONST_L3(0x9);
	sparc_ttable[SP_TRAP_DFLT].inst_two =
		SPARC_BRANCH((unsigned long) &srmmu_data_fault,
			     (unsigned long) &sparc_ttable[SP_TRAP_DFLT].inst_two);
	sparc_ttable[SP_TRAP_DFLT].inst_three = SPARC_RD_PSR_L0;
	sparc_ttable[SP_TRAP_DFLT].inst_four = SPARC_NOP;

	return;
}

static unsigned long mempool;

/* The Linux context table: one root pgd pointer per hardware context. */
pgd_t *lnx_root;

extern char start[];
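
/* Set up the kernel page tables and take the MMU over from the PROM.
 * In order: allocate a context table pointing every context at
 * swapper_pg_dir, map the kernel image and physical memory at
 * KERNBASE, reserve second- and third-level tables for the I/O and
 * per-cpu regions, populate the per-cpu entries (the kernel's start
 * page plus three fresh pages per slot), copy the PROM/kadb mappings
 * found by srmmu_init_twalk() so the debugger and PROM keep working,
 * and finally point the MMU at our context table and flush the TLB.
 */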
unsigned long
srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long vaddr;
	int i;

	pte_t *ptep = 0;
	pmd_t *pmdp = 0;
	pgd_t *pgdp = 0;

	mempool = start_mem;
	lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	/* Point every hardware context at the kernel's page directory. */
	pmdp = (pmd_t *) swapper_pg_dir;
	for(i = 0; i < num_contexts; i++)
		srmmu_pgd_set(&lnx_root[i], pmdp);

	/* Map the kernel and all usable physical memory at KERNBASE. */
	for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
		pgdp = srmmu_pgd_offset(&init_task, vaddr);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
			srmmu_pgd_set(pgdp, pmdp);
		}

		pmdp = srmmu_pmd_offset(pgdp, vaddr);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PTE*sizeof(pte_t));
			srmmu_pmd_set(pmdp, ptep);
		}

		ptep = srmmu_pte_offset(pmdp, vaddr);
		*ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
	}

	/* Reserve page tables for the I/O window; srmmu_mapioaddr()
	 * fills in the actual entries later.
	 */
	for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
	    vaddr += SRMMU_PMD_SIZE) {
		pgdp = srmmu_pgd_offset(&init_task, vaddr);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, vaddr);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PTE*sizeof(pte_t));
			srmmu_pmd_set(pmdp, ptep);
		}
	}

	/* Build the per-cpu region: for each slot, the kernel start
	 * page followed by three freshly allocated pages.
	 */
	printk("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
	       (PERCPU_VADDR + PERCPU_LEN));
	for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
	    vaddr += PERCPU_ENTSIZE) {
		pgdp = srmmu_pgd_offset(&init_task, vaddr);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, vaddr);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = srmmu_init_alloc(&mempool,
						SRMMU_PTRS_PER_PTE*sizeof(pte_t));
			srmmu_pmd_set(pmdp, ptep);
		}
		ptep = srmmu_pte_offset(pmdp, vaddr);

		*ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);

		*ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
				       SRMMU_PAGE_KERNEL);

		*ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
				       SRMMU_PAGE_KERNEL);

		*ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
				     SRMMU_PAGE_KERNEL);
	}
	percpu_table = (struct sparc_percpu *) PERCPU_VADDR;

	/* Inherit the mappings the PROM (and kadb) left behind, copying
	 * region- and segment-level entries wholesale where the PROM
	 * used large mappings.
	 */
	for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
		unsigned int prom_pte;

		prom_pte = srmmu_init_twalk(vaddr, 0);

		if(prom_pte) {
			pgdp = srmmu_pgd_offset(&init_task, vaddr);
			if((prom_pte&0x3) == 0x0) {
				/* Region-level PTE: install it in the pgd slot. */
				prom_pte &= ~0x3;
				prom_pte |= SRMMU_ET_PTE;
				pgd_val(*pgdp) = prom_pte;
				vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
				continue;
			}
			if(srmmu_pgd_none(*pgdp)) {
				pmdp = srmmu_init_alloc(&mempool,
							SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
				srmmu_pgd_set(pgdp, pmdp);
			}

			pmdp = srmmu_pmd_offset(pgdp, vaddr);
			if((prom_pte&0x3) == 0x1) {
				/* Segment-level PTE: install it in the pmd slot. */
				prom_pte &= ~0x3;
				prom_pte |= SRMMU_ET_PTE;
				pmd_val(*pmdp) = prom_pte;
				vaddr = SRMMU_PMD_ALIGN(vaddr+1);
				continue;
			}
			if(srmmu_pmd_none(*pmdp)) {
				ptep = srmmu_init_alloc(&mempool,
							SRMMU_PTRS_PER_PTE*sizeof(pte_t));
				srmmu_pmd_set(pmdp, ptep);
			}

			ptep = srmmu_pte_offset(pmdp, vaddr);
			pte_val(*ptep) = prom_pte;

		}
		vaddr += PAGE_SIZE;
	}

	printk("Taking over MMU from PROM.\n");

	srmmu_set_ctable_ptr(srmmu_virt_to_phys((unsigned)lnx_root));

	srmmu_flush_whole_tlb();

	start_mem = PAGE_ALIGN(mempool);
	start_mem = free_area_init(start_mem, end_mem);
	start_mem = PAGE_ALIGN(start_mem);

#if 0
	printk("Testing context switches...\n");
	for(i=0; i<num_contexts; i++)
		srmmu_set_context(i);
	printk("done...\n");
	srmmu_set_context(0);
#endif

	printk("survived...\n");
	return start_mem;
}
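
/* Check whether the MMU honours write protection for supervisor
 * accesses.  wp_works_ok is preset to -1 and the store below is aimed
 * at virtual address zero; presumably a protected mapping of page
 * zero exists at this point, so the fault handler is expected to
 * catch the access and record the result, after which the test
 * mapping is torn down.
 */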
void
srmmu_test_wp(void)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	wp_works_ok = -1;

	__asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
	if (wp_works_ok < 0)
		wp_works_ok = 0;

	pgdp = srmmu_pgd_offset(&init_task, 0x0);
	pgd_val(*pgdp) = 0x0;

	return;
}
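
/* Install the SRMMU routines into the generic mm function pointers.
 * The sparc kernel picks its MMU implementation (sun4c or SRMMU) at
 * boot, so every pgtable operation is dispatched through these
 * pointers rather than called directly.
 */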
void
ld_mmu_srmmu(void)
{
	printk("Loading srmmu MMU routines\n");

	/* First the constants... */
	pmd_shift = SRMMU_PMD_SHIFT;
	pmd_size = SRMMU_PMD_SIZE;
	pmd_mask = SRMMU_PMD_MASK;
	pgdir_shift = SRMMU_PGDIR_SHIFT;
	pgdir_size = SRMMU_PGDIR_SIZE;
	pgdir_mask = SRMMU_PGDIR_MASK;

	ptrs_per_pte = SRMMU_PTRS_PER_PTE;
	ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
	ptrs_per_pgd = SRMMU_PTRS_PER_PGD;

	page_none = SRMMU_PAGE_NONE;
	page_shared = SRMMU_PAGE_SHARED;
	page_copy = SRMMU_PAGE_COPY;
	page_readonly = SRMMU_PAGE_READONLY;
	page_kernel = SRMMU_PAGE_KERNEL;
	page_invalid = SRMMU_PAGE_INVALID;

	/* ...then the function pointers. */
	invalidate = srmmu_invalidate;
	set_pte = srmmu_set_pte;
	switch_to_context = srmmu_switch_to_context;
	pmd_align = srmmu_pmd_align;
	pgdir_align = srmmu_pgdir_align;
	vmalloc_start = srmmu_vmalloc_start;

	pte_page = srmmu_pte_page;
	pmd_page = srmmu_pmd_page;
	pgd_page = srmmu_pgd_page;

	sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;

	pte_none = srmmu_pte_none;
	pte_present = srmmu_pte_present;
	pte_inuse = srmmu_pte_inuse;
	pte_clear = srmmu_pte_clear;
	pte_reuse = srmmu_pte_reuse;

	pmd_none = srmmu_pmd_none;
	pmd_bad = srmmu_pmd_bad;
	pmd_present = srmmu_pmd_present;
	pmd_inuse = srmmu_pmd_inuse;
	pmd_clear = srmmu_pmd_clear;
	pmd_reuse = srmmu_pmd_reuse;

	pgd_none = srmmu_pgd_none;
	pgd_bad = srmmu_pgd_bad;
	pgd_present = srmmu_pgd_present;
	pgd_inuse = srmmu_pgd_inuse;
	pgd_clear = srmmu_pgd_clear;
	pgd_reuse = srmmu_pgd_reuse;

	mk_pte = srmmu_mk_pte;
	pgd_set = srmmu_pgd_set;
	pte_modify = srmmu_pte_modify;
	pgd_offset = srmmu_pgd_offset;
	pmd_offset = srmmu_pmd_offset;
	pte_offset = srmmu_pte_offset;
	pte_free_kernel = srmmu_pte_free_kernel;
	pmd_free_kernel = srmmu_pmd_free_kernel;
	pte_alloc_kernel = srmmu_pte_alloc_kernel;
	pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
	pte_free = srmmu_pte_free;
	pte_alloc = srmmu_pte_alloc;
	pmd_free = srmmu_pmd_free;
	pmd_alloc = srmmu_pmd_alloc;
	pgd_free = srmmu_pgd_free;
	pgd_alloc = srmmu_pgd_alloc;

	pte_read = srmmu_pte_read;
	pte_write = srmmu_pte_write;
	pte_exec = srmmu_pte_exec;
	pte_dirty = srmmu_pte_dirty;
	pte_young = srmmu_pte_young;
	pte_cow = srmmu_pte_cow;
	pte_wrprotect = srmmu_pte_wrprotect;
	pte_rdprotect = srmmu_pte_rdprotect;
	pte_exprotect = srmmu_pte_exprotect;
	pte_mkclean = srmmu_pte_mkclean;
	pte_mkold = srmmu_pte_mkold;
	pte_uncow = srmmu_pte_uncow;
	pte_mkwrite = srmmu_pte_mkwrite;
	pte_mkread = srmmu_pte_mkread;
	pte_mkexec = srmmu_pte_mkexec;
	pte_mkdirty = srmmu_pte_mkdirty;
	pte_mkyoung = srmmu_pte_mkyoung;
	pte_mkcow = srmmu_pte_mkcow;

	return;
}