This source file includes the following definitions.
- srmmu_pmd_align
- srmmu_pgdir_align
- srmmu_vmalloc_start
- srmmu_pmd_page
- srmmu_pgd_page
- srmmu_pte_page
- srmmu_pte_none
- srmmu_pte_present
- srmmu_pte_inuse
- srmmu_pte_clear
- srmmu_pte_reuse
- srmmu_pmd_none
- srmmu_pmd_bad
- srmmu_pmd_present
- srmmu_pmd_inuse
- srmmu_pmd_clear
- srmmu_pmd_reuse
- srmmu_pgd_none
- srmmu_pgd_bad
- srmmu_pgd_present
- srmmu_pgd_inuse
- srmmu_pgd_clear
- srmmu_pgd_reuse
- srmmu_pte_read
- srmmu_pte_write
- srmmu_pte_exec
- srmmu_pte_dirty
- srmmu_pte_young
- srmmu_pte_cow
- srmmu_pte_wrprotect
- srmmu_pte_rdprotect
- srmmu_pte_exprotect
- srmmu_pte_mkclean
- srmmu_pte_mkold
- srmmu_pte_uncow
- srmmu_pte_mkwrite
- srmmu_pte_mkread
- srmmu_pte_mkexec
- srmmu_pte_mkdirty
- srmmu_pte_mkyoung
- srmmu_pte_mkcow
- srmmu_mk_pte
- srmmu_pgd_set
- srmmu_pmd_set
- srmmu_pte_modify
- srmmu_pgd_offset
- srmmu_pmd_offset
- srmmu_pte_offset
- srmmu_update_rootmmu_dir
- srmmu_pte_free_kernel
- srmmu_pte_alloc_kernel
- srmmu_pmd_free_kernel
- srmmu_pmd_alloc_kernel
- srmmu_pte_free
- srmmu_pte_alloc
- srmmu_pmd_free
- srmmu_pmd_alloc
- srmmu_pgd_free
- srmmu_pgd_alloc
- srmmu_invalidate
- srmmu_set_pte
- srmmu_switch_to_context
- srmmu_mapioaddr
- srmmu_lockarea
- srmmu_unlockarea
- srmmu_get_scsi_buffer
- srmmu_release_scsi_buffer
- srmmu_init_twalk
- srmmu_init_alloc
- srmmu_get_fault_info
- srmmu_paging_init
- srmmu_test_wp
- srmmu_update_mmu_cache
- srmmu_fork_hook
- srmmu_exit_hook
- srmmu_release_hook
- srmmu_flush_hook
- srmmu_task_cacheflush
- ld_mmu_srmmu
1
2
3
4
5
6
7
8 #include <linux/kernel.h>
9
10 #include <asm/page.h>
11 #include <asm/pgtable.h>
12 #include <asm/kdebug.h>
13 #include <asm/vaddrs.h>
14 #include <asm/traps.h>
15 #include <asm/mp.h>
16 #include <asm/cache.h>
17 #include <asm/oplib.h>
18
19 extern unsigned long free_area_init(unsigned long, unsigned long);
20
21 unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
22 unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
23
24 unsigned long
25 srmmu_vmalloc_start(void)
26 {
27 return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
28 }
29
30 unsigned long
31 srmmu_pmd_page(pmd_t pmd)
32 {
33 unsigned long page;
34
35 page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
36 return (page + PAGE_OFFSET);
37 }
38
39 unsigned long
40 srmmu_pgd_page(pgd_t pgd)
41 {
42 unsigned long page;
43
44 page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
45 return (page + PAGE_OFFSET);
46 }
47
48 unsigned long
49 srmmu_pte_page(pte_t pte)
50 {
51 unsigned long page;
52
53 page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
54 return (page + PAGE_OFFSET);
55 }
56
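/* PTE predicates and bookkeeping: a pte is "none" when the word is zero and
 * "present" when its entry-type field is SRMMU_ET_PTE; pte_inuse/pte_reuse
 * track sharing of the page holding the table via the mem_map reference counts. */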
57 int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
58 int srmmu_pte_present(pte_t pte) { return pte_val(pte) & SRMMU_ET_PTE; }
59 int srmmu_pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
60 void srmmu_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
61 void srmmu_pte_reuse(pte_t *ptep)
62 {
63 if(!mem_map[MAP_NR(ptep)].reserved)
64 mem_map[MAP_NR(ptep)].count++;
65 }
66
67 int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
68 int srmmu_pmd_bad(pmd_t pmd)
69 {
70 return ((pmd_val(pmd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
71 (srmmu_pmd_page(pmd) > high_memory);
72 }
73
74 int srmmu_pmd_present(pmd_t pmd) { return pmd_val(pmd) & SRMMU_ET_PTD; }
75 int srmmu_pmd_inuse(pmd_t *pmdp) { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
76 void srmmu_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
77 void srmmu_pmd_reuse(pmd_t * pmdp)
78 {
79 if (!mem_map[MAP_NR(pmdp)].reserved)
80 mem_map[MAP_NR(pmdp)].count++;
81 }
82
83 int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
84 int srmmu_pgd_bad(pgd_t pgd)
85 {
86 return ((pgd_val(pgd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
87 (srmmu_pgd_page(pgd) > high_memory);
88 }
89 int srmmu_pgd_present(pgd_t pgd) { return pgd_val(pgd) & SRMMU_ET_PTD; }
90 int srmmu_pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
91 void srmmu_pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
92 void srmmu_pgd_reuse(pgd_t *pgdp)
93 {
94 if (!mem_map[MAP_NR(pgdp)].reserved)
95 mem_map[MAP_NR(pgdp)].count++;
96 }
97
98
99
100
101
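/* The SRMMU keeps a single access-permission (ACC) field per pte rather than
 * independent read/write/execute bits, so the protection tests below check
 * access levels and the changers clear SRMMU_PTE_ACC_MASK before installing
 * a new level. */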
102 int srmmu_pte_read(pte_t pte) { return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
103 int srmmu_pte_write(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
104 int srmmu_pte_exec(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_EXEC; }
105 int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
106 int srmmu_pte_young(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_REF; }
107 int srmmu_pte_cow(pte_t pte) { return pte_val(pte) & _SRMMU_PAGE_COW; }
108
109
110
111
112 pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
113 pte_t srmmu_pte_rdprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
114 pte_t srmmu_pte_exprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
115 pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
116 pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
117 pte_t srmmu_pte_uncow(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
118 pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
119 pte_t srmmu_pte_mkread(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
120 pte_t srmmu_pte_mkexec(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
121 pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
122 pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
123 pte_t srmmu_pte_mkcow(pte_t pte) { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
124
125
126
127
128
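/* Build a pte for a kernel-virtual page: translate it to a physical page
 * number, shift it into the PPN field and or in the protection bits. */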
129 pte_t
130 srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
131 {
132 pte_t pte;
133
134 if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
135 page = ((page - PAGE_OFFSET) >> SRMMU_PTE_PPN_PADDR_SHIFT);
136 pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
137 pte_val(pte) |= pgprot_val(pgprot);
138 return pte;
139 }
140
141 void
142 srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
143 {
144 unsigned long page = (unsigned long) pmdp;
145
146 page = ((page - PAGE_OFFSET) >> SRMMU_PTD_PTP_PADDR_SHIFT);
147
148 pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
149 }
150
151 void
152 srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
153 {
154 unsigned long page = (unsigned long) ptep;
155
156 page = ((page - PAGE_OFFSET) >> SRMMU_PTD_PTP_PADDR_SHIFT);
157
158 pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
159 }
160
161 pte_t
162 srmmu_pte_modify(pte_t pte, pgprot_t newprot)
163 {
164 pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
165 return pte;
166 }
167
168
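/* Three-level software table walk: index the pgd with the top address bits,
 * then the pmd and pte levels with successively lower fields of the virtual
 * address. */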
169 pgd_t *
170 srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
171 {
172 return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
173 }
174
175
176 pmd_t *
177 srmmu_pmd_offset(pgd_t * dir, unsigned long address)
178 {
179 return ((pmd_t *) pgd_page(*dir)) +
180 ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
181 }
182
183
184 pte_t *
185 srmmu_pte_offset(pmd_t * dir, unsigned long address)
186 {
187 return ((pte_t *) pmd_page(*dir)) +
188 ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
189 }
190
191
192 void
193 srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
194 {
195
196 if(tsk->tss.context != -1) {
197 pgd_t *ctable_ptr = 0;
198 ctable_ptr = (pgd_t *) (srmmu_get_ctable_ptr() + PAGE_OFFSET);
199 ctable_ptr += tsk->tss.context;
200 srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);
201
202 srmmu_flush_whole_tlb();
203 }
204
205 tsk->tss.pgd_ptr = (unsigned long) pgdir;
206
207 return;
208 }
209
210
211
212
213
214
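/* Kernel page-table allocators: pages backing kernel pmd/pte tables are
 * flagged reserved in mem_map so the normal freeing paths leave them alone;
 * the _kernel free routines clear that flag before releasing the page. */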
215 void
216 srmmu_pte_free_kernel(pte_t *pte)
217 {
218 mem_map[MAP_NR(pte)].reserved = 0;
219 free_page((unsigned long) pte);
220 }
221
222 pte_t *
223 srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
224 {
225 pte_t *page;
226
227 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
228 if (srmmu_pmd_none(*pmd)) {
229 page = (pte_t *) get_free_page(GFP_KERNEL);
230 if (srmmu_pmd_none(*pmd)) {
231 if (page) {
232 srmmu_pmd_set(pmd, page);
233 mem_map[MAP_NR(page)].reserved = 1;
234 return page + address;
235 }
236 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
237 return NULL;
238 }
239 free_page((unsigned long) page);
240 }
241 if (srmmu_pmd_bad(*pmd)) {
242 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
243 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
244 return NULL;
245 }
246 return (pte_t *) srmmu_pmd_page(*pmd) + address;
247 }
248
249
250 void
251 srmmu_pmd_free_kernel(pmd_t *pmd)
252 {
253 mem_map[MAP_NR(pmd)].reserved = 0;
254 free_page((unsigned long) pmd);
255 }
256
257 pmd_t *
258 srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
259 {
260 pmd_t *page;
261
262 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
263 if (srmmu_pgd_none(*pgd)) {
264 page = (pmd_t *) get_free_page(GFP_KERNEL);
265 if (srmmu_pgd_none(*pgd)) {
266 if (page) {
267 srmmu_pgd_set(pgd, page);
268 mem_map[MAP_NR(page)].reserved = 1;
269 return page + address;
270 }
271 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
272 return NULL;
273 }
274 free_page((unsigned long) page);
275 }
276 if (srmmu_pgd_bad(*pgd)) {
277 printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
278 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
279 return NULL;
280 }
281 return (pmd_t *) srmmu_pgd_page(*pgd) + address;
282 }
283
284 void
285 srmmu_pte_free(pte_t *pte)
286 {
287 free_page((unsigned long) pte);
288 }
289
290 pte_t *
291 srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
292 {
293 pte_t *page;
294
295 address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
296 if (srmmu_pmd_none(*pmd)) {
297 page = (pte_t *) get_free_page(GFP_KERNEL);
298 if (srmmu_pmd_none(*pmd)) {
299 if (page) {
300 srmmu_pmd_set(pmd, page);
301 return page + address;
302 }
303 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
304 return NULL;
305 }
306 free_page((unsigned long) page);
307 }
308 if (srmmu_pmd_bad(*pmd)) {
309 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
310 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
311 return NULL;
312 }
313 return (pte_t *) srmmu_pmd_page(*pmd) + address;
314 }
315
316
317
318
319
320 void
321 srmmu_pmd_free(pmd_t * pmd)
322 {
323 free_page((unsigned long) pmd);
324 }
325
326 pmd_t *
327 srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
328 {
329 pmd_t *page;
330
331 address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
332 if (srmmu_pgd_none(*pgd)) {
333 page = (pmd_t *) get_free_page(GFP_KERNEL);
334 if (srmmu_pgd_none(*pgd)) {
335 if (page) {
336 srmmu_pgd_set(pgd, page);
337 return page + address;
338 }
339 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
340 return NULL;
341 }
342 free_page((unsigned long) page);
343 }
344 if (srmmu_pgd_bad(*pgd)) {
345 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
346 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
347 return NULL;
348 }
349 return (pmd_t *) srmmu_pgd_page(*pgd) + address;
350 }
351
352 void
353 srmmu_pgd_free(pgd_t *pgd)
354 {
355 free_page((unsigned long) pgd);
356 }
357
358
359
360
361
362 pgd_t *
363 srmmu_pgd_alloc(void)
364 {
365 return (pgd_t *) get_free_page(GFP_KERNEL);
366 }
367
368
369
370
371
372
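/* No fine-grained invalidation here; srmmu_invalidate() simply flushes the
 * whole TLB. */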
373 void
374 srmmu_invalidate(void)
375 {
376 srmmu_flush_whole_tlb();
377 return;
378 }
379
380
381 void srmmu_set_pte(pte_t *ptep, pte_t pteval)
382 {
383
384 *ptep = pteval;
385 }
386
387
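/* Context switching is not wired up yet; this only reports which context
 * would become current. */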
388 void
389 srmmu_switch_to_context(void *vtask)
390 {
391 struct task_struct *tsk = vtask;
392 printk("switching to context %d\n", tsk->tss.context);
393
394 return;
395 }
396
397
398
399
400
401
402
403
404
405
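/* Map a physical device address at virt_addr: the PPN comes from physaddr,
 * the bus type is shifted into the top pte bits (bit 28 and up) to select
 * the physical address space, and the cacheable bit is cleared since this
 * is device space. */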
406 void
407 srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
408 int bus_type, int rdonly)
409 {
410 pgd_t *pgdp;
411 pmd_t *pmdp;
412 pte_t *ptep;
413
414 pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
415 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
416 ptep = srmmu_pte_offset(pmdp, virt_addr);
417 pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;
418
419 if(!rdonly)
420 pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
421 else
422 pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);
423
424 pte_val(*ptep) |= (bus_type << 28);
425 pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK);
426 srmmu_flush_whole_tlb();
427 flush_ei_ctx(0x0);
428
429 return;
430 }
431
432 char *srmmu_lockarea(char *vaddr, unsigned long len)
433 {
434 return vaddr;
435 }
436
437 void srmmu_unlockarea(char *vaddr, unsigned long len)
438 {
439 }
440
441 char *srmmu_get_scsi_buffer(char *vaddr, unsigned long len)
442 {
443 panic("sun4m: get_scsi_buffer() not implemented yet.");
444 }
445
446 void srmmu_release_scsi_buffer(char *vaddr, unsigned long len)
447 {
448 panic("sun4m: release_scsi_buffer() not implemented yet.");
449 }
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
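/* Walk the page tables that the PROM built, reading physical memory through
 * MMU-bypass loads. Returns the matching pte, or 0 when an invalid entry is
 * hit; the 'trace' flag prints each step of the walk. */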
473 unsigned int
474 srmmu_init_twalk(unsigned virt, int trace)
475 {
476 unsigned int wh, root;
477
478 root = (unsigned int) srmmu_get_ctable_ptr();
479 if(trace) printk(":0x%x >> ", virt);
480
481 if(trace) printk(" 0x%x :", root);
482 wh = ldw_sun4m_bypass(root);
483 if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
484 if(trace) printk("\n");
485 return 0;
486 }
487 if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
488 wh &= ~SRMMU_PTE_ET_MASK;
489 wh |= 0x3;
490 if(trace) printk("\n");
491 printk("AIEEE context table level pte prom mapping!\n");
492 prom_halt();
493 return 0;
494 }
495
496 if(trace) printk(" 0x%x .", wh);
497 wh = ldw_sun4m_bypass(
498 ((wh & SRMMU_PTD_PTP_MASK) << 4)
499 + ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));
500
501 if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
502 if(trace) printk("\n");
503 return 0;
504 }
505 if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
506 wh &= ~SRMMU_PTE_ET_MASK;
507 if(trace) printk("\n");
508 return wh;
509 }
510
511 if(trace) printk(" 0x%x .", wh);
512 wh = ldw_sun4m_bypass(
513 ((wh & SRMMU_PTD_PTP_MASK) << 4)
514 + ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
515 if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
516 if(trace) printk("\n");
517 return 0;
518 }
519 if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
520 wh &= ~SRMMU_PTE_ET_MASK;
521 wh |= 0x1;
522 if(trace) printk("\n");
523 return wh;
524 }
525
526 if(trace) printk(" 0x%x .", wh);
527 wh = ldw_sun4m_bypass(
528 ((wh & SRMMU_PTD_PTP_MASK) << 4)
529 + ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
530 if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
531 if(trace) printk("\n");
532 return 0;
533 }
534 if(trace) printk(" 0x%x\n", wh);
535 return wh;
536 }
537
538
539
540
541
542
543
544
545
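/* Boot-time bump allocator: round *kbrk up to the (power-of-two) size,
 * advance the break past the block and return the zeroed memory. */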
546 static void *
547 srmmu_init_alloc(unsigned long *kbrk, unsigned size)
548 {
549 register unsigned mask = size - 1;
550 register unsigned long ret;
551
552 if(size==0) return 0x0;
553 if(size & mask) {
554 printk("panic: srmmu_init_alloc botch\n");
555 prom_halt();
556 }
557 ret = (*kbrk + mask) & ~mask;
558 *kbrk = ret + size;
559 memset((void*) ret, 0, size);
560 return (void*) ret;
561 }
562
563
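/* Stub: fault decoding is not implemented yet, so no fault information is
 * returned. */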
564 int
565 srmmu_get_fault_info(unsigned long *address, unsigned long *error_code,
566 unsigned long from_user)
567 {
568
569 return 0;
570 }
571
572
573
574
575
576
577
578 static unsigned long mempool;
579
580
581
582
583 pgd_t *lnx_root;
584
585 extern char start[];
586
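/* Build the kernel page tables at boot time: point every MMU context at
 * swapper_pg_dir, map the kernel image, pre-allocate tables for the I/O
 * window, map the DVMA and per-cpu areas, copy the PROM/KADB mappings, then
 * install our own context table pointer and take over the MMU. */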
587 unsigned long
588 srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
589 {
590 unsigned long vaddr;
591 int i;
592
593 pte_t *ptep = 0;
594 pmd_t *pmdp = 0;
595 pgd_t *pgdp = 0;
596
597 mempool = start_mem;
598 lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));
599
600 memset(swapper_pg_dir, 0, PAGE_SIZE);
601
602
603
604
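/* All contexts initially share the kernel's swapper_pg_dir. */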
605 pmdp = (pmd_t *) swapper_pg_dir;
606 for(i = 0; i < num_contexts; i++)
607 srmmu_pgd_set(&lnx_root[i], pmdp);
608
609
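/* Map the kernel image: one pte per page from KERNBASE up to end_mem. */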
610 for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
611 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
612 if(srmmu_pgd_none(*pgdp)) {
613 pmdp = srmmu_init_alloc(&mempool,
614 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
615 srmmu_pgd_set(pgdp, pmdp);
616 }
617
618 pmdp = srmmu_pmd_offset(pgdp, vaddr);
619 if(srmmu_pmd_none(*pmdp)) {
620 ptep = srmmu_init_alloc(&mempool,
621 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
622 srmmu_pmd_set(pmdp, ptep);
623 }
624
625 ptep = srmmu_pte_offset(pmdp, vaddr);
626 *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
627 }
628
629
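/* Pre-allocate pmd and pte tables covering the I/O window; the ptes
 * themselves are installed later, e.g. by srmmu_mapioaddr(). */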
630 for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
631 vaddr += SRMMU_PMD_SIZE) {
632 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
633 if(srmmu_pgd_none(*pgdp)) {
634 pmdp = srmmu_init_alloc(&mempool,
635 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
636 srmmu_pgd_set(pgdp, pmdp);
637 }
638 pmdp = srmmu_pmd_offset(pgdp, vaddr);
639 if(srmmu_pmd_none(*pmdp)) {
640 ptep = srmmu_init_alloc(&mempool,
641 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
642 srmmu_pmd_set(pmdp, ptep);
643 }
644 }
645
646
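/* Map the DVMA area to freshly allocated pages and mark them non-cacheable. */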
647 for(vaddr = (DVMA_VADDR); vaddr < (DVMA_VADDR + DVMA_LEN);
648 vaddr += PAGE_SIZE) {
649 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
650 if(srmmu_pgd_none(*pgdp)) {
651 pmdp = srmmu_init_alloc(&mempool,
652 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
653 srmmu_pgd_set(pgdp, pmdp);
654 }
655 pmdp = srmmu_pmd_offset(pgdp, vaddr);
656 if(srmmu_pmd_none(*pmdp)) {
657 ptep = srmmu_init_alloc(&mempool,
658 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
659 srmmu_pmd_set(pmdp, ptep);
660 }
661
662 ptep = srmmu_pte_offset(pmdp, vaddr);
663 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE), SRMMU_PAGE_KERNEL);
664 pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK);
665 }
666 srmmu_flush_whole_tlb();
667 flush_ei_ctx(0x0);
668
669
670 #if 0
671 prom_printf("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
672 (PERCPU_VADDR + PERCPU_LEN));
673 #endif
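/* Per-cpu area: each PERCPU_ENTSIZE slot maps the kernel entry page
 * ('start') followed by three newly allocated pages. */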
674 for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
675 vaddr += PERCPU_ENTSIZE) {
676 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
677 if(srmmu_pgd_none(*pgdp)) {
678 pmdp = srmmu_init_alloc(&mempool,
679 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
680 srmmu_pgd_set(pgdp, pmdp);
681 }
682 pmdp = srmmu_pmd_offset(pgdp, vaddr);
683 if(srmmu_pmd_none(*pmdp)) {
684 ptep = srmmu_init_alloc(&mempool,
685 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
686 srmmu_pmd_set(pmdp, ptep);
687 }
688 ptep = srmmu_pte_offset(pmdp, vaddr);
689
690 *ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);
691
692 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
693 SRMMU_PAGE_KERNEL);
694
695 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
696 SRMMU_PAGE_KERNEL);
697
698 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
699 SRMMU_PAGE_KERNEL);
700 }
701 percpu_table = (struct sparc_percpu *) PERCPU_VADDR;
702
703
704
705
706
707
708
709
710
711
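/* Carry over the PROM and kernel debugger (KADB) mappings so the PROM
 * remains usable after the takeover; large PROM mappings are copied at the
 * upper table levels, the rest page by page. */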
712 for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
713 unsigned int prom_pte;
714
715 prom_pte = srmmu_init_twalk(vaddr, 0);
716
717 if(prom_pte) {
718 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
719 if((prom_pte&0x3) == 0x0) {
720 prom_pte &= ~0x3;
721 prom_pte |= SRMMU_ET_PTE;
722 pgd_val(*pgdp) = prom_pte;
723 vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
724 continue;
725 }
726 if(srmmu_pgd_none(*pgdp)) {
727 pmdp = srmmu_init_alloc(&mempool,
728 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
729 srmmu_pgd_set(pgdp, pmdp);
730 }
731
732 pmdp = srmmu_pmd_offset(pgdp, vaddr);
733 if((prom_pte&0x3) == 0x1) {
734 prom_pte &= ~0x3;
735 prom_pte |= SRMMU_ET_PTE;
736 pgd_val(*pgdp) = prom_pte;
737 vaddr = SRMMU_PMD_ALIGN(vaddr+1);
738 continue;
739 }
740 if(srmmu_pmd_none(*pmdp)) {
741 ptep = srmmu_init_alloc(&mempool,
742 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
743 srmmu_pmd_set(pmdp, ptep);
744 }
745
746 ptep = srmmu_pte_offset(pmdp, vaddr);
747 pte_val(*ptep) = prom_pte;
748
749 }
750 vaddr += PAGE_SIZE;
751 }
752
753
754
755
756
757
758
759 prom_printf("Taking over MMU from PROM.\n");
760
761 srmmu_set_ctable_ptr(((unsigned)lnx_root) - PAGE_OFFSET);
762
763 srmmu_flush_whole_tlb();
764
765
766 start_mem = PAGE_ALIGN(mempool);
767 start_mem = free_area_init(start_mem, end_mem);
768 start_mem = PAGE_ALIGN(start_mem);
769
770 #if 0
771 prom_printf("Testing context switches...\n");
772 for(i=0; i<num_contexts; i++)
773 srmmu_set_context(i);
774 prom_printf("done...\n");
775 srmmu_set_context(0);
776 #endif
777
778 prom_printf("survived...\n");
779 return start_mem;
780 }
781
782
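/* Probe write protection: preset wp_works_ok to -1 and store to virtual
 * address zero; if nothing records otherwise, assume write protection does
 * not work. The pgd entry covering address zero is cleared afterwards. */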
783 void
784 srmmu_test_wp(void)
785 {
786 pgd_t *pgdp;
787
788 wp_works_ok = -1;
789
790
791
792
793
794
795 __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
796 if (wp_works_ok < 0)
797 wp_works_ok = 0;
798
799 pgdp = srmmu_pgd_offset(init_task.mm, 0x0);
800 pgd_val(*pgdp) = 0x0;
801
802 return;
803 }
804
805 void srmmu_update_mmu_cache(struct vm_area_struct * vma,
806 unsigned long address, pte_t pte)
807 {
808 printk("WHOOPS, update_mmu_cache called on a SRMMU!\n");
809 panic("SRMMU bolixed...");
810 }
811
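/* The process lifetime hooks below are all no-ops on the SRMMU. */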
812 void
813 srmmu_fork_hook(void *vtask, unsigned long kthread_usp)
814 {
815 return;
816 }
817
818 void
819 srmmu_exit_hook(void *vtask)
820 {
821 return;
822 }
823
824 void
825 srmmu_release_hook(void *vtask)
826 {
827 return;
828 }
829
830 void
831 srmmu_flush_hook(void *vtask)
832 {
833 return;
834 }
835
836 void
837 srmmu_task_cacheflush(void *vtask)
838 {
839 return;
840 }
841
842
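/* Install the SRMMU implementation: the architecture-independent sparc mm
 * code operates through these global constants and function pointers, which
 * are filled in here at load time. */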
843 void
844 ld_mmu_srmmu(void)
845 {
846 prom_printf("Loading srmmu MMU routines\n");
847
848
849 pmd_shift = SRMMU_PMD_SHIFT;
850 pmd_size = SRMMU_PMD_SIZE;
851 pmd_mask = SRMMU_PMD_MASK;
852 pgdir_shift = SRMMU_PGDIR_SHIFT;
853 pgdir_size = SRMMU_PGDIR_SIZE;
854 pgdir_mask = SRMMU_PGDIR_MASK;
855
856 ptrs_per_pte = SRMMU_PTRS_PER_PTE;
857 ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
858 ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
859
860 page_none = SRMMU_PAGE_NONE;
861 page_shared = SRMMU_PAGE_SHARED;
862 page_copy = SRMMU_PAGE_COPY;
863 page_readonly = SRMMU_PAGE_READONLY;
864 page_kernel = SRMMU_PAGE_KERNEL;
865 page_invalid = SRMMU_PAGE_INVALID;
866
867
868 invalidate = srmmu_invalidate;
869 set_pte = srmmu_set_pte;
870 switch_to_context = srmmu_switch_to_context;
871 pmd_align = srmmu_pmd_align;
872 pgdir_align = srmmu_pgdir_align;
873 vmalloc_start = srmmu_vmalloc_start;
874
875 pte_page = srmmu_pte_page;
876 pmd_page = srmmu_pmd_page;
877 pgd_page = srmmu_pgd_page;
878
879 sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
880
881 pte_none = srmmu_pte_none;
882 pte_present = srmmu_pte_present;
883 pte_inuse = srmmu_pte_inuse;
884 pte_clear = srmmu_pte_clear;
885 pte_reuse = srmmu_pte_reuse;
886
887 pmd_none = srmmu_pmd_none;
888 pmd_bad = srmmu_pmd_bad;
889 pmd_present = srmmu_pmd_present;
890 pmd_inuse = srmmu_pmd_inuse;
891 pmd_clear = srmmu_pmd_clear;
892 pmd_reuse = srmmu_pmd_reuse;
893
894 pgd_none = srmmu_pgd_none;
895 pgd_bad = srmmu_pgd_bad;
896 pgd_present = srmmu_pgd_present;
897 pgd_inuse = srmmu_pgd_inuse;
898 pgd_clear = srmmu_pgd_clear;
899 pgd_reuse = srmmu_pgd_reuse;
900
901 mk_pte = srmmu_mk_pte;
902 pgd_set = srmmu_pgd_set;
903 pte_modify = srmmu_pte_modify;
904 pgd_offset = srmmu_pgd_offset;
905 pmd_offset = srmmu_pmd_offset;
906 pte_offset = srmmu_pte_offset;
907 pte_free_kernel = srmmu_pte_free_kernel;
908 pmd_free_kernel = srmmu_pmd_free_kernel;
909 pte_alloc_kernel = srmmu_pte_alloc_kernel;
910 pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
911 pte_free = srmmu_pte_free;
912 pte_alloc = srmmu_pte_alloc;
913 pmd_free = srmmu_pmd_free;
914 pmd_alloc = srmmu_pmd_alloc;
915 pgd_free = srmmu_pgd_free;
916 pgd_alloc = srmmu_pgd_alloc;
917
918 pte_read = srmmu_pte_read;
919 pte_write = srmmu_pte_write;
920 pte_exec = srmmu_pte_exec;
921 pte_dirty = srmmu_pte_dirty;
922 pte_young = srmmu_pte_young;
923 pte_cow = srmmu_pte_cow;
924 pte_wrprotect = srmmu_pte_wrprotect;
925 pte_rdprotect = srmmu_pte_rdprotect;
926 pte_exprotect = srmmu_pte_exprotect;
927 pte_mkclean = srmmu_pte_mkclean;
928 pte_mkold = srmmu_pte_mkold;
929 pte_uncow = srmmu_pte_uncow;
930 pte_mkwrite = srmmu_pte_mkwrite;
931 pte_mkread = srmmu_pte_mkread;
932 pte_mkexec = srmmu_pte_mkexec;
933 pte_mkdirty = srmmu_pte_mkdirty;
934 pte_mkyoung = srmmu_pte_mkyoung;
935 pte_mkcow = srmmu_pte_mkcow;
936 get_fault_info = srmmu_get_fault_info;
937 update_mmu_cache = srmmu_update_mmu_cache;
938 mmu_exit_hook = srmmu_exit_hook;
939 mmu_fork_hook = srmmu_fork_hook;
940 mmu_release_hook = srmmu_release_hook;
941 mmu_flush_hook = srmmu_flush_hook;
942 mmu_task_cacheflush = srmmu_task_cacheflush;
943 mmu_lockarea = srmmu_lockarea;
944 mmu_unlockarea = srmmu_unlockarea;
945 mmu_get_scsi_buffer = srmmu_get_scsi_buffer;
946 mmu_release_scsi_buffer = srmmu_release_scsi_buffer;
947 }