root/arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_pmd_align
  2. srmmu_pgdir_align
  3. srmmu_virt_to_phys
  4. srmmu_phys_to_virt
  5. srmmu_vmalloc_start
  6. srmmu_pmd_page
  7. srmmu_pgd_page
  8. srmmu_pte_page
  9. srmmu_pte_none
  10. srmmu_pte_present
  11. srmmu_pte_inuse
  12. srmmu_pte_clear
  13. srmmu_pte_reuse
  14. srmmu_pmd_none
  15. srmmu_pmd_bad
  16. srmmu_pmd_present
  17. srmmu_pmd_inuse
  18. srmmu_pmd_clear
  19. srmmu_pmd_reuse
  20. srmmu_pgd_none
  21. srmmu_pgd_bad
  22. srmmu_pgd_present
  23. srmmu_pgd_inuse
  24. srmmu_pgd_clear
  25. srmmu_pgd_reuse
  26. srmmu_pte_read
  27. srmmu_pte_write
  28. srmmu_pte_exec
  29. srmmu_pte_dirty
  30. srmmu_pte_young
  31. srmmu_pte_cow
  32. srmmu_pte_wrprotect
  33. srmmu_pte_rdprotect
  34. srmmu_pte_exprotect
  35. srmmu_pte_mkclean
  36. srmmu_pte_mkold
  37. srmmu_pte_uncow
  38. srmmu_pte_mkwrite
  39. srmmu_pte_mkread
  40. srmmu_pte_mkexec
  41. srmmu_pte_mkdirty
  42. srmmu_pte_mkyoung
  43. srmmu_pte_mkcow
  44. srmmu_mk_pte
  45. srmmu_pgd_set
  46. srmmu_pmd_set
  47. srmmu_pte_modify
  48. srmmu_pgd_offset
  49. srmmu_pmd_offset
  50. srmmu_pte_offset
  51. srmmu_update_rootmmu_dir
  52. srmmu_pte_free_kernel
  53. srmmu_pte_alloc_kernel
  54. srmmu_pmd_free_kernel
  55. srmmu_pmd_alloc_kernel
  56. srmmu_pte_free
  57. srmmu_pte_alloc
  58. srmmu_pmd_free
  59. srmmu_pmd_alloc
  60. srmmu_pgd_free
  61. srmmu_pgd_alloc
  62. srmmu_invalidate
  63. srmmu_set_pte
  64. srmmu_switch_to_context
  65. srmmu_mapioaddr
  66. srmmu_init_twalk
  67. srmmu_init_alloc
  68. srmmu_patch_fhandlers
  69. srmmu_paging_init
  70. srmmu_test_wp
  71. ld_mmu_srmmu

   1 /* srmmu.c:  SRMMU specific routines for memory management.
   2  *
   3  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   4  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@lab.ipmce.su)
   5  */
   6 
   7 #include <linux/kernel.h>  /* for printk */
   8 
   9 #include <asm/page.h>
  10 #include <asm/pgtable.h>
  11 #include <asm/kdebug.h>
  12 #include <asm/vaddrs.h>
  13 #include <asm/traps.h>
  14 #include <asm/mp.h>
  15 #include <asm/cache.h>
  16 #include <asm/oplib.h>
  17 
  18 extern unsigned long free_area_init(unsigned long, unsigned long);
  19 
  20 unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
  21 unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
  22 
  23 /* Idea taken from Hamish McDonald's MC680x0 Linux code, nice job.
  24  * Many of the page table/directory functions on the SRMMU use this
  25  * routine.
  26  *
  27  * Having a complete physical ram structure walk happen for each
  28  * invocation is quite costly.  However, this does do some nice
  29  * sanity checking and we'll see when our maps don't match.  Eventually
  30  * when I trust my code I will just do a direct mmu probe in mk_pte().
  31  */
  32 static inline unsigned int
  33 srmmu_virt_to_phys(unsigned int vaddr)
  34 {
  35         unsigned int paddr = 0;
  36         unsigned int voff = (vaddr - PAGE_OFFSET);
  37         int i;
  38 
  39         for(i=0; sp_banks[i].num_bytes != 0; i++) {
  40                 if(voff < paddr + sp_banks[i].num_bytes) {
  41                         /* This matches. */
  42                         return sp_banks[i].base_addr + voff - paddr;
  43                 } else
  44                         paddr += sp_banks[i].num_bytes;
  45         }
  46         /* Shit, gotta consult the MMU, this shouldn't happen... */
  47         printk("srmmu_virt_to_phys: SRMMU virt to phys translation failed, halting\n");
  48         halt();
  49 }               
  50 
  51 static inline unsigned long
  52 srmmu_phys_to_virt(unsigned long paddr)
  53 {
  54         int i;
  55         unsigned long offset = PAGE_OFFSET;
  56 
  57         for (i=0; sp_banks[i].num_bytes != 0; i++)
  58         {
  59                 if (paddr >= sp_banks[i].base_addr &&
  60                     paddr < (sp_banks[i].base_addr
  61                              + sp_banks[i].num_bytes)) {
  62                         return (paddr - sp_banks[i].base_addr) + offset;
  63                 } else
  64                         offset += sp_banks[i].num_bytes;
  65         }
  66         printk("srmmu_phys_to_virt: Could not make translation, halting...\n");
  67         halt();
  68 }
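
/* Illustrative sketch (editor's addition, not part of the original file):
 * the two bank walks above are inverses for any kernel virtual address
 * that lies inside a registered sp_banks[] entry.  The helper name and
 * its argument are hypothetical.
 */
#if 0
static void srmmu_translation_selfcheck(unsigned long check_vaddr)
{
        unsigned long pa = srmmu_virt_to_phys(check_vaddr);

        /* phys_to_virt() should undo virt_to_phys() exactly. */
        if (srmmu_phys_to_virt(pa) != check_vaddr)
                printk("srmmu: translation mismatch for %08lx\n", check_vaddr);
}
#endif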
  69 
  70 unsigned long
  71 srmmu_vmalloc_start(void)
  72 {
  73         return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
  74 }
  75 
  76 unsigned long 
  77 srmmu_pmd_page(pmd_t pmd)
  78 {
  79         unsigned long page;
  80 
  81         page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  82         return srmmu_phys_to_virt(page);
  83 }
  84 
  85 unsigned long
  86 srmmu_pgd_page(pgd_t pgd)
  87 {
  88         unsigned long page;
  89 
  90         page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  91         return srmmu_phys_to_virt(page);
  92 }
  93 
  94 unsigned long 
  95 srmmu_pte_page(pte_t pte)
  96 {
  97         unsigned long page;
  98 
  99         page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
 100         printk("srmmu_pte_page: page = %08lx\n", page);
 101         return srmmu_phys_to_virt(page);
 102 }
 103 
 104 int srmmu_pte_none(pte_t pte)           { return !pte_val(pte); }
 105 int srmmu_pte_present(pte_t pte)        { return pte_val(pte) & SRMMU_ET_PTE; }
 106 int srmmu_pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)] != 1; }
 107 void srmmu_pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
 108 void srmmu_pte_reuse(pte_t *ptep)
 109 {
 110   if(!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
 111     mem_map[MAP_NR(ptep)]++;
 112 }
 113 
 114 int srmmu_pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
 115 int srmmu_pmd_bad(pmd_t pmd)
 116 {
 117         return ((pmd_val(pmd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
 118                 (srmmu_pmd_page(pmd) > high_memory);
 119 }
 120 
 121 int srmmu_pmd_present(pmd_t pmd)        { return pmd_val(pmd) & SRMMU_ET_PTD; }
 122 int srmmu_pmd_inuse(pmd_t *pmdp)        { return mem_map[MAP_NR(pmdp)] != 1; }
 123 void srmmu_pmd_clear(pmd_t *pmdp)       { pmd_val(*pmdp) = 0; }
 124 void srmmu_pmd_reuse(pmd_t * pmdp)
 125 {
 126         if (!(mem_map[MAP_NR(pmdp)] & MAP_PAGE_RESERVED))
 127                 mem_map[MAP_NR(pmdp)]++;
 128 }
 129 
 130 int srmmu_pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
 131 int srmmu_pgd_bad(pgd_t pgd)
 132 {
 133         return ((pgd_val(pgd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
 134                 (srmmu_pgd_page(pgd) > high_memory);
 135 }
 136 int srmmu_pgd_present(pgd_t pgd)        { return pgd_val(pgd) & SRMMU_ET_PTD; }
 137 int srmmu_pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)] != 1; }
 138 void srmmu_pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }
 139 void srmmu_pgd_reuse(pgd_t *pgdp)
 140 {
 141   if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
 142     mem_map[MAP_NR(pgdp)]++;
 143 }
 144 
 145 /*
 146  * The following only work if pte_present() is true.
 147  * Undefined behaviour if not..
 148  */
 149 int srmmu_pte_read(pte_t pte)           { return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
 150 int srmmu_pte_write(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
 151 int srmmu_pte_exec(pte_t pte)           { return pte_val(pte) & _SRMMU_PAGE_EXEC; }
 152 int srmmu_pte_dirty(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
 153 int srmmu_pte_young(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_REF; }
 154 int srmmu_pte_cow(pte_t pte)            { return pte_val(pte) & _SRMMU_PAGE_COW; }
 155 
 156 /* When we change permissions, we first clear all bits in the ACCESS field
 157  * then apply the wanted bits.
 158  */
 159 pte_t srmmu_pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 160 pte_t srmmu_pte_rdprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
 161 pte_t srmmu_pte_exprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 162 pte_t srmmu_pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
 163 pte_t srmmu_pte_mkold(pte_t pte)        { pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
 164 pte_t srmmu_pte_uncow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
 165 pte_t srmmu_pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 166 pte_t srmmu_pte_mkread(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
 167 pte_t srmmu_pte_mkexec(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 168 pte_t srmmu_pte_mkdirty(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
 169 pte_t srmmu_pte_mkyoung(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
 170 pte_t srmmu_pte_mkcow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
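
/* Illustrative sketch (editor's addition, not part of the original file):
 * because each helper above clears the whole ACCESS field before setting
 * new bits, the last permission change wins; breaking a COW mapping is a
 * single call.  The helper name below is hypothetical.
 */
#if 0
static pte_t example_break_cow(pte_t pte)
{
        /* pte_uncow() wipes the ACC field and installs the non-COW
         * protection, so no manual bit fiddling is needed here.
         */
        return srmmu_pte_mkdirty(srmmu_pte_uncow(pte));
}
#endif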
 171 
 172 /*
 173  * Conversion functions: convert a page and protection to a page entry,
 174  * and a page entry and page directory to the page they refer to.
 175  */
 176 pte_t
 177 srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 178 {
 179         pte_t pte;
 180 
 181         if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
 182         page = (srmmu_virt_to_phys(page) >> SRMMU_PTE_PPN_PADDR_SHIFT);
 183         pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
 184         pte_val(pte) |= pgprot_val(pgprot);
 185         return pte;
 186 }
 187 
 188 void
 189 srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 190 {
 191         unsigned long page = (unsigned long) pmdp;
 192 
 193         page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 194 
 195         pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 196 }
 197 
 198 void
 199 srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 200 {
 201         unsigned long page = (unsigned long) ptep;
 202 
 203         page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 204 
 205         pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 206 }
 207 
 208 pte_t
 209 srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 210 {
 211         pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
 212         return pte;
 213 }
 214 
 215 /* to find an entry in a top-level page table... */
 216 pgd_t *
 217 srmmu_pgd_offset(struct task_struct * tsk, unsigned long address)
 218 {
 219         return ((pgd_t *) tsk->tss.pgd_ptr) +
 220                 ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 221 }
 222 
 223 /* Find an entry in the second-level page table.. */
 224 pmd_t *
 225 srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 226 {
 227         return ((pmd_t *) pgd_page(*dir)) +
 228                 ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 229 }
 230 
 231 /* Find an entry in the third-level page table.. */ 
 232 pte_t *
 233 srmmu_pte_offset(pmd_t * dir, unsigned long address)
 234 {
 235         return ((pte_t *) pmd_page(*dir)) +
 236                 ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 237 }
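
/* Illustrative sketch (editor's addition, not part of the original file):
 * the three offset helpers above compose into a full software table walk,
 * which is exactly what srmmu_paging_init() does below when it builds the
 * kernel mappings.  The helper name is hypothetical and presence checks
 * are omitted.
 */
#if 0
static pte_t *example_lookup_pte(struct task_struct *tsk, unsigned long address)
{
        pgd_t *pgdp = srmmu_pgd_offset(tsk, address);
        pmd_t *pmdp = srmmu_pmd_offset(pgdp, address);

        /* Only meaningful if the pgd/pmd entries are actually present;
         * callers must check that themselves.
         */
        return srmmu_pte_offset(pmdp, address);
}
#endif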
 238 
 239 /* This must update the context register for this process. */
 240 void
 241 srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir) 
 242 {
 243         /* See if this process has a context entry already, like after execve() */
 244         if(tsk->tss.context != -1) {
 245                 pgd_t *ctable_ptr = 0;
 246                 ctable_ptr = (pgd_t *) srmmu_phys_to_virt(srmmu_get_ctable_ptr());
 247                 ctable_ptr += tsk->tss.context;
 248                 srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);
 249                 /* Should flush caches here too... */
 250                 srmmu_flush_whole_tlb();
 251         }
 252 
 253         tsk->tss.pgd_ptr = (unsigned long) pgdir;
 254 
 255         return;
 256 }
 257 
 258 /*
 259  * Allocate and free page tables. The xxx_kernel() versions are
 260  * used to allocate a kernel page table - this turns on ASN bits
 261  * if any, and marks the page tables reserved.
 262  */
 263 void
 264 srmmu_pte_free_kernel(pte_t *pte)
 265 {
 266         mem_map[MAP_NR(pte)] = 1;
 267         free_page((unsigned long) pte);
 268 }
 269 
 270 pte_t *
 271 srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 272 {
 273         pte_t *page;
 274 
 275         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 276         if (srmmu_pmd_none(*pmd)) {
 277                 page = (pte_t *) get_free_page(GFP_KERNEL);
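                /* get_free_page(GFP_KERNEL) may sleep, so the pmd is
                 * re-checked below in case somebody else installed a
                 * page table for it while we were allocating.
                 */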
 278                 if (srmmu_pmd_none(*pmd)) {
 279                         if (page) {
 280                                 srmmu_pmd_set(pmd, page);
 281                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 282                                 return page + address;
 283                         }
 284                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 285                         return NULL;
 286                 }
 287                 free_page((unsigned long) page);
 288         }
 289         if (srmmu_pmd_bad(*pmd)) {
 290                 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
 291                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 292                 return NULL;
 293         }
 294         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 295 }
 296 
 297 /* Full three level on SRMMU */
 298 void
 299 srmmu_pmd_free_kernel(pmd_t *pmd)
 300 {
 301         mem_map[MAP_NR(pmd)] = 1;
 302         free_page((unsigned long) pmd);
 303 }
 304 
 305 pmd_t *
 306 srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 307 {
 308         pmd_t *page;
 309 
 310         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 311         if (srmmu_pgd_none(*pgd)) {
 312                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 313                 if (srmmu_pgd_none(*pgd)) {
 314                         if (page) {
 315                                 srmmu_pgd_set(pgd, page);
 316                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 317                                 return page + address;
 318                         }
 319                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 320                         return NULL;
 321                 }
 322                 free_page((unsigned long) page);
 323         }
 324         if (srmmu_pgd_bad(*pgd)) {
 325                 printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
 326                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 327                 return NULL;
 328         }
 329         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 330 }
 331 
 332 void
 333 srmmu_pte_free(pte_t *pte)
 334 {
 335         free_page((unsigned long) pte);
 336 }
 337 
 338 pte_t *
 339 srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 340 {
 341         pte_t *page;
 342 
 343         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 344         if (srmmu_pmd_none(*pmd)) {
 345                 page = (pte_t *) get_free_page(GFP_KERNEL);
 346                 if (srmmu_pmd_none(*pmd)) {
 347                         if (page) {
 348                                 srmmu_pmd_set(pmd, page);
 349                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 350                                 return page + address;
 351                         }
 352                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 353                         return NULL;
 354                 }
 355                 free_page((unsigned long) page);
 356         }
 357         if (srmmu_pmd_bad(*pmd)) {
 358                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 359                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 360                 return NULL;
 361         }
 362         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 363 }
 364 
 365 /*
 366  * Unlike the two-level ports, the pmd level on the SRMMU is a real
 367  * table, so allocating and freeing a pmd grabs and releases a page.
 368  */
 369 void 
 370 srmmu_pmd_free(pmd_t * pmd)
 371 {
 372         free_page((unsigned long) pmd);
 373 }
 374 
 375 pmd_t *
 376 srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 377 {
 378         pmd_t *page;
 379 
 380         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 381         if (srmmu_pgd_none(*pgd)) {
 382                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 383                 if (srmmu_pgd_none(*pgd)) {
 384                         if (page) {
 385                                 srmmu_pgd_set(pgd, page);
 386                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 387                                 return page + address;
 388                         }
 389                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 390                         return NULL;
 391                 }
 392                 free_page((unsigned long) page);
 393         }
 394         if (srmmu_pgd_bad(*pgd)) {
 395                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 396                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 397                 return NULL;
 398         }
 399         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 400 }
 401 
 402 void
 403 srmmu_pgd_free(pgd_t *pgd)
 404 {
 405         free_page((unsigned long) pgd);
 406 }
 407 
 408 /* A page directory on the srmmu needs 1k, but for now to simplify the
 409  * alignment constraints and allocation we just grab a whole page.
 410  */
 411 
 412 pgd_t *
 413 srmmu_pgd_alloc(void)
 414 {
 415         return (pgd_t *) get_free_page(GFP_KERNEL);
 416 }
 417 
 418 /* Just flush the whole thing for now. We will need module
 419  * specific invalidate routines in certain circumstances,
 420  * because of different flushing facilities and hardware
 421  * bugs.
 422  */
 423 void
 424 srmmu_invalidate(void)
 425 {
 426         srmmu_flush_whole_tlb();
 427         return;
 428 }
 429 
 430 void
 431 srmmu_set_pte(pte_t *ptep, pte_t entry)
 432 {
 433         /* for now... */
 434         *ptep = entry;
 435 }
 436 
 437 /* XXX Needs to be written */
 438 void
 439 srmmu_switch_to_context(int context)
 440 {
 441         printk("switching to context %d\n", context);
 442 
 443         return;
 444 }
 445 
 446 /* Low level IO area allocation on the SRMMU.
 447  *
 448  * I think we can get away with just using a regular page translation,
 449  * just making sure the cacheable bit is off.  I would like to avoid
 450  * having to mess with the IOMMU if at all possible at first.
 451  *
 452  * Apparently the IOMMU is only necessary for SBus devices, maybe VME too.
 453  * We'll see...
 454  */
 455 void
 456 srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
 457                 int bus_type, int rdonly)
 458 {
 459   pgd_t *pgdp;
 460   pmd_t *pmdp;
 461   pte_t *ptep;
 462 
 463   pgdp = srmmu_pgd_offset(&init_task, virt_addr);
 464   pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 465   ptep = srmmu_pte_offset(pmdp, virt_addr);
 466   pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;
 467 
 468   if(!rdonly)
 469           pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
 470   else
 471           pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);
 472 
 473   pte_val(*ptep) |= (bus_type << 28);
 474   pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK); /* Make sure cacheable bit is off. */
 475   srmmu_flush_whole_tlb();
 476   flush_ei_ctx(0x0);
 477 
 478   return;
 479 }
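
/* Illustrative sketch (editor's addition, not part of the original file):
 * a caller hands srmmu_mapioaddr() a device's physical register address,
 * a kernel virtual slot and the bus space bits that land in the high
 * nibble of the PTE.  The helper name and the physical address below are
 * hypothetical; real callers take these values from the PROM device tree.
 */
#if 0
static void example_map_device_regs(void)
{
        srmmu_mapioaddr(0xf0000000, IOBASE_VADDR,
                        0x0 /* bus space bits */, 0 /* read/write */);
}
#endif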
 480 
 481 /* Perform some sort of MMU table walk.
 482  * Long contiguous mappings are not supported (yet?).
 483  *
 484  * Originally written by Peter Zaitcev, modified by David S.
 485  * Miller.  This is only used to copy over the PROM/KADB mappings
 486  * in srmmu_paging_init().
 487  *
 488  * The return value encodes at what level the entry was found,
 489  * basically this is found in the lower 2 bits of the return
 490  * value.  If the return value is zero, there was no valid mapping
 491  * found at all, the low bits for a non-zero return value
 492  * are:
 493  *         0 -- Level 1 PTE
 494  *         1 -- Level 2 PTE
 495  *         2 -- Normal level 3 PTE
 496  *         3 -- Context Table PTE (unlikely, but still)
 497  * 
 498  * Also note that this is called before the context table pointer
 499  * register is changed, so the PROM's entry is still in there.  Also,
 500  * it is safe to assume that context 0 contains the mappings.
 501  */
 502 /* TODO chop out 'trace' when stable */
 503 unsigned int
 504 srmmu_init_twalk(unsigned virt, int trace)
 505 {
 506         unsigned int wh, root;
 507 
 508         root = (unsigned int) srmmu_get_ctable_ptr();
 509         if(trace) printk(":0x%x >> ", virt);
 510 
 511         if(trace) printk(" 0x%x :", root);
 512         wh = ldw_sun4m_bypass(root);
 513         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 514                 if(trace) printk("\n");
 515                 return 0;
 516         }
 517         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 518                 wh &= ~SRMMU_PTE_ET_MASK;
 519                 wh |= 0x3;
 520                 if(trace) printk("\n");
 521                 printk("AIEEE context table level pte prom mapping!\n");
 522                 prom_halt();
 523                 return 0;
 524         }
 525                 
 526         if(trace) printk(" 0x%x .", wh);
 527         wh = ldw_sun4m_bypass(
 528                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 529                               + ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));
 530 
 531         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 532                 if(trace) printk("\n");
 533                 return 0;
 534         }
 535         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 536                 wh &= ~SRMMU_PTE_ET_MASK;
 537                 if(trace) printk("\n");
 538                 return wh;
 539         }
 540 
 541         if(trace) printk(" 0x%x .", wh);
 542         wh = ldw_sun4m_bypass(
 543                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 544                               + ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
 545         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 546                 if(trace) printk("\n");
 547                 return 0;
 548         }
 549         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 550                 wh &= ~SRMMU_PTE_ET_MASK;
 551                 wh |= 0x1;
 552                 if(trace) printk("\n");
 553                 return wh;
 554         }
 555 
 556         if(trace) printk(" 0x%x .", wh);
 557         wh = ldw_sun4m_bypass(
 558                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 559                               + ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
 560         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 561                 if(trace) printk("\n");
 562                 return 0;
 563         }
 564         if(trace) printk(" 0x%x\n", wh);
 565         return wh;
 566 }
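
/* Illustrative sketch (editor's addition, not part of the original file):
 * decoding srmmu_init_twalk()'s return value according to the level
 * encoding described in the comment above.  The helper name is
 * hypothetical.
 */
#if 0
static void example_twalk_decode(unsigned int virt)
{
        unsigned int ent = srmmu_init_twalk(virt, 0);

        if(!ent) {
                printk("no PROM mapping for %08x\n", virt);
                return;
        }
        /* The low two bits give the level; for level 1 and 2 entries the
         * ET field was cleared, so restore SRMMU_ET_PTE before use.
         */
        printk("level code %d, pte %08x\n", (int) (ent & 0x3),
               (ent & ~0x3) | SRMMU_ET_PTE);
}
#endif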
 567 
 568 
 569 /* Allocate a block of RAM which is aligned to its size.
 570  * This procedure can be used until the call to mem_init().
 571  *
 572  * To get around the ELF bootloader nastiness we have an
 573  * early page table pool allocation area starting at
 574  * C_LABEL(pg0) which is 256k; this should be enough for now.
 575  */
 576 static void *
 577 srmmu_init_alloc(unsigned long *kbrk, unsigned size)
 578 {
 579         register unsigned mask = size - 1;
 580         register unsigned long ret;
 581 
 582         if(size==0) return 0x0;
 583         if(size & mask) {
 584                 printk("panic: srmmu_init_alloc botch\n");
 585                 prom_halt();
 586         }
 587         ret = (*kbrk + mask) & ~mask;
 588         *kbrk = ret + size;
 589         memset((void*) ret, 0, size);
 590         return (void*) ret;
 591 }
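
/* Illustrative sketch (editor's addition, not part of the original file):
 * the power-of-two rounding above worked through in numbers, using a
 * made-up break value.  The helper name is hypothetical.
 */
#if 0
static void example_init_alloc_math(void)
{
        /* With *kbrk == 0xf0041234 and size == 0x1000 (one page):
         *   mask = 0x0fff
         *   ret  = (0xf0041234 + 0x0fff) & ~0x0fff = 0xf0042000
         *   *kbrk becomes ret + size              = 0xf0043000
         * so the block is aligned to its own size and the break pointer
         * moves just past it.
         */
        unsigned long kbrk = 0xf0041234, size = 0x1000, mask = size - 1;
        unsigned long ret = (kbrk + mask) & ~mask;

        printk("aligned block at %08lx, new brk %08lx\n", ret, ret + size);
}
#endif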
 592 
 593 extern unsigned long srmmu_data_fault, srmmu_text_fault;
 594 
 595 /* Patch in the SRMMU fault handlers for the trap table. */
 596 void
 597 srmmu_patch_fhandlers(void)
 598 {
 599         /* Say the following ten times fast... */
 600         sparc_ttable[SP_TRAP_TFLT].inst_one = SPARC_MOV_CONST_L3(0x1);
 601         sparc_ttable[SP_TRAP_TFLT].inst_two =
 602                 SPARC_BRANCH((unsigned long) &srmmu_text_fault, 
 603                              (unsigned long) &sparc_ttable[SP_TRAP_TFLT].inst_two);
 604         sparc_ttable[SP_TRAP_TFLT].inst_three = SPARC_RD_PSR_L0;
 605         sparc_ttable[SP_TRAP_TFLT].inst_four = SPARC_NOP;
 606 
 607         sparc_ttable[SP_TRAP_DFLT].inst_one = SPARC_MOV_CONST_L3(0x9);
 608         sparc_ttable[SP_TRAP_DFLT].inst_two =
 609                 SPARC_BRANCH((unsigned long) &srmmu_data_fault,
 610                              (unsigned long) &sparc_ttable[SP_TRAP_DFLT].inst_two);
 611         sparc_ttable[SP_TRAP_DFLT].inst_three = SPARC_RD_PSR_L0;
 612         sparc_ttable[SP_TRAP_DFLT].inst_four = SPARC_NOP;
 613 
 614         return;
 615 }
 616 
 617 /* Paging initialization on the Sparc Reference MMU. */
 618 
 619 /* This is all poorly designed; we cannot assume any pages are valid
 620  * past _end until *after* this routine runs, so we can't use the
 621  * start_mem mechanism during initialization...
 622  */
 623 static unsigned long mempool;
 624 
 625 /* The following is global because trap_init needs it to fire up
 626  * the other cpu's on multiprocessors.
 627  */
 628 pgd_t *lnx_root;      /* Pointer to the new root table */
 629 
 630 extern char start[];
 631 
 632 unsigned long
 633 srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
 634 {
 635         unsigned long vaddr;  /* Virtual counter */
 636         int i;
 637 
 638         pte_t *ptep = 0;
 639         pmd_t *pmdp = 0;
 640         pgd_t *pgdp = 0;
 641 
 642         mempool = start_mem;
 643         lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));
 644 
 645         memset(swapper_pg_dir, 0, PAGE_SIZE);
 646 
 647         /* For every entry in the new Linux context table, put in
 648          * an entry which points to swapper_pg_dir .
 649          */
 650         pmdp = (pmd_t *) swapper_pg_dir;
 651         for(i = 0; i < num_contexts; i++)
 652                 srmmu_pgd_set(&lnx_root[i], pmdp);
 653 
 654         /* Make Linux physical page tables. */
 655         for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
 656                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 657                 if(srmmu_pgd_none(*pgdp)) {
 658                         pmdp = srmmu_init_alloc(&mempool,
 659                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 660                         srmmu_pgd_set(pgdp, pmdp);
 661                 }
 662 
 663                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 664                 if(srmmu_pmd_none(*pmdp)) {
 665                         ptep = srmmu_init_alloc(&mempool,
 666                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 667                         srmmu_pmd_set(pmdp, ptep);
 668                 }
 669 
 670                 ptep = srmmu_pte_offset(pmdp, vaddr);
 671                 *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
 672         }
 673 
 674         /* Map IO areas. */
 675         for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
 676             vaddr += SRMMU_PMD_SIZE) {
 677                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 678                 if(srmmu_pgd_none(*pgdp)) {
 679                         pmdp = srmmu_init_alloc(&mempool,
 680                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 681                         srmmu_pgd_set(pgdp, pmdp);
 682                 }
 683                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 684                 if(srmmu_pmd_none(*pmdp)) {
 685                         ptep = srmmu_init_alloc(&mempool,
 686                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 687                         srmmu_pmd_set(pmdp, ptep);
 688                 }
 689         }
 690 
 691         /* Map in the PERCPU areas in virtual address space. */
 692         printk("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
 693                (PERCPU_VADDR + PERCPU_LEN));
 694         for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
 695             vaddr += PERCPU_ENTSIZE) {
 696                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 697                 if(srmmu_pgd_none(*pgdp)) {
 698                         pmdp = srmmu_init_alloc(&mempool,
 699                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 700                         srmmu_pgd_set(pgdp, pmdp);
 701                 }
 702                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 703                 if(srmmu_pmd_none(*pmdp)) {
 704                         ptep = srmmu_init_alloc(&mempool,
 705                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 706                         srmmu_pmd_set(pmdp, ptep);
 707                 }
 708                 ptep = srmmu_pte_offset(pmdp, vaddr);
 709                 /* Per-cpu trap table page. */
 710                 *ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);
 711                 /* Per-cpu kernel stack page. */
 712                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 713                                        SRMMU_PAGE_KERNEL);
 714                 /* Per-cpu Prom MBox. */
 715                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 716                                        SRMMU_PAGE_KERNEL);
 717                 /* Per-cpu state variables. */
 718                 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 719                                      SRMMU_PAGE_KERNEL);
 720         }
 721         percpu_table = (struct sparc_percpu *) PERCPU_VADDR;
 722 
 723         /* Ugh, have to map DVMA that the prom has mapped too or else
 724          * you will lose with video cards when we take over the ctx table.
 725  * Also, we must take into consideration that the prom might be using
 726  * level-one or level-two PTEs. TODO
 727          */
 728         for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
 729                 unsigned int prom_pte;
 730 
 731                 prom_pte = srmmu_init_twalk(vaddr, 0);
 732 
 733                 if(prom_pte) {
 734                         pgdp = srmmu_pgd_offset(&init_task, vaddr);
 735                         if((prom_pte&0x3) == 0x0) {
 736                                 prom_pte &= ~0x3;
 737                                 prom_pte |= SRMMU_ET_PTE;
 738                                 pgd_val(*pgdp) = prom_pte;
 739                                 vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
 740                                 continue;
 741                         }
 742                         if(srmmu_pgd_none(*pgdp)) {
 743                                 pmdp = srmmu_init_alloc(&mempool,
 744                                                         SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 745                                 srmmu_pgd_set(pgdp, pmdp);
 746                         }
 747 
 748                         pmdp = srmmu_pmd_offset(pgdp, vaddr);
 749                         if((prom_pte&0x3) == 0x1) {
 750                                 prom_pte &= ~0x3;
 751                                 prom_pte |= SRMMU_ET_PTE;
 752                                 pmd_val(*pmdp) = prom_pte;
 753                                 vaddr = SRMMU_PMD_ALIGN(vaddr+1);
 754                                 continue;
 755                         }
 756                         if(srmmu_pmd_none(*pmdp)) {
 757                                 ptep = srmmu_init_alloc(&mempool,
 758                                                         SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 759                                 srmmu_pmd_set(pmdp, ptep);
 760                         }
 761                         /* A normal 3rd level PTE, no need to change ET bits. */
 762                         ptep = srmmu_pte_offset(pmdp, vaddr);
 763                         pte_val(*ptep) = prom_pte;
 764 
 765                 }
 766                 vaddr += PAGE_SIZE;
 767         }
 768 
 769         /* I believe I do not need to flush VAC here since my stores  */
 770         /* probably already reached the physical RAM.             --P3 */
 771 
 772         /* We probably do, and should do it just to be safe... -Davem */
 773 
 774         /* Take the MMU over from the PROM */
 775         printk("Taking over MMU from PROM.\n");
 776 
 777         srmmu_set_ctable_ptr(srmmu_virt_to_phys((unsigned)lnx_root));
 778 
 779         srmmu_flush_whole_tlb();
 780 
 781         /* Now it is ok to use memory at start_mem. */
 782         start_mem = PAGE_ALIGN(mempool);
 783         start_mem = free_area_init(start_mem, end_mem);
 784         start_mem = PAGE_ALIGN(start_mem);
 785 
 786 #if 0
 787         printk("Testing context switches...\n");
 788         for(i=0; i<num_contexts; i++)
 789                 srmmu_set_context(i);
 790         printk("done...\n");
 791         srmmu_set_context(0);
 792 #endif
 793 
 794         printk("survived...\n");
 795         return start_mem;
 796 }
 797 
 798 /* Test the WP bit on the Sparc Reference MMU. */
 799 void
 800 srmmu_test_wp(void)
 801 {
 802         pgd_t *pgdp;
 803         pmd_t *pmdp;
 804         pte_t *ptep;
 805         
 806         wp_works_ok = -1;
 807         /* We mapped page zero as a read-only page in paging_init(),
 808          * so fire up the test, then invalidate the pgd for page zero.
 809          * It is no longer needed.
 810          */
 811 
 812         /* Let it rip... */
 813         __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
 814         if (wp_works_ok < 0)
 815                 wp_works_ok = 0;
 816 
 817         pgdp = srmmu_pgd_offset(&init_task, 0x0);
 818         pgd_val(*pgdp) = 0x0;
 819 
 820         return;
 821 }
 822 
 823 /* Load up routines and constants for sun4m mmu */
 824 void
 825 ld_mmu_srmmu(void)
 826 {
 827         printk("Loading srmmu MMU routines\n");
 828 
 829         /* First the constants */
 830         pmd_shift = SRMMU_PMD_SHIFT;
 831         pmd_size = SRMMU_PMD_SIZE;
 832         pmd_mask = SRMMU_PMD_MASK;
 833         pgdir_shift = SRMMU_PGDIR_SHIFT;
 834         pgdir_size = SRMMU_PGDIR_SIZE;
 835         pgdir_mask = SRMMU_PGDIR_MASK;
 836 
 837         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
 838         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
 839         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
 840 
 841         page_none = SRMMU_PAGE_NONE;
 842         page_shared = SRMMU_PAGE_SHARED;
 843         page_copy = SRMMU_PAGE_COPY;
 844         page_readonly = SRMMU_PAGE_READONLY;
 845         page_kernel = SRMMU_PAGE_KERNEL;
 846         page_invalid = SRMMU_PAGE_INVALID;
 847         
 848         /* Functions */
 849         invalidate = srmmu_invalidate;
 850         set_pte = srmmu_set_pte;
 851         switch_to_context = srmmu_switch_to_context;
 852         pmd_align = srmmu_pmd_align;
 853         pgdir_align = srmmu_pgdir_align;
 854         vmalloc_start = srmmu_vmalloc_start;
 855 
 856         pte_page = srmmu_pte_page;
 857         pmd_page = srmmu_pmd_page;
 858         pgd_page = srmmu_pgd_page;
 859 
 860         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
 861 
 862         pte_none = srmmu_pte_none;
 863         pte_present = srmmu_pte_present;
 864         pte_inuse = srmmu_pte_inuse;
 865         pte_clear = srmmu_pte_clear;
 866         pte_reuse = srmmu_pte_reuse;
 867 
 868         pmd_none = srmmu_pmd_none;
 869         pmd_bad = srmmu_pmd_bad;
 870         pmd_present = srmmu_pmd_present;
 871         pmd_inuse = srmmu_pmd_inuse;
 872         pmd_clear = srmmu_pmd_clear;
 873         pmd_reuse = srmmu_pmd_reuse;
 874 
 875         pgd_none = srmmu_pgd_none;
 876         pgd_bad = srmmu_pgd_bad;
 877         pgd_present = srmmu_pgd_present;
 878         pgd_inuse = srmmu_pgd_inuse;
 879         pgd_clear = srmmu_pgd_clear;
 880         pgd_reuse = srmmu_pgd_reuse;
 881 
 882         mk_pte = srmmu_mk_pte;
 883         pgd_set = srmmu_pgd_set;  /* XXX needs a cast */
 884         pte_modify = srmmu_pte_modify;
 885         pgd_offset = srmmu_pgd_offset;
 886         pmd_offset = srmmu_pmd_offset;
 887         pte_offset = srmmu_pte_offset;
 888         pte_free_kernel = srmmu_pte_free_kernel;
 889         pmd_free_kernel = srmmu_pmd_free_kernel;
 890         pte_alloc_kernel = srmmu_pte_alloc_kernel;
 891         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
 892         pte_free = srmmu_pte_free;
 893         pte_alloc = srmmu_pte_alloc;
 894         pmd_free = srmmu_pmd_free;
 895         pmd_alloc = srmmu_pmd_alloc;
 896         pgd_free = srmmu_pgd_free;
 897         pgd_alloc = srmmu_pgd_alloc;
 898 
 899         pte_read = srmmu_pte_read;
 900         pte_write = srmmu_pte_write;
 901         pte_exec = srmmu_pte_exec;
 902         pte_dirty = srmmu_pte_dirty;
 903         pte_young = srmmu_pte_young;
 904         pte_cow = srmmu_pte_cow;
 905         pte_wrprotect = srmmu_pte_wrprotect;
 906         pte_rdprotect = srmmu_pte_rdprotect;
 907         pte_exprotect = srmmu_pte_exprotect;
 908         pte_mkclean = srmmu_pte_mkclean;
 909         pte_mkold = srmmu_pte_mkold;
 910         pte_uncow = srmmu_pte_uncow;
 911         pte_mkwrite = srmmu_pte_mkwrite;
 912         pte_mkread = srmmu_pte_mkread;
 913         pte_mkexec = srmmu_pte_mkexec;
 914         pte_mkdirty = srmmu_pte_mkdirty;
 915         pte_mkyoung = srmmu_pte_mkyoung;
 916         pte_mkcow = srmmu_pte_mkcow;
 917 
 918         return;
 919 }
 920 
