root/arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_pmd_align
  2. srmmu_pgdir_align
  3. srmmu_virt_to_phys
  4. srmmu_phys_to_virt
  5. srmmu_vmalloc_start
  6. srmmu_pmd_page
  7. srmmu_pgd_page
  8. srmmu_pte_page
  9. srmmu_pte_none
  10. srmmu_pte_present
  11. srmmu_pte_inuse
  12. srmmu_pte_clear
  13. srmmu_pte_reuse
  14. srmmu_pmd_none
  15. srmmu_pmd_bad
  16. srmmu_pmd_present
  17. srmmu_pmd_inuse
  18. srmmu_pmd_clear
  19. srmmu_pmd_reuse
  20. srmmu_pgd_none
  21. srmmu_pgd_bad
  22. srmmu_pgd_present
  23. srmmu_pgd_inuse
  24. srmmu_pgd_clear
  25. srmmu_pgd_reuse
  26. srmmu_pte_read
  27. srmmu_pte_write
  28. srmmu_pte_exec
  29. srmmu_pte_dirty
  30. srmmu_pte_young
  31. srmmu_pte_cow
  32. srmmu_pte_wrprotect
  33. srmmu_pte_rdprotect
  34. srmmu_pte_exprotect
  35. srmmu_pte_mkclean
  36. srmmu_pte_mkold
  37. srmmu_pte_uncow
  38. srmmu_pte_mkwrite
  39. srmmu_pte_mkread
  40. srmmu_pte_mkexec
  41. srmmu_pte_mkdirty
  42. srmmu_pte_mkyoung
  43. srmmu_pte_mkcow
  44. srmmu_mk_pte
  45. srmmu_pgd_set
  46. srmmu_pmd_set
  47. srmmu_pte_modify
  48. srmmu_pgd_offset
  49. srmmu_pmd_offset
  50. srmmu_pte_offset
  51. srmmu_update_rootmmu_dir
  52. srmmu_pte_free_kernel
  53. srmmu_pte_alloc_kernel
  54. srmmu_pmd_free_kernel
  55. srmmu_pmd_alloc_kernel
  56. srmmu_pte_free
  57. srmmu_pte_alloc
  58. srmmu_pmd_free
  59. srmmu_pmd_alloc
  60. srmmu_pgd_free
  61. srmmu_pgd_alloc
  62. srmmu_invalidate
  63. srmmu_switch_to_context
  64. srmmu_mapioaddr
  65. srmmu_init_twalk
  66. srmmu_init_alloc
  67. srmmu_patch_fhandlers
  68. srmmu_paging_init
  69. srmmu_test_wp
  70. ld_mmu_srmmu

   1 /* srmmu.c:  SRMMU specific routines for memory management.
   2  *
   3  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   4  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@lab.ipmce.su)
   5  */
   6 
   7 #include <linux/kernel.h>  /* for printk */
   8 
   9 #include <asm/page.h>
  10 #include <asm/pgtable.h>
  11 #include <asm/kdebug.h>
  12 #include <asm/vaddrs.h>
  13 #include <asm/traps.h>
  14 #include <asm/mp.h>
  15 #include <asm/cache.h>
  16 #include <asm/oplib.h>
  17 
  18 extern unsigned long free_area_init(unsigned long, unsigned long);
  19 
  20 unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
  21 unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
  22 
  23 /* Idea taken from Hamish McDonald's MC680x0 Linux code, nice job.
  24  * Many of the page table/directory functions on the SRMMU use this
  25  * routine.
  26  *
   27  * Having a complete physical RAM structure walk happen on each
   28  * invocation is quite costly.  However, it does do some nice
   29  * sanity checking and we'll see when our maps don't match.  Eventually,
   30  * when I trust my code, I will just do a direct MMU probe in mk_pte().
  31  */
  32 static inline unsigned int
  33 srmmu_virt_to_phys(unsigned int vaddr)
  34 {
  35         unsigned int paddr = 0;
  36         unsigned int voff = (vaddr - PAGE_OFFSET);
  37         int i;
  38 
  39         for(i=0; sp_banks[i].num_bytes != 0; i++) {
  40                 if(voff < paddr + sp_banks[i].num_bytes) {
  41                         /* This matches. */
  42                         return sp_banks[i].base_addr + voff - paddr;
  43                 } else
  44                         paddr += sp_banks[i].num_bytes;
  45         }
  46         /* Shit, gotta consult the MMU, this shouldn't happen... */
  47         printk("srmmu_virt_to_phys: SRMMU virt to phys translation failed, halting\n");
  48         halt();
  49 }               
  50 
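/* A worked example of the walk above, with a purely hypothetical bank layout:
 * sp_banks[0] = { base 0x00000000, 4MB } and sp_banks[1] = { base 0x08000000,
 * 4MB }.  For a virtual address of PAGE_OFFSET + 5MB, voff is 5MB; that is not
 * below paddr + 4MB on the first pass, so paddr advances to 4MB, the second
 * bank matches, and we return 0x08000000 + (5MB - 4MB) = 0x08100000.
 */
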
  51 static inline unsigned long
  52 srmmu_phys_to_virt(unsigned long paddr)
  53 {
  54         int i;
  55         unsigned long offset = PAGE_OFFSET;
  56 
  57         for (i=0; sp_banks[i].num_bytes != 0; i++)
  58         {
  59                 if (paddr >= sp_banks[i].base_addr &&
  60                     paddr < (sp_banks[i].base_addr
  61                              + sp_banks[i].num_bytes)) {
  62                         return (paddr - sp_banks[i].base_addr) + offset;
  63                 } else
  64                         offset += sp_banks[i].num_bytes;
  65         }
  66         printk("srmmu_phys_to_virt: Could not make translation, halting...\n");
  67         halt();
  68 }
  69 
  70 unsigned long
  71 srmmu_vmalloc_start(void)
  72 {
  73         return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
  74 }
  75 
  76 unsigned long 
  77 srmmu_pmd_page(pmd_t pmd)
  78 {
  79         unsigned long page;
  80 
  81         page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  82         return srmmu_phys_to_virt(page);
  83 }
  84 
  85 unsigned long
  86 srmmu_pgd_page(pgd_t pgd)
  87 {
  88         unsigned long page;
  89 
  90         page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  91         return srmmu_phys_to_virt(page);
  92 }
  93 
  94 unsigned long 
  95 srmmu_pte_page(pte_t pte)
  96 {
  97         unsigned long page;
  98 
  99         page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
 100         printk("srmmu_pte_page: page = %08lx\n", page);
 101         return srmmu_phys_to_virt(page);
 102 }
 103 
 104 int srmmu_pte_none(pte_t pte)           { return !pte_val(pte); }
 105 int srmmu_pte_present(pte_t pte)        { return pte_val(pte) & SRMMU_ET_PTE; }
 106 int srmmu_pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)] != 1; }
 107 void srmmu_pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
 108 void srmmu_pte_reuse(pte_t *ptep)
 109 {
 110   if(!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
 111     mem_map[MAP_NR(ptep)]++;
 112 }
 113 
 114 int srmmu_pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
 115 int srmmu_pmd_bad(pmd_t pmd)
 116 {
 117         return ((pmd_val(pmd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
 118                 (srmmu_pmd_page(pmd) > high_memory);
 119 }
 120 
 121 int srmmu_pmd_present(pmd_t pmd)        { return pmd_val(pmd) & SRMMU_ET_PTD; }
 122 int srmmu_pmd_inuse(pmd_t *pmdp)        { return mem_map[MAP_NR(pmdp)] != 1; }
 123 void srmmu_pmd_clear(pmd_t *pmdp)       { pmd_val(*pmdp) = 0; }
 124 void srmmu_pmd_reuse(pmd_t * pmdp)
 125 {
 126         if (!(mem_map[MAP_NR(pmdp)] & MAP_PAGE_RESERVED))
 127                 mem_map[MAP_NR(pmdp)]++;
 128 }
 129 
 130 int srmmu_pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
 131 int srmmu_pgd_bad(pgd_t pgd)
 132 {
 133         return ((pgd_val(pgd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
 134                 (srmmu_pgd_page(pgd) > high_memory);
 135 }
 136 int srmmu_pgd_present(pgd_t pgd)        { return pgd_val(pgd) & SRMMU_ET_PTD; }
 137 int srmmu_pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)] != 1; }
 138 void srmmu_pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }
 139 void srmmu_pgd_reuse(pgd_t *pgdp)
 140 {
 141   if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
 142     mem_map[MAP_NR(pgdp)]++;
 143 }
 144 
 145 /*
 146  * The following only work if pte_present() is true.
 147  * Undefined behaviour if not..
 148  */
 149 int srmmu_pte_read(pte_t pte)           { return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
 150 int srmmu_pte_write(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
 151 int srmmu_pte_exec(pte_t pte)           { return pte_val(pte) & _SRMMU_PAGE_EXEC; }
 152 int srmmu_pte_dirty(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
 153 int srmmu_pte_young(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_REF; }
 154 int srmmu_pte_cow(pte_t pte)            { return pte_val(pte) & _SRMMU_PAGE_COW; }
 155 
  156 /* When we change permissions, we first clear all bits in the ACCESS field,
  157  * then apply the wanted bits; ACC is one combined rwx encoding, not separate bits.
  158  */
 159 pte_t srmmu_pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 160 pte_t srmmu_pte_rdprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
 161 pte_t srmmu_pte_exprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 162 pte_t srmmu_pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
 163 pte_t srmmu_pte_mkold(pte_t pte)        { pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
 164 pte_t srmmu_pte_uncow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
 165 pte_t srmmu_pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 166 pte_t srmmu_pte_mkread(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
 167 pte_t srmmu_pte_mkexec(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 168 pte_t srmmu_pte_mkdirty(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
 169 pte_t srmmu_pte_mkyoung(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
 170 pte_t srmmu_pte_mkcow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
 171 
 172 /*
 173  * Conversion functions: convert a page and protection to a page entry,
 174  * and a page entry and page directory to the page they refer to.
 175  */
 176 pte_t
 177 srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 178 {
 179         pte_t pte;
 180 
 181         if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
 182         page = (srmmu_virt_to_phys(page) >> SRMMU_PTE_PPN_PADDR_SHIFT);
 183         pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
 184         pte_val(pte) |= pgprot_val(pgprot);
 185         return pte;
 186 }
 187 
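/* A typical use, as in srmmu_paging_init() below, is building a kernel
 * mapping for a page-aligned virtual address (anything unaligned panics):
 *
 *      *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
 */
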
 188 void
 189 srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 190 {
 191         unsigned long page = (unsigned long) pmdp;
 192 
 193         page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 194 
 195         pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 196 }
 197 
 198 void
 199 srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 200 {
 201         unsigned long page = (unsigned long) ptep;
 202 
 203         page = (srmmu_virt_to_phys(page) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 204 
 205         pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 206 }
 207 
 208 pte_t
 209 srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 210 {
 211         pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
 212         return pte;
 213 }
 214 
 215 /* to find an entry in a top-level page table... */
 216 pgd_t *
 217 srmmu_pgd_offset(struct task_struct * tsk, unsigned long address)
 218 {
 219         return ((pgd_t *) tsk->tss.pgd_ptr) +
 220                 ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 221 }
 222 
 223 /* Find an entry in the second-level page table.. */
 224 pmd_t *
 225 srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 226 {
 227         return ((pmd_t *) pgd_page(*dir)) +
 228                 ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 229 }
 230 
 231 /* Find an entry in the third-level page table.. */ 
 232 pte_t *
 233 srmmu_pte_offset(pmd_t * dir, unsigned long address)
 234 {
 235         return ((pte_t *) pmd_page(*dir)) +
 236                 ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 237 }
 238 
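/* These three lookup helpers are meant to be chained to get from a task's
 * root table down to a single pte, exactly as srmmu_mapioaddr() and
 * srmmu_paging_init() do below:
 *
 *      pgdp = srmmu_pgd_offset(&init_task, vaddr);
 *      pmdp = srmmu_pmd_offset(pgdp, vaddr);
 *      ptep = srmmu_pte_offset(pmdp, vaddr);
 */
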
 239 /* This must update the context register for this process. */
 240 void
 241 srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir) 
 242 {
 243         /* See if this process has a context entry already, like after execve() */
 244         if(tsk->tss.context != -1) {
 245                 pgd_t *ctable_ptr = 0;
 246                 ctable_ptr = (pgd_t *) srmmu_phys_to_virt(srmmu_get_ctable_ptr());
 247                 ctable_ptr += tsk->tss.context;
 248                 srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);
 249                 /* Should flush caches here too... */
 250                 srmmu_flush_whole_tlb();
 251         }
 252 
 253         tsk->tss.pgd_ptr = (unsigned long) pgdir;
 254 
 255         return;
 256 }
 257 
 258 /*
 259  * Allocate and free page tables. The xxx_kernel() versions are
 260  * used to allocate a kernel page table - this turns on ASN bits
 261  * if any, and marks the page tables reserved.
 262  */
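/* Note on the pattern below: get_free_page(GFP_KERNEL) may sleep, so the
 * pmd/pgd entry is tested again after the allocation; if somebody else
 * filled it in while we slept, the freshly allocated page is given back.
 */
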
 263 void
 264 srmmu_pte_free_kernel(pte_t *pte)
 265 {
 266         mem_map[MAP_NR(pte)] = 1;
 267         free_page((unsigned long) pte);
 268 }
 269 
 270 pte_t *
 271 srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 272 {
 273         pte_t *page;
 274 
 275         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 276         if (srmmu_pmd_none(*pmd)) {
 277                 page = (pte_t *) get_free_page(GFP_KERNEL);
 278                 if (srmmu_pmd_none(*pmd)) {
 279                         if (page) {
 280                                 srmmu_pmd_set(pmd, page);
 281                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 282                                 return page + address;
 283                         }
 284                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 285                         return NULL;
 286                 }
 287                 free_page((unsigned long) page);
 288         }
 289         if (srmmu_pmd_bad(*pmd)) {
 290                 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
 291                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 292                 return NULL;
 293         }
 294         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 295 }
 296 
 297 /* Full three level on SRMMU */
 298 void
 299 srmmu_pmd_free_kernel(pmd_t *pmd)
 300 {
 301         mem_map[MAP_NR(pmd)] = 1;
 302         free_page((unsigned long) pmd);
 303 }
 304 
 305 pmd_t *
 306 srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 307 {
 308         pmd_t *page;
 309 
 310         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 311         if (srmmu_pgd_none(*pgd)) {
 312                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 313                 if (srmmu_pgd_none(*pgd)) {
 314                         if (page) {
 315                                 srmmu_pgd_set(pgd, page);
 316                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 317                                 return page + address;
 318                         }
 319                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 320                         return NULL;
 321                 }
 322                 free_page((unsigned long) page);
 323         }
 324         if (srmmu_pgd_bad(*pgd)) {
 325                 printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
 326                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 327                 return NULL;
 328         }
 329         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 330 }
 331 
 332 void
 333 srmmu_pte_free(pte_t *pte)
 334 {
 335         free_page((unsigned long) pte);
 336 }
 337 
 338 pte_t *
 339 srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 340 {
 341         pte_t *page;
 342 
 343         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 344         if (srmmu_pmd_none(*pmd)) {
 345                 page = (pte_t *) get_free_page(GFP_KERNEL);
 346                 if (srmmu_pmd_none(*pmd)) {
 347                         if (page) {
 348                                 srmmu_pmd_set(pmd, page);
 349                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 350                                 return page + address;
 351                         }
 352                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 353                         return NULL;
 354                 }
 355                 free_page((unsigned long) page);
 356         }
 357         if (srmmu_pmd_bad(*pmd)) {
  358                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 359                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 360                 return NULL;
 361         }
 362         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 363 }
 364 
 365 /*
  366  * On the SRMMU the pmd is a real table of its own, so freeing one
  367  * releases the page that backs it.
 368  */
 369 void 
 370 srmmu_pmd_free(pmd_t * pmd)
 371 {
 372         free_page((unsigned long) pmd);
 373 }
 374 
 375 pmd_t *
 376 srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 377 {
 378         pmd_t *page;
 379 
 380         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 381         if (srmmu_pgd_none(*pgd)) {
 382                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 383                 if (srmmu_pgd_none(*pgd)) {
 384                         if (page) {
 385                                 srmmu_pgd_set(pgd, page);
 386                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 387                                 return page + address;
 388                         }
 389                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 390                         return NULL;
 391                 }
 392                 free_page((unsigned long) page);
 393         }
 394         if (srmmu_pgd_bad(*pgd)) {
  395                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 396                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 397                 return NULL;
 398         }
 399         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 400 }
 401 
 402 void
 403 srmmu_pgd_free(pgd_t *pgd)
 404 {
 405         free_page((unsigned long) pgd);
 406 }
 407 
 408 /* A page directory on the srmmu needs 1k, but for now to simplify the
 409  * alignment constraints and allocation we just grab a whole page.
 410  */
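/* (The SRMMU level-1 table is 256 four-byte entries, hence the 1k figure;
 * only the first quarter of the page grabbed below is actually indexed.)
 */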
 411 
 412 pgd_t *
 413 srmmu_pgd_alloc(void)
 414 {
 415         return (pgd_t *) get_free_page(GFP_KERNEL);
 416 }
 417 
 418 /* Just flush the whole thing for now. We will need module
 419  * specific invalidate routines in certain circumstances,
 420  * because of different flushing facilities and hardware
 421  * bugs.
 422  */
 423 void
 424 srmmu_invalidate(void)
 425 {
 426         srmmu_flush_whole_tlb();
 427         return;
 428 }
 429 
 430 /* XXX Needs to be written */
 431 void
 432 srmmu_switch_to_context(int context)
 433 {
 434         printk("switching to context %d\n", context);
 435 
 436         return;
 437 }
 438 
 439 /* Low level IO area allocation on the SRMMU.
 440  *
  441  * I think we can get away with a regular page translation, just
  442  * making sure the cacheable bit is off.  I would like to avoid
  443  * having to mess with the IOMMU if at all possible at first.
  444  *
  445  * Apparently the IOMMU is only necessary for SBus devices, maybe VME too.
  446  * We'll see...
 447  */
 448 void
 449 srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
 450                 int bus_type, int rdonly)
 451 {
 452   pgd_t *pgdp;
 453   pmd_t *pmdp;
 454   pte_t *ptep;
 455 
 456   pgdp = srmmu_pgd_offset(&init_task, virt_addr);
 457   pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 458   ptep = srmmu_pte_offset(pmdp, virt_addr);
 459   pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;
 460 
 461   if(!rdonly)
 462           pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
 463   else
 464           pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);
 465 
 466   pte_val(*ptep) |= (bus_type << 28);
 467   pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK); /* Make sure cacheable bit is off. */
 468   srmmu_flush_whole_tlb();
 469   flush_ei_ctx(0x0);
 470 
 471   return;
 472 }
 473 
  474 /* Perform some sort of MMU table walk.
  475  * Long contiguous mappings are not supported (yet?).
  476  *
  477  * Originally written by Peter Zaitcev, modified by David S.
  478  * Miller.  This is only used to copy over the PROM/KADB mappings
  479  * in srmmu_paging_init().
  480  *
  481  * The return value encodes the level at which the entry was found;
  482  * this is held in the lower 2 bits of the return value.  If the
  483  * return value is zero, no valid mapping was found at all.  For a
  484  * non-zero return value the low bits are:
  485  *
  486  *         0 -- Level 1 PTE
  487  *         1 -- Level 2 PTE
  488  *         2 -- Normal level 3 PTE
  489  *         3 -- Context Table PTE (unlikely, but still)
  490  *
  491  * Also note that this is called before the context table pointer
  492  * register is changed, so the PROM's entry is still in there.  Also,
  493  * it is safe to assume that context 0 contains the mappings.
  494  */
 495 /* TODO chop out 'trace' when stable */
 496 unsigned int
 497 srmmu_init_twalk(unsigned virt, int trace)
 498 {
 499         unsigned int wh, root;
 500 
 501         root = (unsigned int) srmmu_get_ctable_ptr();
 502         if(trace) printk(":0x%x >> ", virt);
 503 
 504         if(trace) printk(" 0x%x :", root);
 505         wh = ldw_sun4m_bypass(root);
 506         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 507                 if(trace) printk("\n");
 508                 return 0;
 509         }
 510         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 511                 wh &= ~SRMMU_PTE_ET_MASK;
 512                 wh |= 0x3;
 513                 if(trace) printk("\n");
 514                 printk("AIEEE context table level pte prom mapping!\n");
 515                 prom_halt();
 516                 return 0;
 517         }
 518                 
 519         if(trace) printk(" 0x%x .", wh);
 520         wh = ldw_sun4m_bypass(
 521                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 522                               + ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));
 523 
 524         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 525                 if(trace) printk("\n");
 526                 return 0;
 527         }
 528         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 529                 wh &= ~SRMMU_PTE_ET_MASK;
 530                 if(trace) printk("\n");
 531                 return wh;
 532         }
 533 
 534         if(trace) printk(" 0x%x .", wh);
 535         wh = ldw_sun4m_bypass(
 536                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 537                               + ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
 538         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 539                 if(trace) printk("\n");
 540                 return 0;
 541         }
 542         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 543                 wh &= ~SRMMU_PTE_ET_MASK;
 544                 wh |= 0x1;
 545                 if(trace) printk("\n");
 546                 return wh;
 547         }
 548 
 549         if(trace) printk(" 0x%x .", wh);
 550         wh = ldw_sun4m_bypass(
 551                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 552                               + ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
 553         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 554                 if(trace) printk("\n");
 555                 return 0;
 556         }
 557         if(trace) printk(" 0x%x\n", wh);
 558         return wh;
 559 }
 560 
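/* Callers decode the low two bits of the result, the way srmmu_paging_init()
 * does below when it copies the PROM mappings:
 *
 *      prom_pte = srmmu_init_twalk(vaddr, 0);
 *      if(prom_pte) {
 *              if((prom_pte & 0x3) == 0x0)       level 1 PTE, spans a pgdir
 *              else if((prom_pte & 0x3) == 0x1)  level 2 PTE, spans a pmd
 *              else                              a normal level 3 PTE
 *      }
 */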
 561 
 562 /* Allocate a block of RAM which is aligned to its size.
 563  * This procedure can be used until the call to mem_init().
 564  *
  565  * To get around the ELF bootloader nastiness we have an
  566  * early-on page table pool allocation area starting at
  567  * C_LABEL(pg0) which is 256k; this should be enough for now.
  568  */
 569 static void *
 570 srmmu_init_alloc(unsigned long *kbrk, unsigned size)
 571 {
 572         register unsigned mask = size - 1;
 573         register unsigned long ret;
 574 
 575         if(size==0) return 0x0;
 576         if(size & mask) {
 577                 printk("panic: srmmu_init_alloc botch\n");
 578                 prom_halt();
 579         }
 580         ret = (*kbrk + mask) & ~mask;
 581         *kbrk = ret + size;
 582         memset((void*) ret, 0, size);
 583         return (void*) ret;
 584 }
 585 
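/* A worked example with hypothetical numbers: with *kbrk at 0x4300, a request
 * for a 0x1000 byte table rounds ret up to 0x5000, zeroes those 4k, and
 * leaves *kbrk at 0x6000 for the next caller.
 */
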
 586 extern unsigned long srmmu_data_fault, srmmu_text_fault;
 587 
 588 /* Patch in the SRMMU fault handlers for the trap table. */
 589 void
 590 srmmu_patch_fhandlers(void)
 591 {
 592         /* Say the following ten times fast... */
 593         sparc_ttable[SP_TRAP_TFLT].inst_one = SPARC_MOV_CONST_L3(0x1);
 594         sparc_ttable[SP_TRAP_TFLT].inst_two =
 595                 SPARC_BRANCH((unsigned long) &srmmu_text_fault, 
 596                              (unsigned long) &sparc_ttable[SP_TRAP_TFLT].inst_two);
 597         sparc_ttable[SP_TRAP_TFLT].inst_three = SPARC_RD_PSR_L0;
 598         sparc_ttable[SP_TRAP_TFLT].inst_four = SPARC_NOP;
 599 
 600         sparc_ttable[SP_TRAP_DFLT].inst_one = SPARC_MOV_CONST_L3(0x9);
 601         sparc_ttable[SP_TRAP_DFLT].inst_two =
 602                 SPARC_BRANCH((unsigned long) &srmmu_data_fault,
 603                              (unsigned long) &sparc_ttable[SP_TRAP_DFLT].inst_two);
 604         sparc_ttable[SP_TRAP_DFLT].inst_three = SPARC_RD_PSR_L0;
 605         sparc_ttable[SP_TRAP_DFLT].inst_four = SPARC_NOP;
 606 
 607         return;
 608 }
 609 
 610 /* Paging initialization on the Sparc Reference MMU. */
 611 
  612 /* This is all poorly designed; we cannot assume any pages are valid
  613  * past _end until *after* this routine runs, so we can't use the
 614  * start_mem mechanism during initialization...
 615  */
 616 static unsigned long mempool;
 617 
 618 /* The following is global because trap_init needs it to fire up
  619  * the other CPUs on multiprocessors.
 620  */
 621 pgd_t *lnx_root;      /* Pointer to the new root table */
 622 
 623 extern char start[];
 624 
 625 unsigned long
 626 srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
 627 {
 628         unsigned long vaddr;  /* Virtual counter */
 629         int i;
 630 
 631         pte_t *ptep = 0;
 632         pmd_t *pmdp = 0;
 633         pgd_t *pgdp = 0;
 634 
 635         mempool = start_mem;
 636         lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));
 637 
 638         memset(swapper_pg_dir, 0, PAGE_SIZE);
 639 
 640         /* For every entry in the new Linux context table, put in
 641          * an entry which points to swapper_pg_dir .
 642          */
 643         pmdp = (pmd_t *) swapper_pg_dir;
 644         for(i = 0; i < num_contexts; i++)
 645                 srmmu_pgd_set(&lnx_root[i], pmdp);
 646 
 647         /* Make Linux physical page tables. */
 648         for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
 649                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 650                 if(srmmu_pgd_none(*pgdp)) {
 651                         pmdp = srmmu_init_alloc(&mempool,
 652                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 653                         srmmu_pgd_set(pgdp, pmdp);
 654                 }
 655 
 656                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 657                 if(srmmu_pmd_none(*pmdp)) {
 658                         ptep = srmmu_init_alloc(&mempool,
 659                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 660                         srmmu_pmd_set(pmdp, ptep);
 661                 }
 662 
 663                 ptep = srmmu_pte_offset(pmdp, vaddr);
 664                 *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
 665         }
 666 
 667         /* Map IO areas. */
 668         for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
 669             vaddr += SRMMU_PMD_SIZE) {
 670                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 671                 if(srmmu_pgd_none(*pgdp)) {
 672                         pmdp = srmmu_init_alloc(&mempool,
 673                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 674                         srmmu_pgd_set(pgdp, pmdp);
 675                 }
 676                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 677                 if(srmmu_pmd_none(*pmdp)) {
 678                         ptep = srmmu_init_alloc(&mempool,
 679                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 680                         srmmu_pmd_set(pmdp, ptep);
 681                 }
 682         }
 683 
 684         /* Map in the PERCPU areas in virtual address space. */
 685         printk("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
 686                (PERCPU_VADDR + PERCPU_LEN));
 687         for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
 688             vaddr += PERCPU_ENTSIZE) {
 689                 pgdp = srmmu_pgd_offset(&init_task, vaddr);
 690                 if(srmmu_pgd_none(*pgdp)) {
 691                         pmdp = srmmu_init_alloc(&mempool,
 692                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 693                         srmmu_pgd_set(pgdp, pmdp);
 694                 }
 695                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 696                 if(srmmu_pmd_none(*pmdp)) {
 697                         ptep = srmmu_init_alloc(&mempool,
 698                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 699                         srmmu_pmd_set(pmdp, ptep);
 700                 }
 701                 ptep = srmmu_pte_offset(pmdp, vaddr);
 702                 /* Per-cpu trap table page. */
 703                 *ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);
 704                 /* Per-cpu kernel stack page. */
 705                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 706                                        SRMMU_PAGE_KERNEL);
 707                 /* Per-cpu Prom MBox. */
 708                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 709                                        SRMMU_PAGE_KERNEL);
 710                 /* Per-cpu state variables. */
 711                 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 712                                      SRMMU_PAGE_KERNEL);
 713         }
 714         percpu_table = (struct sparc_percpu *) PERCPU_VADDR;
 715 
  716         /* Ugh, we have to map the DVMA that the PROM has mapped too, or else
  717          * you will lose with video cards when we take over the ctx table.
  718          * Also, we must take into account that the PROM might be using level
  719          * two or level one PTEs.  TODO
 720          */
 721         for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
 722                 unsigned int prom_pte;
 723 
 724                 prom_pte = srmmu_init_twalk(vaddr, 0);
 725 
 726                 if(prom_pte) {
 727                         pgdp = srmmu_pgd_offset(&init_task, vaddr);
 728                         if((prom_pte&0x3) == 0x0) {
 729                                 prom_pte &= ~0x3;
 730                                 prom_pte |= SRMMU_ET_PTE;
 731                                 pgd_val(*pgdp) = prom_pte;
 732                                 vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
 733                                 continue;
 734                         }
 735                         if(srmmu_pgd_none(*pgdp)) {
 736                                 pmdp = srmmu_init_alloc(&mempool,
 737                                                         SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 738                                 srmmu_pgd_set(pgdp, pmdp);
 739                         }
 740 
 741                         pmdp = srmmu_pmd_offset(pgdp, vaddr);
 742                         if((prom_pte&0x3) == 0x1) {
 743                                 prom_pte &= ~0x3;
 744                                 prom_pte |= SRMMU_ET_PTE;
 745                                 pgd_val(*pgdp) = prom_pte;
 746                                 vaddr = SRMMU_PMD_ALIGN(vaddr+1);
 747                                 continue;
 748                         }
 749                         if(srmmu_pmd_none(*pmdp)) {
 750                                 ptep = srmmu_init_alloc(&mempool,
 751                                                         SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 752                                 srmmu_pmd_set(pmdp, ptep);
 753                         }
 754                         /* A normal 3rd level PTE, no need to change ET bits. */
 755                         ptep = srmmu_pte_offset(pmdp, vaddr);
 756                         pte_val(*ptep) = prom_pte;
 757 
 758                 }
 759                 vaddr += PAGE_SIZE;
 760         }
 761 
 762         /* I believe I do not need to flush VAC here since my stores  */
 763         /* probably already reached the physical RAM.             --P3 */
 764 
 765         /* We probably do, and should do it just to be safe... -Davem */
 766 
 767         /* Take the MMU over from the PROM */
 768         printk("Taking over MMU from PROM.\n");
 769 
 770         srmmu_set_ctable_ptr(srmmu_virt_to_phys((unsigned)lnx_root));
 771 
 772         srmmu_flush_whole_tlb();
 773 
 774         /* Now it is ok to use memory at start_mem. */
 775         start_mem = PAGE_ALIGN(mempool);
 776         start_mem = free_area_init(start_mem, end_mem);
 777         start_mem = PAGE_ALIGN(start_mem);
 778 
 779 #if 0
 780         printk("Testing context switches...\n");
 781         for(i=0; i<num_contexts; i++)
 782                 srmmu_set_context(i);
 783         printk("done...\n");
 784         srmmu_set_context(0);
 785 #endif
 786 
 787         printk("survived...\n");
 788         return start_mem;
 789 }
 790 
 791 /* Test the WP bit on the Sparc Reference MMU. */
 792 void
 793 srmmu_test_wp(void)
 794 {
 795         pgd_t *pgdp;
 796         pmd_t *pmdp;
 797         pte_t *ptep;
 798         
 799         wp_works_ok = -1;
  800         /* We mapped page zero as a read-only page in paging_init(),
  801          * so fire up the test, then invalidate the pgd for page zero.
 802          * It is no longer needed.
 803          */
 804 
 805         /* Let it rip... */
 806         __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
 807         if (wp_works_ok < 0)
 808                 wp_works_ok = 0;
 809 
 810         pgdp = srmmu_pgd_offset(&init_task, 0x0);
 811         pgd_val(*pgdp) = 0x0;
 812 
 813         return;
 814 }
 815 
 816 /* Load up routines and constants for sun4m mmu */
 817 void
 818 ld_mmu_srmmu(void)
 819 {
 820         printk("Loading srmmu MMU routines\n");
 821 
 822         /* First the constants */
 823         pmd_shift = SRMMU_PMD_SHIFT;
 824         pmd_size = SRMMU_PMD_SIZE;
 825         pmd_mask = SRMMU_PMD_MASK;
 826         pgdir_shift = SRMMU_PGDIR_SHIFT;
 827         pgdir_size = SRMMU_PGDIR_SIZE;
 828         pgdir_mask = SRMMU_PGDIR_MASK;
 829 
 830         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
 831         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
 832         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
 833 
 834         page_none = SRMMU_PAGE_NONE;
 835         page_shared = SRMMU_PAGE_SHARED;
 836         page_copy = SRMMU_PAGE_COPY;
 837         page_readonly = SRMMU_PAGE_READONLY;
 838         page_kernel = SRMMU_PAGE_KERNEL;
 839         page_invalid = SRMMU_PAGE_INVALID;
 840         
 841         /* Functions */
 842         invalidate = srmmu_invalidate;
 843         switch_to_context = srmmu_switch_to_context;
 844         pmd_align = srmmu_pmd_align;
 845         pgdir_align = srmmu_pgdir_align;
 846         vmalloc_start = srmmu_vmalloc_start;
 847 
 848         pte_page = srmmu_pte_page;
 849         pmd_page = srmmu_pmd_page;
 850         pgd_page = srmmu_pgd_page;
 851 
 852         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
 853 
 854         pte_none = srmmu_pte_none;
 855         pte_present = srmmu_pte_present;
 856         pte_inuse = srmmu_pte_inuse;
 857         pte_clear = srmmu_pte_clear;
 858         pte_reuse = srmmu_pte_reuse;
 859 
 860         pmd_none = srmmu_pmd_none;
 861         pmd_bad = srmmu_pmd_bad;
 862         pmd_present = srmmu_pmd_present;
 863         pmd_inuse = srmmu_pmd_inuse;
 864         pmd_clear = srmmu_pmd_clear;
 865         pmd_reuse = srmmu_pmd_reuse;
 866 
 867         pgd_none = srmmu_pgd_none;
 868         pgd_bad = srmmu_pgd_bad;
 869         pgd_present = srmmu_pgd_present;
 870         pgd_inuse = srmmu_pgd_inuse;
 871         pgd_clear = srmmu_pgd_clear;
 872         pgd_reuse = srmmu_pgd_reuse;
 873 
 874         mk_pte = srmmu_mk_pte;
 875         pgd_set = srmmu_pgd_set;  /* XXX needs a cast */
 876         pte_modify = srmmu_pte_modify;
 877         pgd_offset = srmmu_pgd_offset;
 878         pmd_offset = srmmu_pmd_offset;
 879         pte_offset = srmmu_pte_offset;
 880         pte_free_kernel = srmmu_pte_free_kernel;
 881         pmd_free_kernel = srmmu_pmd_free_kernel;
 882         pte_alloc_kernel = srmmu_pte_alloc_kernel;
 883         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
 884         pte_free = srmmu_pte_free;
 885         pte_alloc = srmmu_pte_alloc;
 886         pmd_free = srmmu_pmd_free;
 887         pmd_alloc = srmmu_pmd_alloc;
 888         pgd_free = srmmu_pgd_free;
 889         pgd_alloc = srmmu_pgd_alloc;
 890 
 891         pte_read = srmmu_pte_read;
 892         pte_write = srmmu_pte_write;
 893         pte_exec = srmmu_pte_exec;
 894         pte_dirty = srmmu_pte_dirty;
 895         pte_young = srmmu_pte_young;
 896         pte_cow = srmmu_pte_cow;
 897         pte_wrprotect = srmmu_pte_wrprotect;
 898         pte_rdprotect = srmmu_pte_rdprotect;
 899         pte_exprotect = srmmu_pte_exprotect;
 900         pte_mkclean = srmmu_pte_mkclean;
 901         pte_mkold = srmmu_pte_mkold;
 902         pte_uncow = srmmu_pte_uncow;
 903         pte_mkwrite = srmmu_pte_mkwrite;
 904         pte_mkread = srmmu_pte_mkread;
 905         pte_mkexec = srmmu_pte_mkexec;
 906         pte_mkdirty = srmmu_pte_mkdirty;
 907         pte_mkyoung = srmmu_pte_mkyoung;
 908         pte_mkcow = srmmu_pte_mkcow;
 909 
 910         return;
 911 }
 912 
