root/arch/sparc/mm/srmmu.c


DEFINITIONS

This source file includes the following definitions:
  1. srmmu_pmd_align
  2. srmmu_pgdir_align
  3. srmmu_vmalloc_start
  4. srmmu_pmd_page
  5. srmmu_pgd_page
  6. srmmu_pte_page
  7. srmmu_pte_none
  8. srmmu_pte_present
  9. srmmu_pte_inuse
  10. srmmu_pte_clear
  11. srmmu_pte_reuse
  12. srmmu_pmd_none
  13. srmmu_pmd_bad
  14. srmmu_pmd_present
  15. srmmu_pmd_inuse
  16. srmmu_pmd_clear
  17. srmmu_pmd_reuse
  18. srmmu_pgd_none
  19. srmmu_pgd_bad
  20. srmmu_pgd_present
  21. srmmu_pgd_inuse
  22. srmmu_pgd_clear
  23. srmmu_pgd_reuse
  24. srmmu_pte_read
  25. srmmu_pte_write
  26. srmmu_pte_exec
  27. srmmu_pte_dirty
  28. srmmu_pte_young
  29. srmmu_pte_cow
  30. srmmu_pte_wrprotect
  31. srmmu_pte_rdprotect
  32. srmmu_pte_exprotect
  33. srmmu_pte_mkclean
  34. srmmu_pte_mkold
  35. srmmu_pte_uncow
  36. srmmu_pte_mkwrite
  37. srmmu_pte_mkread
  38. srmmu_pte_mkexec
  39. srmmu_pte_mkdirty
  40. srmmu_pte_mkyoung
  41. srmmu_pte_mkcow
  42. srmmu_mk_pte
  43. srmmu_pgd_set
  44. srmmu_pmd_set
  45. srmmu_pte_modify
  46. srmmu_pgd_offset
  47. srmmu_pmd_offset
  48. srmmu_pte_offset
  49. srmmu_update_rootmmu_dir
  50. srmmu_pte_free_kernel
  51. srmmu_pte_alloc_kernel
  52. srmmu_pmd_free_kernel
  53. srmmu_pmd_alloc_kernel
  54. srmmu_pte_free
  55. srmmu_pte_alloc
  56. srmmu_pmd_free
  57. srmmu_pmd_alloc
  58. srmmu_pgd_free
  59. srmmu_pgd_alloc
  60. srmmu_invalidate
  61. srmmu_set_pte
  62. srmmu_switch_to_context
  63. srmmu_mapioaddr
  64. srmmu_lockarea
  65. srmmu_unlockarea
  66. srmmu_get_scsi_buffer
  67. srmmu_release_scsi_buffer
  68. srmmu_init_twalk
  69. srmmu_init_alloc
  70. srmmu_get_fault_info
  71. srmmu_paging_init
  72. srmmu_test_wp
  73. srmmu_update_mmu_cache
  74. srmmu_fork_hook
  75. srmmu_exit_hook
  76. srmmu_release_hook
  77. srmmu_flush_hook
  78. srmmu_task_cacheflush
  79. ld_mmu_srmmu

   1 /* $Id: srmmu.c,v 1.22 1995/11/25 00:59:33 davem Exp $
   2  * srmmu.c:  SRMMU specific routines for memory management.
   3  *
   4  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   5  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@lab.ipmce.su)
   6  */
   7 
   8 #include <linux/kernel.h>  /* for printk */
   9 
  10 #include <asm/page.h>
  11 #include <asm/pgtable.h>
  12 #include <asm/kdebug.h>
  13 #include <asm/vaddrs.h>
  14 #include <asm/traps.h>
  15 #include <asm/mp.h>
  16 #include <asm/cache.h>
  17 #include <asm/oplib.h>
  18 
  19 extern unsigned long free_area_init(unsigned long, unsigned long);
  20 
  21 unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
  22 unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
  23 
  24 unsigned long
  25 srmmu_vmalloc_start(void)
  26 {
  27         return ((high_memory + SRMMU_VMALLOC_OFFSET) & ~(SRMMU_VMALLOC_OFFSET-1));
  28 }
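     /* A note on the arithmetic above: adding SRMMU_VMALLOC_OFFSET and then
      * masking with ~(SRMMU_VMALLOC_OFFSET-1) rounds high_memory up to the
      * next SRMMU_VMALLOC_OFFSET boundary, leaving an unmapped guard gap of
      * up to SRMMU_VMALLOC_OFFSET bytes below the vmalloc area.  For example,
      * assuming an 8 MB offset, a high_memory of 0xf1000000 would give a
      * vmalloc start of 0xf1800000.
      */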
  29 
  30 unsigned long 
  31 srmmu_pmd_page(pmd_t pmd)
  32 {
  33         unsigned long page;
  34 
  35         page = (pmd_val(pmd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  36         return (page + PAGE_OFFSET);
  37 }
  38 
  39 unsigned long
  40 srmmu_pgd_page(pgd_t pgd)
  41 {
  42         unsigned long page;
  43 
  44         page = (pgd_val(pgd) & (SRMMU_PTD_PTP_MASK)) << SRMMU_PTD_PTP_PADDR_SHIFT;
  45         return (page + PAGE_OFFSET);
  46 }
  47 
  48 unsigned long 
  49 srmmu_pte_page(pte_t pte)
  50 {
  51         unsigned long page;
  52 
  53         page = (pte_val(pte) & (SRMMU_PTE_PPN_MASK)) << SRMMU_PTE_PPN_PADDR_SHIFT;
  54         return (page + PAGE_OFFSET);
  55 }
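     /* The three helpers above all do the same thing at different levels:
      * they extract the physical address field from a table descriptor (the
      * PTP field of a PTD) or from a PTE (the PPN field), shift it back up
      * into a byte address, and add PAGE_OFFSET so the result can be used as
      * a kernel virtual pointer.  This assumes the page tables and mapped
      * pages live in the kernel's linearly mapped physical memory.
      */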
  56 
  57 int srmmu_pte_none(pte_t pte)           { return !pte_val(pte); }
  58 int srmmu_pte_present(pte_t pte)        { return pte_val(pte) & SRMMU_ET_PTE; }
  59 int srmmu_pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
  60 void srmmu_pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
  61 void srmmu_pte_reuse(pte_t *ptep)
  62 {
  63   if(!mem_map[MAP_NR(ptep)].reserved)
  64     mem_map[MAP_NR(ptep)].count++;
  65 }
  66 
  67 int srmmu_pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
  68 int srmmu_pmd_bad(pmd_t pmd)
  69 {
  70         return ((pmd_val(pmd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
  71                 (srmmu_pmd_page(pmd) > high_memory);
  72 }
  73 
  74 int srmmu_pmd_present(pmd_t pmd)        { return pmd_val(pmd) & SRMMU_ET_PTD; }
  75 int srmmu_pmd_inuse(pmd_t *pmdp)        { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
  76 void srmmu_pmd_clear(pmd_t *pmdp)       { pmd_val(*pmdp) = 0; }
  77 void srmmu_pmd_reuse(pmd_t * pmdp)
  78 {
  79         if (!mem_map[MAP_NR(pmdp)].reserved)
  80                 mem_map[MAP_NR(pmdp)].count++;
  81 }
  82 
  83 int srmmu_pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
  84 int srmmu_pgd_bad(pgd_t pgd)
  85 {
  86         return ((pgd_val(pgd)&SRMMU_ET_PTDBAD)==SRMMU_ET_PTDBAD) ||
  87                 (srmmu_pgd_page(pgd) > high_memory);
  88 }
  89 int srmmu_pgd_present(pgd_t pgd)        { return pgd_val(pgd) & SRMMU_ET_PTD; }
  90 int srmmu_pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved; }
  91 void srmmu_pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }
  92 void srmmu_pgd_reuse(pgd_t *pgdp)
  93 {
  94   if (!mem_map[MAP_NR(pgdp)].reserved)
  95     mem_map[MAP_NR(pgdp)].count++;
  96 }
  97 
  98 /*
  99  * The following only work if pte_present() is true.
 100  * Undefined behaviour if not..
 101  */
 102 int srmmu_pte_read(pte_t pte)           { return (pte_val(pte) & _SRMMU_PAGE_RDONLY) || (pte_val(pte) & _SRMMU_PAGE_WRITE_USR); }
 103 int srmmu_pte_write(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_WRITE_USR; }
 104 int srmmu_pte_exec(pte_t pte)           { return pte_val(pte) & _SRMMU_PAGE_EXEC; }
 105 int srmmu_pte_dirty(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_DIRTY; }
 106 int srmmu_pte_young(pte_t pte)          { return pte_val(pte) & _SRMMU_PAGE_REF; }
 107 int srmmu_pte_cow(pte_t pte)            { return pte_val(pte) & _SRMMU_PAGE_COW; }
 108 
 109 /* When we change permissions, we first clear all bits in the ACCESS field,
 110  * then apply the wanted bits.
 111  */
 112 pte_t srmmu_pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 113 pte_t srmmu_pte_rdprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_NOREAD; return pte; }
 114 pte_t srmmu_pte_exprotect(pte_t pte)    { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 115 pte_t srmmu_pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_SRMMU_PAGE_DIRTY; return pte; }
 116 pte_t srmmu_pte_mkold(pte_t pte)        { pte_val(pte) &= ~_SRMMU_PAGE_REF; return pte; }
 117 pte_t srmmu_pte_uncow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_UNCOW; return pte; }
 118 pte_t srmmu_pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_WRITE_USR; return pte; }
 119 pte_t srmmu_pte_mkread(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_RDONLY; return pte; }
 120 pte_t srmmu_pte_mkexec(pte_t pte)       { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_EXEC; return pte; }
 121 pte_t srmmu_pte_mkdirty(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_DIRTY; return pte; }
 122 pte_t srmmu_pte_mkyoung(pte_t pte)      { pte_val(pte) |= _SRMMU_PAGE_REF; return pte; }
 123 pte_t srmmu_pte_mkcow(pte_t pte)        { pte_val(pte) &= ~SRMMU_PTE_ACC_MASK; pte_val(pte) |= _SRMMU_PAGE_COW; return pte; }
 124 
 125 /*
 126  * Conversion functions: convert a page and protection to a page entry,
 127  * and a page entry and page directory to the page they refer to.
 128  */
 129 pte_t
 130 srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 131 {
 132         pte_t pte;
 133 
 134         if(page & (~PAGE_MASK)) panic("srmmu_mk_pte() called with unaligned page");
 135         page = ((page - PAGE_OFFSET) >> SRMMU_PTE_PPN_PADDR_SHIFT);
 136         pte_val(pte) = (page & SRMMU_PTE_PPN_MASK);
 137         pte_val(pte) |= pgprot_val(pgprot);
 138         return pte;
 139 }
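     /* Usage sketch, mirroring how srmmu_paging_init() below builds the
      * kernel mappings; page (here a kernel virtual address in the
      * PAGE_OFFSET-mapped region) must be page aligned or the panic above
      * fires, since the physical page number is derived by subtracting
      * PAGE_OFFSET before shifting into the PPN field:
      *
      *      *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
      */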
 140 
 141 void
 142 srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 143 {
 144         unsigned long page = (unsigned long) pmdp;
 145 
 146         page = ((page - PAGE_OFFSET) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 147 
 148         pgd_val(*pgdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 149 }
 150 
 151 void
 152 srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
 153 {
 154         unsigned long page = (unsigned long) ptep;
 155 
 156         page = ((page - PAGE_OFFSET) >> SRMMU_PTD_PTP_PADDR_SHIFT);
 157 
 158         pmd_val(*pmdp) = ((page & SRMMU_PTD_PTP_MASK) | SRMMU_ET_PTD);
 159 }
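     /* In both setters above the next-level table's kernel virtual address is
      * converted to a physical address (subtract PAGE_OFFSET), shifted right
      * by SRMMU_PTD_PTP_PADDR_SHIFT into the PTP field, and tagged with
      * SRMMU_ET_PTD; srmmu_pgd_page()/srmmu_pmd_page() above perform the
      * exact inverse.  Judging from the "<< 4" used by srmmu_init_twalk()
      * below, the shift is presumably 4, i.e. the PTP holds physical address
      * bits 35:6 in descriptor bits 31:2.
      */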
 160 
 161 pte_t
 162 srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 163 {
 164         pte_val(pte) = (pte_val(pte) & (~SRMMU_PTE_ACC_MASK)) | pgprot_val(newprot);
 165         return pte;
 166 }
 167 
 168 /* to find an entry in a top-level page table... */
 169 pgd_t *
 170 srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 171 {
 172         return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
 173 }
 174 
 175 /* Find an entry in the second-level page table.. */
 176 pmd_t *
 177 srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 178 {
 179         return ((pmd_t *) pgd_page(*dir)) +
 180                 ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 181 }
 182 
 183 /* Find an entry in the third-level page table.. */ 
 184 pte_t *
 185 srmmu_pte_offset(pmd_t * dir, unsigned long address)
 186 {
 187         return ((pte_t *) pmd_page(*dir)) +
 188                 ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 189 }
 190 
 191 /* This must update the context register for this process. */
 192 void
 193 srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir) 
 194 {
 195         /* See if this process has a context entry already, like after execve() */
 196         if(tsk->tss.context != -1) {
 197                 pgd_t *ctable_ptr = 0;
 198                 ctable_ptr = (pgd_t *) (srmmu_get_ctable_ptr() + PAGE_OFFSET);
 199                 ctable_ptr += tsk->tss.context;
 200                 srmmu_pgd_set(ctable_ptr, (pmd_t *) pgdir);
 201                 /* Should flush caches here too... */
 202                 srmmu_flush_whole_tlb();
 203         }
 204 
 205         tsk->tss.pgd_ptr = (unsigned long) pgdir;
 206 
 207         return;
 208 }
 209 
 210 /*
 211  * Allocate and free page tables. The xxx_kernel() versions are
 212  * used to allocate a kernel page table - this turns on ASN bits
 213  * if any, and marks the page tables reserved.
 214  */
 215 void
 216 srmmu_pte_free_kernel(pte_t *pte)
 217 {
 218         mem_map[MAP_NR(pte)].reserved = 0;
 219         free_page((unsigned long) pte);
 220 }
 221 
 222 pte_t *
 223 srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 224 {
 225         pte_t *page;
 226 
 227         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 228         if (srmmu_pmd_none(*pmd)) {
 229                 page = (pte_t *) get_free_page(GFP_KERNEL);
 230                 if (srmmu_pmd_none(*pmd)) {
 231                         if (page) {
 232                                 srmmu_pmd_set(pmd, page);
 233                                 mem_map[MAP_NR(page)].reserved = 1;
 234                                 return page + address;
 235                         }
 236                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 237                         return NULL;
 238                 }
 239                 free_page((unsigned long) page);
 240         }
 241         if (srmmu_pmd_bad(*pmd)) {
 242                 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
 243                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 244                 return NULL;
 245         }
 246         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 247 }
 248 
 249 /* Full three level on SRMMU */
 250 void
 251 srmmu_pmd_free_kernel(pmd_t *pmd)
 252 {
 253         mem_map[MAP_NR(pmd)].reserved = 0;
 254         free_page((unsigned long) pmd);
 255 }
 256 
 257 pmd_t *
 258 srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 259 {
 260         pmd_t *page;
 261 
 262         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 263         if (srmmu_pgd_none(*pgd)) {
 264                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 265                 if (srmmu_pgd_none(*pgd)) {
 266                         if (page) {
 267                                 srmmu_pgd_set(pgd, page);
 268                                 mem_map[MAP_NR(page)].reserved = 1;
 269                                 return page + address;
 270                         }
 271                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 272                         return NULL;
 273                 }
 274                 free_page((unsigned long) page);
 275         }
 276         if (srmmu_pgd_bad(*pgd)) {
 277                 printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
 278                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 279                 return NULL;
 280         }
 281         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 282 }
 283 
 284 void
 285 srmmu_pte_free(pte_t *pte)
 286 {
 287         free_page((unsigned long) pte);
 288 }
 289 
 290 pte_t *
 291 srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
 292 {
 293         pte_t *page;
 294 
 295         address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 296         if (srmmu_pmd_none(*pmd)) {
 297                 page = (pte_t *) get_free_page(GFP_KERNEL);
 298                 if (srmmu_pmd_none(*pmd)) {
 299                         if (page) {
 300                                 srmmu_pmd_set(pmd, page);
 301                                 return page + address;
 302                         }
 303                         srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 304                         return NULL;
 305                 }
 306                 free_page((unsigned long) page);
 307         }
 308         if (srmmu_pmd_bad(*pmd)) {
 309                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 310                 srmmu_pmd_set(pmd, (pte_t *) SRMMU_ET_PTDBAD);
 311                 return NULL;
 312         }
 313         return (pte_t *) srmmu_pmd_page(*pmd) + address;
 314 }
 315 
 316 /*
 317  * On the SRMMU the pmd is a real second-level table (not folded into
 318  * the pgd), so allocating one grabs a page and freeing one releases it.
 319  */
 320 void 
 321 srmmu_pmd_free(pmd_t * pmd)
 322 {
 323         free_page((unsigned long) pmd);
 324 }
 325 
 326 pmd_t *
 327 srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
 328 {
 329         pmd_t *page;
 330 
 331         address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 332         if (srmmu_pgd_none(*pgd)) {
 333                 page = (pmd_t *) get_free_page(GFP_KERNEL);
 334                 if (srmmu_pgd_none(*pgd)) {
 335                         if (page) {
 336                                 srmmu_pgd_set(pgd, page);
 337                                 return page + address;
 338                         }
 339                         srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 340                         return NULL;
 341                 }
 342                 free_page((unsigned long) page);
 343         }
 344         if (srmmu_pgd_bad(*pgd)) {
 345                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 346                 srmmu_pgd_set(pgd, (pmd_t *) SRMMU_ET_PTDBAD);
 347                 return NULL;
 348         }
 349         return (pmd_t *) srmmu_pgd_page(*pgd) + address;
 350 }
 351 
 352 void
 353 srmmu_pgd_free(pgd_t *pgd)
 354 {
 355         free_page((unsigned long) pgd);
 356 }
 357 
 358 /* A page directory on the srmmu needs 1k, but for now, to simplify the
 359  * alignment constraints and allocation, we just grab a whole page.
 360  */
 361 
 362 pgd_t *
 363 srmmu_pgd_alloc(void)
 364 {
 365         return (pgd_t *) get_free_page(GFP_KERNEL);
 366 }
 367 
 368 /* Just flush the whole thing for now. We will need module
 369  * specific invalidate routines in certain circumstances,
 370  * because of different flushing facilities and hardware
 371  * bugs.
 372  */
 373 void
 374 srmmu_invalidate(void)
 375 {
 376         srmmu_flush_whole_tlb();
 377         return;
 378 }
 379 
 380 /* XXX Needs to be written */
 381 void srmmu_set_pte(pte_t *ptep, pte_t pteval)
 382 {
 383         /* More than this is needed. */
 384         *ptep = pteval;
 385 }
 386 
 387 /* XXX Needs to be written */
 388 void
 389 srmmu_switch_to_context(void *vtask)
 390 {
 391         struct task_struct *tsk = vtask;
 392         printk("switching to context %d\n", tsk->tss.context);
 393 
 394         return;
 395 }
 396 
 397 /* Low level IO area allocation on the SRMMU.
 398  *
 399  * I think we can get away with just using a regular page translation,
 400  * just making sure the cacheable bit is off.  I would like to avoid
 401  * having to mess with the IOMMU if at all possible at first.
 402  *
 403  * Apparently the IOMMU is only necessary for SBus devices, maybe VME too.
 404  * We'll see...
 405  */
 406 void
 407 srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
 408                 int bus_type, int rdonly)
 409 {
 410   pgd_t *pgdp;
 411   pmd_t *pmdp;
 412   pte_t *ptep;
 413 
 414   pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
 415   pmdp = srmmu_pmd_offset(pgdp, virt_addr);
 416   ptep = srmmu_pte_offset(pmdp, virt_addr);
 417   pte_val(*ptep) = (physaddr >> SRMMU_PTE_PPN_PADDR_SHIFT) & SRMMU_PTE_PPN_MASK;
 418 
 419   if(!rdonly)
 420           pte_val(*ptep) |= (SRMMU_ACC_S_RDWREXEC | SRMMU_ET_PTE);
 421   else
 422           pte_val(*ptep) |= (SRMMU_ACC_S_RDEXEC | SRMMU_ET_PTE);
 423 
 424   pte_val(*ptep) |= (bus_type << 28);
 425   pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK); /* Make sure cacheable bit is off. */
 426   srmmu_flush_whole_tlb();
 427   flush_ei_ctx(0x0);
 428 
 429   return;
 430 }
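     /* A note on the bus_type shift above: the SRMMU PTE keeps the physical
      * page number (physical address bits 35:12) in PTE bits 31:8, so ORing
      * in (bus_type << 28) sets physical address bits 35:32, i.e. it selects
      * which of the sun4m physical address spaces (on-board IO, SBus, ...)
      * the mapping refers to.  This reading follows the SRMMU architecture;
      * the file itself does not spell it out.
      */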
 431 
 432 char *srmmu_lockarea(char *vaddr, unsigned long len)
 433 {
 434         return vaddr;
 435 }
 436 
 437 void srmmu_unlockarea(char *vaddr, unsigned long len)
 438 {
 439 }
 440 
 441 char *srmmu_get_scsi_buffer(char *vaddr, unsigned long len)
 442 {
 443         panic("sun4m: get_scsi_buffer() not implemented yet.");
 444 }
 445 
 446 void srmmu_release_scsi_buffer(char *vaddr, unsigned long len)
 447 {
 448         panic("sun4m: release_scsi_buffer() not implemented yet.");
 449 }
 450 
 451 /* Perform some sort of MMU table walk.
 452  * Long contiguous mappings are not supported (yet ?).
 453  *
 454  * Originally written by Peter Zaitcev, modified by David S.
 455  * Miller.  This is only used to copy over the PROM/KADB mappings
 456  * in srmmu_paging_init().
 457  *
 458  * The return value encodes the level at which the entry was found;
 459  * this level is held in the lower 2 bits of the return value.  If
 460  * the return value is zero, no valid mapping was found at all.  For
 461  * a non-zero return value the low bits
 462  * are:
 463  *         0 -- Level 1 PTE
 464  *         1 -- Level 2 PTE
 465  *         2 -- Normal level 3 PTE
 466  *         3 -- Context Table PTE (unlikely, but still)
 467  * 
 468  * Also note that this is called before the context table pointer
 469  * register is changed, so the PROM's entry is still in there.  Also,
 470  * it is safe to assume that context 0 contains the mappings.
 471  */
 472 /* TODO chop out 'trace' when stable */
 473 unsigned int
 474 srmmu_init_twalk(unsigned virt, int trace)
 475 {
 476         unsigned int wh, root;
 477 
 478         root = (unsigned int) srmmu_get_ctable_ptr();
 479         if(trace) printk(":0x%x >> ", virt);
 480 
 481         if(trace) printk(" 0x%x :", root);
 482         wh = ldw_sun4m_bypass(root);
 483         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 484                 if(trace) printk("\n");
 485                 return 0;
 486         }
 487         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 488                 wh &= ~SRMMU_PTE_ET_MASK;
 489                 wh |= 0x3;
 490                 if(trace) printk("\n");
 491                 printk("AIEEE context table level pte prom mapping!\n");
 492                 prom_halt();
 493                 return 0;
 494         }
 495                 
 496         if(trace) printk(" 0x%x .", wh);
 497         wh = ldw_sun4m_bypass(
 498                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 499                               + ((virt & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT)*sizeof(pte_t));
 500 
 501         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 502                 if(trace) printk("\n");
 503                 return 0;
 504         }
 505         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 506                 wh &= ~SRMMU_PTE_ET_MASK;
 507                 if(trace) printk("\n");
 508                 return wh;
 509         }
 510 
 511         if(trace) printk(" 0x%x .", wh);
 512         wh = ldw_sun4m_bypass(
 513                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 514                               + ((virt & SRMMU_IDX2_MASK) >> SRMMU_IDX2_SHIFT)*sizeof(pte_t));
 515         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 516                 if(trace) printk("\n");
 517                 return 0;
 518         }
 519         if((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_PTE) {
 520                 wh &= ~SRMMU_PTE_ET_MASK;
 521                 wh |= 0x1;
 522                 if(trace) printk("\n");
 523                 return wh;
 524         }
 525 
 526         if(trace) printk(" 0x%x .", wh);
 527         wh = ldw_sun4m_bypass(
 528                               ((wh & SRMMU_PTD_PTP_MASK) << 4)
 529                               + ((virt & SRMMU_IDX3_MASK) >> SRMMU_IDX3_SHIFT)*sizeof(pte_t));
 530         if ((wh & SRMMU_PTE_ET_MASK) == SRMMU_ET_INVALID) {
 531                 if(trace) printk("\n");
 532                 return 0;
 533         }
 534         if(trace) printk(" 0x%x\n", wh);
 535         return wh;
 536 }
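     /* Decoding sketch, mirroring the PROM/KADB copy loop in
      * srmmu_paging_init() below:
      *
      *      prom_pte = srmmu_init_twalk(vaddr, 0);
      *      if(!prom_pte)                   -> no mapping at vaddr
      *      else if((prom_pte & 0x3) == 0)  -> level 1 PTE, goes in the pgd
      *      else if((prom_pte & 0x3) == 1)  -> level 2 PTE, goes in the pmd
      *      else                            -> normal level 3 PTE
      *
      * For the level 1 and level 2 cases the caller strips the level bits and
      * restores SRMMU_ET_PTE before installing the entry; a level 3 entry is
      * installed unchanged, since its ET field (2) already doubles as the
      * level code.
      */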
 537 
 538 
 539 /* Allocate a block of RAM which is aligned to its size.
 540  * This procedure can be used until the call to mem_init().
 541  *
 542  * To get around the ELF bootloader nastiness we have an
 543  * early page table pool allocation area starting at
 544  * C_LABEL(pg0) which is 256k; this should be enough for now.
 545  */
 546 static void *
 547 srmmu_init_alloc(unsigned long *kbrk, unsigned size)
 548 {
 549         register unsigned mask = size - 1;
 550         register unsigned long ret;
 551 
 552         if(size==0) return 0x0;
 553         if(size & mask) {
 554                 printk("panic: srmmu_init_alloc botch\n");
 555                 prom_halt();
 556         }
 557         ret = (*kbrk + mask) & ~mask;
 558         *kbrk = ret + size;
 559         memset((void*) ret, 0, size);
 560         return (void*) ret;
 561 }
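     /* Typical use in srmmu_paging_init() below: carve naturally aligned,
      * zeroed page tables out of the early memory pool, e.g.
      *
      *      pmdp = srmmu_init_alloc(&mempool, SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
      *      ptep = srmmu_init_alloc(&mempool, SRMMU_PTRS_PER_PTE*sizeof(pte_t));
      *
      * size must be a power of two (the check above halts otherwise), which
      * is what makes the "(*kbrk + mask) & ~mask" rounding produce a block
      * aligned to its own size.
      */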
 562 
 563 /* Get fault information on an SRMMU. */
 564 int
 565 srmmu_get_fault_info(unsigned long *address, unsigned long *error_code,
 566                      unsigned long from_user)
 567 {
 568         /* XXX Foo, write this... XXX */
 569         return 0;
 570 }
 571 
 572 /* Paging initialization on the Sparc Reference MMU. */
 573 
 574 /* This is all poorly designed; we cannot assume any pages are valid
 575  * past _end until *after* this routine runs, thus we can't use the
 576  * start_mem mechanism during initialization...
 577  */
 578 static unsigned long mempool;
 579 
 580 /* The following is global because trap_init needs it to fire up
 581  * the other CPUs on multiprocessors.
 582  */
 583 pgd_t *lnx_root;      /* Pointer to the new root table */
 584 
 585 extern char start[];
 586 
 587 unsigned long
 588 srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
 589 {
 590         unsigned long vaddr;  /* Virtual counter */
 591         int i;
 592 
 593         pte_t *ptep = 0;
 594         pmd_t *pmdp = 0;
 595         pgd_t *pgdp = 0;
 596 
 597         mempool = start_mem;
 598         lnx_root = srmmu_init_alloc(&mempool, num_contexts*sizeof(pgd_t));
 599 
 600         memset(swapper_pg_dir, 0, PAGE_SIZE);
 601 
 602         /* For every entry in the new Linux context table, put in
 603          * an entry which points to swapper_pg_dir .
 604          */
 605         pmdp = (pmd_t *) swapper_pg_dir;
 606         for(i = 0; i < num_contexts; i++)
 607                 srmmu_pgd_set(&lnx_root[i], pmdp);
 608 
 609         /* Make Linux physical page tables. */
 610         for(vaddr = KERNBASE; vaddr < end_mem; vaddr+=PAGE_SIZE) {
 611                 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
 612                 if(srmmu_pgd_none(*pgdp)) {
 613                         pmdp = srmmu_init_alloc(&mempool,
 614                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 615                         srmmu_pgd_set(pgdp, pmdp);
 616                 }
 617 
 618                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 619                 if(srmmu_pmd_none(*pmdp)) {
 620                         ptep = srmmu_init_alloc(&mempool,
 621                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 622                         srmmu_pmd_set(pmdp, ptep);
 623                 }
 624 
 625                 ptep = srmmu_pte_offset(pmdp, vaddr);
 626                 *ptep = srmmu_mk_pte(vaddr, SRMMU_PAGE_KERNEL);
 627         }
 628 
 629         /* Map IO areas. */
 630         for(vaddr = IOBASE_VADDR; vaddr < (IOBASE_VADDR+IOBASE_LEN);
 631             vaddr += SRMMU_PMD_SIZE) {
 632                 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
 633                 if(srmmu_pgd_none(*pgdp)) {
 634                         pmdp = srmmu_init_alloc(&mempool,
 635                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 636                         srmmu_pgd_set(pgdp, pmdp);
 637                 }
 638                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 639                 if(srmmu_pmd_none(*pmdp)) {
 640                         ptep = srmmu_init_alloc(&mempool,
 641                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 642                         srmmu_pmd_set(pmdp, ptep);
 643                 }
 644         }
 645 
 646         /* Map DVMA areas. */
 647         for(vaddr = (DVMA_VADDR); vaddr < (DVMA_VADDR + DVMA_LEN);
 648             vaddr += PAGE_SIZE) {
 649                 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
 650                 if(srmmu_pgd_none(*pgdp)) {
 651                         pmdp = srmmu_init_alloc(&mempool,
 652                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 653                         srmmu_pgd_set(pgdp, pmdp);
 654                 }
 655                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 656                 if(srmmu_pmd_none(*pmdp)) {
 657                         ptep = srmmu_init_alloc(&mempool,
 658                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 659                         srmmu_pmd_set(pmdp, ptep);
 660                 }
 661 
 662                 ptep = srmmu_pte_offset(pmdp, vaddr);
 663                 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE), SRMMU_PAGE_KERNEL);
 664                 pte_val(*ptep) &= ~(SRMMU_PTE_C_MASK);
 665         }
 666         srmmu_flush_whole_tlb();
 667         flush_ei_ctx(0x0);
 668 
 669         /* Map in the PERCPU areas in virtual address space. */
 670 #if 0
 671         prom_printf("PERCPU_VADDR + PERCPU_LEN = %08lx\n",
 672                     (PERCPU_VADDR + PERCPU_LEN));
 673 #endif
 674         for(vaddr = PERCPU_VADDR; vaddr < (PERCPU_VADDR + PERCPU_LEN);
 675             vaddr += PERCPU_ENTSIZE) {
 676                 pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
 677                 if(srmmu_pgd_none(*pgdp)) {
 678                         pmdp = srmmu_init_alloc(&mempool,
 679                                                 SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 680                         srmmu_pgd_set(pgdp, pmdp);
 681                 }
 682                 pmdp = srmmu_pmd_offset(pgdp, vaddr);
 683                 if(srmmu_pmd_none(*pmdp)) {
 684                         ptep = srmmu_init_alloc(&mempool,
 685                                                 SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 686                         srmmu_pmd_set(pmdp, ptep);
 687                 }
 688                 ptep = srmmu_pte_offset(pmdp, vaddr);
 689                 /* Per-cpu trap table page. */
 690                 *ptep++ = srmmu_mk_pte((unsigned int) start, SRMMU_PAGE_KERNEL);
 691                 /* Per-cpu kernel stack page. */
 692                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 693                                        SRMMU_PAGE_KERNEL);
 694                 /* Per-cpu Prom MBox. */
 695                 *ptep++ = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 696                                        SRMMU_PAGE_KERNEL);
 697                 /* Per-cpu state variables. */
 698                 *ptep = srmmu_mk_pte((unsigned int) srmmu_init_alloc(&mempool, PAGE_SIZE),
 699                                      SRMMU_PAGE_KERNEL);
 700         }
 701         percpu_table = (struct sparc_percpu *) PERCPU_VADDR;
 702 
 703         /* Ugh, have to map DVMA that the prom has mapped too or else
 704          * you will lose with video cards when we take over the ctx table.
 705          * Also, must take into consideration that the prom might be using
 706          * level-two or level-one PTEs. TODO
 707          *
 708          * XXX This still isn't right, the cg* graphics cards get their
 709          * XXX mapped screens all fucked up when I jump onto Linux's
 710          * XXX page tables.  Must investigate...
 711          */
 712         for(vaddr = KADB_DEBUGGER_BEGVM; vaddr != 0x0;) {
 713                 unsigned int prom_pte;
 714 
 715                 prom_pte = srmmu_init_twalk(vaddr, 0);
 716 
 717                 if(prom_pte) {
 718                         pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
 719                         if((prom_pte&0x3) == 0x0) {
 720                                 prom_pte &= ~0x3;
 721                                 prom_pte |= SRMMU_ET_PTE;
 722                                 pgd_val(*pgdp) = prom_pte;
 723                                 vaddr = SRMMU_PGDIR_ALIGN(vaddr+1);
 724                                 continue;
 725                         }
 726                         if(srmmu_pgd_none(*pgdp)) {
 727                                 pmdp = srmmu_init_alloc(&mempool,
 728                                                         SRMMU_PTRS_PER_PMD*sizeof(pmd_t));
 729                                 srmmu_pgd_set(pgdp, pmdp);
 730                         }
 731 
 732                         pmdp = srmmu_pmd_offset(pgdp, vaddr);
 733                         if((prom_pte&0x3) == 0x1) {
 734                                 prom_pte &= ~0x3;
 735                                 prom_pte |= SRMMU_ET_PTE;
 736                                 pmd_val(*pmdp) = prom_pte;
 737                                 vaddr = SRMMU_PMD_ALIGN(vaddr+1);
 738                                 continue;
 739                         }
 740                         if(srmmu_pmd_none(*pmdp)) {
 741                                 ptep = srmmu_init_alloc(&mempool,
 742                                                         SRMMU_PTRS_PER_PTE*sizeof(pte_t));
 743                                 srmmu_pmd_set(pmdp, ptep);
 744                         }
 745                         /* A normal 3rd level PTE, no need to change ET bits. */
 746                         ptep = srmmu_pte_offset(pmdp, vaddr);
 747                         pte_val(*ptep) = prom_pte;
 748 
 749                 }
 750                 vaddr += PAGE_SIZE;
 751         }
 752 
 753         /* I believe I do not need to flush VAC here since my stores  */
 754         /* probably already reached the physical RAM.             --P3 */
 755 
 756         /* We probably do, and should do it just to be safe... -Davem */
 757 
 758         /* Take the MMU over from the PROM */
 759         prom_printf("Taking over MMU from PROM.\n");
 760 
 761         srmmu_set_ctable_ptr(((unsigned)lnx_root) - PAGE_OFFSET);
 762 
 763         srmmu_flush_whole_tlb();
 764 
 765         /* Now it is ok to use memory at start_mem. */
 766         start_mem = PAGE_ALIGN(mempool);
 767         start_mem = free_area_init(start_mem, end_mem);
 768         start_mem = PAGE_ALIGN(start_mem);
 769 
 770 #if 0
 771         prom_printf("Testing context switches...\n");
 772         for(i=0; i<num_contexts; i++)
 773                 srmmu_set_context(i);
 774         prom_printf("done...\n");
 775         srmmu_set_context(0);
 776 #endif
 777 
 778         prom_printf("survived...\n");
 779         return start_mem;
 780 }
 781 
 782 /* Test the WP bit on the Sparc Reference MMU. */
 783 void
 784 srmmu_test_wp(void)
 785 {
 786         pgd_t *pgdp;
 787         
 788         wp_works_ok = -1;
 789         /* We mapped page zero as a read-only page in paging_init(),
 790          * so fire up the test, then invalidate the pgd for page zero.
 791          * It is no longer needed.
 792          */
 793 
 794         /* Let it rip... */
 795         __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
 796         if (wp_works_ok < 0)
 797                 wp_works_ok = 0;
 798 
 799         pgdp = srmmu_pgd_offset(init_task.mm, 0x0);
 800         pgd_val(*pgdp) = 0x0;
 801 
 802         return;
 803 }
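     /* How the test above works (presumed; the fault path is not in this
      * file): wp_works_ok is set to -1, then a store to virtual address zero
      * is attempted.  Page zero is mapped read-only (per the comment above),
      * so if write protection works the store traps and the fault handler is
      * expected to set wp_works_ok; if control returns here with it still
      * negative, write protection is declared broken (wp_works_ok = 0).
      */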
 804 
 805 void srmmu_update_mmu_cache(struct vm_area_struct * vma,
 806                             unsigned long address, pte_t pte)
 807 {
 808         printk("WHOOPS, update_mmu_cache called on a SRMMU!\n");
 809         panic("SRMMU bolixed...");
 810 }
 811 
 812 void
 813 srmmu_fork_hook(void *vtask, unsigned long kthread_usp)
 814 {
 815         return; /* XXX */
 816 }
 817 
 818 void
 819 srmmu_exit_hook(void *vtask)
 820 {
 821         return; /* XXX */
 822 }
 823 
 824 void
 825 srmmu_release_hook(void *vtask)
 826 {
 827         return; /* XXX */
 828 }
 829 
 830 void
 831 srmmu_flush_hook(void *vtask)
 832 {
 833         return; /* XXX */
 834 }
 835 
 836 void
 837 srmmu_task_cacheflush(void *vtask)
 838 {
 839         return; /* XXX */
 840 }
 841 
 842 /* Load up routines and constants for sun4m mmu */
 843 void
 844 ld_mmu_srmmu(void)
 845 {
 846         prom_printf("Loading srmmu MMU routines\n");
 847 
 848         /* First the constants */
 849         pmd_shift = SRMMU_PMD_SHIFT;
 850         pmd_size = SRMMU_PMD_SIZE;
 851         pmd_mask = SRMMU_PMD_MASK;
 852         pgdir_shift = SRMMU_PGDIR_SHIFT;
 853         pgdir_size = SRMMU_PGDIR_SIZE;
 854         pgdir_mask = SRMMU_PGDIR_MASK;
 855 
 856         ptrs_per_pte = SRMMU_PTRS_PER_PTE;
 857         ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
 858         ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
 859 
 860         page_none = SRMMU_PAGE_NONE;
 861         page_shared = SRMMU_PAGE_SHARED;
 862         page_copy = SRMMU_PAGE_COPY;
 863         page_readonly = SRMMU_PAGE_READONLY;
 864         page_kernel = SRMMU_PAGE_KERNEL;
 865         page_invalid = SRMMU_PAGE_INVALID;
 866         
 867         /* Functions */
 868         invalidate = srmmu_invalidate;
 869         set_pte = srmmu_set_pte;
 870         switch_to_context = srmmu_switch_to_context;
 871         pmd_align = srmmu_pmd_align;
 872         pgdir_align = srmmu_pgdir_align;
 873         vmalloc_start = srmmu_vmalloc_start;
 874 
 875         pte_page = srmmu_pte_page;
 876         pmd_page = srmmu_pmd_page;
 877         pgd_page = srmmu_pgd_page;
 878 
 879         sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
 880 
 881         pte_none = srmmu_pte_none;
 882         pte_present = srmmu_pte_present;
 883         pte_inuse = srmmu_pte_inuse;
 884         pte_clear = srmmu_pte_clear;
 885         pte_reuse = srmmu_pte_reuse;
 886 
 887         pmd_none = srmmu_pmd_none;
 888         pmd_bad = srmmu_pmd_bad;
 889         pmd_present = srmmu_pmd_present;
 890         pmd_inuse = srmmu_pmd_inuse;
 891         pmd_clear = srmmu_pmd_clear;
 892         pmd_reuse = srmmu_pmd_reuse;
 893 
 894         pgd_none = srmmu_pgd_none;
 895         pgd_bad = srmmu_pgd_bad;
 896         pgd_present = srmmu_pgd_present;
 897         pgd_inuse = srmmu_pgd_inuse;
 898         pgd_clear = srmmu_pgd_clear;
 899         pgd_reuse = srmmu_pgd_reuse;
 900 
 901         mk_pte = srmmu_mk_pte;
 902         pgd_set = srmmu_pgd_set;
 903         pte_modify = srmmu_pte_modify;
 904         pgd_offset = srmmu_pgd_offset;
 905         pmd_offset = srmmu_pmd_offset;
 906         pte_offset = srmmu_pte_offset;
 907         pte_free_kernel = srmmu_pte_free_kernel;
 908         pmd_free_kernel = srmmu_pmd_free_kernel;
 909         pte_alloc_kernel = srmmu_pte_alloc_kernel;
 910         pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
 911         pte_free = srmmu_pte_free;
 912         pte_alloc = srmmu_pte_alloc;
 913         pmd_free = srmmu_pmd_free;
 914         pmd_alloc = srmmu_pmd_alloc;
 915         pgd_free = srmmu_pgd_free;
 916         pgd_alloc = srmmu_pgd_alloc;
 917 
 918         pte_read = srmmu_pte_read;
 919         pte_write = srmmu_pte_write;
 920         pte_exec = srmmu_pte_exec;
 921         pte_dirty = srmmu_pte_dirty;
 922         pte_young = srmmu_pte_young;
 923         pte_cow = srmmu_pte_cow;
 924         pte_wrprotect = srmmu_pte_wrprotect;
 925         pte_rdprotect = srmmu_pte_rdprotect;
 926         pte_exprotect = srmmu_pte_exprotect;
 927         pte_mkclean = srmmu_pte_mkclean;
 928         pte_mkold = srmmu_pte_mkold;
 929         pte_uncow = srmmu_pte_uncow;
 930         pte_mkwrite = srmmu_pte_mkwrite;
 931         pte_mkread = srmmu_pte_mkread;
 932         pte_mkexec = srmmu_pte_mkexec;
 933         pte_mkdirty = srmmu_pte_mkdirty;
 934         pte_mkyoung = srmmu_pte_mkyoung;
 935         pte_mkcow = srmmu_pte_mkcow;
 936         get_fault_info = srmmu_get_fault_info;
 937         update_mmu_cache = srmmu_update_mmu_cache;
 938         mmu_exit_hook = srmmu_exit_hook;
 939         mmu_fork_hook = srmmu_fork_hook;
 940         mmu_release_hook = srmmu_release_hook;
 941         mmu_flush_hook = srmmu_flush_hook;
 942         mmu_task_cacheflush = srmmu_task_cacheflush;
 943         mmu_lockarea = srmmu_lockarea;
 944         mmu_unlockarea = srmmu_unlockarea;
 945         mmu_get_scsi_buffer = srmmu_get_scsi_buffer;
 946         mmu_release_scsi_buffer = srmmu_release_scsi_buffer;
 947 }
