root/include/asm-sparc/pgtsrmmu.h

/* [previous][next][first][last][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_get_mmureg
  2. srmmu_set_mmureg
  3. srmmu_set_ctable_ptr
  4. srmmu_get_ctable_ptr
  5. srmmu_set_context
  6. srmmu_get_context
  7. srmmu_get_fstatus
  8. srmmu_get_faddr
  9. srmmu_flush_whole_tlb
  10. srmmu_flush_tlb_ctx
  11. srmmu_flush_tlb_region
  12. srmmu_flush_tlb_segment
  13. srmmu_flush_tlb_page
  14. srmmu_hwprobe

   1 /* $Id: pgtsrmmu.h,v 1.17 1996/04/25 06:13:26 davem Exp $
   2  * pgtsrmmu.h:  SRMMU page table defines and code.
   3  *
   4  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
   5  */
   6 
   7 #ifndef _SPARC_PGTSRMMU_H
   8 #define _SPARC_PGTSRMMU_H
   9 
  10 #include <linux/config.h>
  11 #include <asm/page.h>
  12 
  13 /* PMD_SHIFT determines the size of the area a second-level page table can map */
  14 #define SRMMU_PMD_SHIFT         18
  15 #define SRMMU_PMD_SIZE          (1UL << SRMMU_PMD_SHIFT)
  16 #define SRMMU_PMD_MASK          (~(SRMMU_PMD_SIZE-1))
  17 #define SRMMU_PMD_ALIGN(addr)   (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)
  18 
  19 /* PGDIR_SHIFT determines what a third-level page table entry can map */
  20 #define SRMMU_PGDIR_SHIFT       24
  21 #define SRMMU_PGDIR_SIZE        (1UL << SRMMU_PGDIR_SHIFT)
  22 #define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE-1))
  23 #define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)
  24 
  25 #define SRMMU_PTRS_PER_PTE      64
  26 #define SRMMU_PTRS_PER_PMD      64
  27 #define SRMMU_PTRS_PER_PGD      256
  28 
  29 #define SRMMU_PTE_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
  30 #define SRMMU_PMD_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
  31 #define SRMMU_PGD_TABLE_SIZE    0x400 /* 256 entries, 4 bytes a piece */
  32 
  33 #define SRMMU_VMALLOC_START   (0xfe200000)
  34 
  35 /* Definition of the values in the ET field of PTD's and PTE's */
  36 #define SRMMU_ET_MASK         0x3
  37 #define SRMMU_ET_INVALID      0x0
  38 #define SRMMU_ET_PTD          0x1
  39 #define SRMMU_ET_PTE          0x2
  40 #define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */
  41 
  42 /* Physical page extraction from PTP's and PTE's. */
  43 #define SRMMU_CTX_PMASK    0xfffffff0
  44 #define SRMMU_PTD_PMASK    0xfffffff0
  45 #define SRMMU_PTE_PMASK    0xffffff00
  46 
  47 /* The pte non-page bits.  Some notes:
  48  * 1) cache, dirty, valid, and ref are frobbable
  49  *    for both supervisor and user pages.
  50  * 2) exec and write will only give the desired effect
  51  *    on user pages
  52  * 3) use priv and priv_readonly for changing the
  53  *    characteristics of supervisor ptes
  54  */
  55 #define SRMMU_CACHE        0x80
  56 #define SRMMU_DIRTY        0x40
  57 #define SRMMU_REF          0x20
  58 #define SRMMU_EXEC         0x08
  59 #define SRMMU_WRITE        0x04
  60 #define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
  61 #define SRMMU_PRIV         0x1c
  62 #define SRMMU_PRIV_RDONLY  0x18
  63 
  64 #define SRMMU_CHG_MASK    (SRMMU_REF | SRMMU_DIRTY | SRMMU_ET_PTE)
  65 
  66 /* Some day I will implement true fine grained access bits for
  67  * user pages because the SRMMU gives us the capabilities to
  68  * enforce all the protection levels that vma's can have.
  69  * XXX But for now...
  70  */
  71 #define SRMMU_PAGE_NONE    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
  72                                     SRMMU_PRIV | SRMMU_REF)
  73 #define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
  74                                     SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
  75 #define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
  76                                     SRMMU_EXEC | SRMMU_REF)
  77 #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
  78                                     SRMMU_EXEC | SRMMU_REF)
  79 #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV)
  80 
  81 /* SRMMU Register addresses in ASI 0x4.  These are valid for all
  82  * current SRMMU implementations that exist.
  83  */
  84 #define SRMMU_CTRL_REG           0x00000000
  85 #define SRMMU_CTXTBL_PTR         0x00000100
  86 #define SRMMU_CTX_REG            0x00000200
  87 #define SRMMU_FAULT_STATUS       0x00000300
  88 #define SRMMU_FAULT_ADDR         0x00000400
  89 
/* Accessing the MMU control register. */

/* Read the SRMMU control register.
 *
 * The control register lives at offset SRMMU_CTRL_REG (0x0) in ASI
 * 0x4 (ASI_M_MMUREGS); using %g0 as the address yields that offset
 * for free.  Returns the raw 32-bit register value.
 */
extern inline unsigned int srmmu_get_mmureg(void)
{
        unsigned int retval;
        __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
                             "=r" (retval) :
                             "i" (ASI_M_MMUREGS));
        return retval;
}
  99 
/* Write @regval to the SRMMU control register (offset 0x0 in
 * ASI_M_MMUREGS, addressed via %g0).  The "memory" clobber keeps the
 * compiler from moving memory accesses across this MMU state change.
 */
extern inline void srmmu_set_mmureg(unsigned long regval)
{
        __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
                             "r" (regval), "i" (ASI_M_MMUREGS) : "memory");

}
 106 
 107 extern inline void srmmu_set_ctable_ptr(unsigned long paddr)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 110 #if CONFIG_AP1000
 111         /* weird memory system on the AP1000 */
 112         paddr |= (0x8<<28);
 113 #endif
 114         __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 115                              "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
 116                              "i" (ASI_M_MMUREGS) :
 117                              "memory");
 118 }
 119 
/* Read back the context table pointer register and unpack it into a
 * physical address (mask with SRMMU_CTX_PMASK, then shift left 4 --
 * the inverse of srmmu_set_ctable_ptr()).
 */
extern inline unsigned long srmmu_get_ctable_ptr(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_CTXTBL_PTR),
                             "i" (ASI_M_MMUREGS));
        return (retval & SRMMU_CTX_PMASK) << 4;
}
 130 
/* Switch the MMU to @context by writing the context register
 * (SRMMU_CTX_REG in ASI_M_MMUREGS).  "memory" clobber: all further
 * memory accesses are translated through the new context's tables.
 */
extern inline void srmmu_set_context(int context)
{
        __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
                             "r" (context), "r" (SRMMU_CTX_REG),
                             "i" (ASI_M_MMUREGS) : "memory");
}
 137 
/* Return the currently active MMU context number, read from the
 * context register (SRMMU_CTX_REG in ASI_M_MMUREGS).
 */
extern inline int srmmu_get_context(void)
{
        register int retval;
        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_CTX_REG),
                             "i" (ASI_M_MMUREGS));
        return retval;
}
 147 
/* Read the fault status register (SRMMU_FAULT_STATUS in
 * ASI_M_MMUREGS).  NOTE(review): on SRMMU, reading fault status
 * typically clears it -- confirm against the chip manual before
 * calling this speculatively.
 */
extern inline unsigned int srmmu_get_fstatus(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
        return retval;
}
 157 
/* Read the fault address register (SRMMU_FAULT_ADDR in
 * ASI_M_MMUREGS): the virtual address of the most recent fault.
 */
extern inline unsigned int srmmu_get_faddr(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
        return retval;
}
 167 
/* This is guaranteed on all SRMMU's. */

/* Flush the entire TLB.  A store to ASI_M_FLUSH_PROBE with address
 * bits [11:8] == 0x4 ("entire" flush type) invalidates every TLB
 * entry; the value stored (%g0, i.e. zero) is ignored.
 */
extern inline void srmmu_flush_whole_tlb(void)
{
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (0x400),        /* Flush entire TLB!! */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");

}
 176 
/* These flush types are not available on all chips... */

/* Flush all TLB entries belonging to the current context (flush type
 * 0x3 in address bits [11:8] of the ASI_M_FLUSH_PROBE store).
 */
extern inline void srmmu_flush_tlb_ctx(void)
{
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (0x300),        /* Flush TLB ctx.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");

}
 185 
/* Flush TLB entries for the 16MB region containing @addr.  The
 * address is aligned to SRMMU_PGDIR_MASK (region = pgd granularity,
 * 1 << 24 bytes) and combined with flush type 0x2.
 */
extern inline void srmmu_flush_tlb_region(unsigned long addr)
{
        addr &= SRMMU_PGDIR_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (addr | 0x200), /* Flush TLB region.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");

}
 194 
 195 
/* Flush TLB entries for the 256KB segment containing @addr.  The
 * address is aligned to SRMMU_PMD_MASK (segment = pmd granularity,
 * 1 << 18 bytes) and combined with flush type 0x1.
 */
extern inline void srmmu_flush_tlb_segment(unsigned long addr)
{
        addr &= SRMMU_PMD_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (addr | 0x100), /* Flush TLB segment.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");

}
 204 
/* Flush the single TLB entry for @page.  The page-aligned address
 * with flush type 0x0 (no bits set in [11:8]) selects a page flush.
 */
extern inline void srmmu_flush_tlb_page(unsigned long page)
{
        page &= PAGE_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (page),        /* Flush TLB page.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");

}
 213 
/* Ask the MMU hardware to walk the page tables for @vaddr.
 *
 * A LOAD (rather than a store) from ASI_M_FLUSH_PROBE performs a
 * probe; type 0x4 ("entire") in address bits [11:8] probes all
 * levels.  Presumably the returned value is the matching PTE, or 0
 * when no valid translation exists -- confirm against callers / the
 * SRMMU manual.
 */
extern inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
        unsigned long retval;

        vaddr &= PAGE_MASK;
        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

        return retval;
}
 225 
 226 extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
 227 extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
 228 
 229 #endif /* !(_SPARC_PGTSRMMU_H) */

/* [previous][next][first][last][top][bottom][index][help] */