root/include/asm-sparc/pgtsrmmu.h


DEFINITIONS

This source file includes the following definitions.
  1. srmmu_get_mmureg
  2. srmmu_set_mmureg
  3. srmmu_set_ctable_ptr
  4. srmmu_get_ctable_ptr
  5. srmmu_set_context
  6. srmmu_get_context
  7. srmmu_get_fstatus
  8. srmmu_get_faddr
  9. srmmu_flush_whole_tlb
  10. srmmu_flush_tlb_ctx
  11. srmmu_flush_tlb_region
  12. srmmu_flush_tlb_segment
  13. srmmu_flush_tlb_page
  14. srmmu_hwprobe

/* $Id: pgtsrmmu.h,v 1.13 1996/03/01 07:20:54 davem Exp $
 * pgtsrmmu.h:  SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_SHIFT         18
#define SRMMU_PMD_SIZE          (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK          (~(SRMMU_PMD_SIZE-1))
#define SRMMU_PMD_ALIGN(addr)   (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT       24
#define SRMMU_PGDIR_SIZE        (1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_PTRS_PER_PTE      64
#define SRMMU_PTRS_PER_PMD      64
#define SRMMU_PTRS_PER_PGD      256

#define SRMMU_PTE_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PMD_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE    0x400 /* 256 entries, 4 bytes a piece */

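/* Illustrative sketch, not part of the original header: with the shifts
 * and table sizes above, a 32-bit virtual address splits into an 8-bit
 * pgd index, a 6-bit pmd index, a 6-bit pte index and a 12-bit page
 * offset (8 + 6 + 6 + 12 = 32).  The helper names below are
 * hypothetical and exist only to show the decomposition.
 */
static inline unsigned long srmmu_example_pgd_index(unsigned long vaddr)
{
        return (vaddr >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1);
}

static inline unsigned long srmmu_example_pmd_index(unsigned long vaddr)
{
        return (vaddr >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
}

static inline unsigned long srmmu_example_pte_index(unsigned long vaddr)
{
        return (vaddr >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
}
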
#define SRMMU_VMALLOC_START   (0xfe100000)

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00

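/* Illustrative sketch, not part of the original header: the PMASKs
 * above select the physical-address bits held in a context/table
 * pointer or pte, which are stored shifted right by four.  Masking and
 * shifting left by four recovers the physical address, exactly as
 * srmmu_get_ctable_ptr() does further down.  Helper names are
 * hypothetical.
 */
static inline unsigned long srmmu_example_pte_physpage(unsigned long pte)
{
        return (pte & SRMMU_PTE_PMASK) << 4;    /* physical page address */
}

static inline unsigned long srmmu_example_ptd_table(unsigned long ptd)
{
        return (ptd & SRMMU_PTD_PMASK) << 4;    /* next-level table address */
}
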
/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18

#define SRMMU_CHG_MASK    (SRMMU_REF | SRMMU_DIRTY | SRMMU_ET_PTE)

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV)

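/* Illustrative sketch, not part of the original header: a pte is the
 * physical page address packed right-shifted by four (the inverse of
 * the extraction sketched above) OR'd with protection/status bits such
 * as those used in the SRMMU_PAGE_* encodings.  The helper name is
 * hypothetical and takes raw bits rather than a __pgprot() value.
 */
static inline unsigned long srmmu_example_mk_pte(unsigned long paddr,
                                                 unsigned long prot_bits)
{
        return ((paddr >> 4) & SRMMU_PTE_PMASK) | prot_bits;
}
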
/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400

/* Accessing the MMU control register. */
extern inline unsigned int srmmu_get_mmureg(void)
{
        unsigned int retval;
        __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
                             "=r" (retval) :
                             "i" (ASI_M_MMUREGS));
        return retval;
}

extern inline void srmmu_set_mmureg(unsigned long regval)
{
        __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
                             "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

extern inline void srmmu_set_ctable_ptr(unsigned long paddr)
{
        paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
        __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
                             "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
                             "i" (ASI_M_MMUREGS) :
                             "memory");
}

extern inline unsigned long srmmu_get_ctable_ptr(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_CTXTBL_PTR),
                             "i" (ASI_M_MMUREGS));
        return (retval & SRMMU_CTX_PMASK) << 4;
}

extern inline void srmmu_set_context(int context)
{
        __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
                             "r" (context), "r" (SRMMU_CTX_REG),
                             "i" (ASI_M_MMUREGS) : "memory");
}

extern inline int srmmu_get_context(void)
{
        register int retval;
        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_CTX_REG),
                             "i" (ASI_M_MMUREGS));
        return retval;
}

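/* Illustrative sketch, not part of the original header: pointing the
 * MMU at a (physical) context table and then selecting a context
 * number with the accessors above.  The helper name and arguments are
 * hypothetical.
 */
static inline void srmmu_example_switch_context(unsigned long ctable_paddr,
                                                int ctx)
{
        srmmu_set_ctable_ptr(ctable_paddr);     /* physical address, as-is */
        srmmu_set_context(ctx);                 /* context register index */
}
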
extern inline unsigned int srmmu_get_fstatus(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
        return retval;
}

extern inline unsigned int srmmu_get_faddr(void)
{
        unsigned int retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
        return retval;
}

/* This is guaranteed on all SRMMU's. */
extern inline void srmmu_flush_whole_tlb(void)
{
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (0x400),        /* Flush entire TLB!! */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* These flush types are not available on all chips... */
extern inline void srmmu_flush_tlb_ctx(void)
{
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (0x300),        /* Flush TLB ctx.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern inline void srmmu_flush_tlb_region(unsigned long addr)
{
        addr &= SRMMU_PGDIR_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (addr | 0x200), /* Flush TLB region.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern inline void srmmu_flush_tlb_segment(unsigned long addr)
{
        addr &= SRMMU_PMD_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (addr | 0x100), /* Flush TLB segment.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern inline void srmmu_flush_tlb_page(unsigned long page)
{
        page &= PAGE_MASK;
        __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
                             "r" (page),        /* Flush TLB page.. */
                             "i" (ASI_M_FLUSH_PROBE) : "memory");
}

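/* Illustrative sketch, not part of the original header: after software
 * rewrites a pte, the old translation may still sit in the TLB, so a
 * page-level flush is needed before the new mapping is reliably used.
 * The helper name is hypothetical and 'ptep' is assumed to point at the
 * in-memory pte for 'vaddr'.
 */
static inline void srmmu_example_update_pte(unsigned long *ptep,
                                            unsigned long new_pte,
                                            unsigned long vaddr)
{
        *ptep = new_pte;
        srmmu_flush_tlb_page(vaddr);
}
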
extern inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
        unsigned long retval;

        __asm__ __volatile__("lda [%1] %2, %0\n\t" :
                             "=r" (retval) :
                             "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

        return retval;
}
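
/* Illustrative sketch, not part of the original header: the probe above
 * makes the MMU walk the page tables for 'vaddr' and returns the pte it
 * finds, or 0 when no valid translation exists, so it can serve as a
 * cheap "is this mapped?" test.  The helper name is hypothetical.
 */
static inline int srmmu_example_vaddr_is_mapped(unsigned long vaddr)
{
        return srmmu_hwprobe(vaddr) != 0;
}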

extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);

#endif /* !(_SPARC_PGTSRMMU_H) */
