/* page.h: Various defines and such for MMU operations on the Sparc for
 * the Linux kernel.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PAGE_H
#define _SPARC_PAGE_H

#include <asm/asi.h>        /* for get/set segmap/pte routines */
#include <asm/contregs.h>   /* for switch_to_context */
#include <asm/head.h>       /* for KERNBASE */

#define PAGE_SHIFT   12     /* This is the virtual page... */
#define PAGE_OFFSET  KERNBASE
#define PAGE_SIZE    (1 << PAGE_SHIFT)

/* to mask away the intra-page address bits */
#define PAGE_MASK    (~(PAGE_SIZE-1))

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/* The following structure is used to hold the physical
 * memory configuration of the machine.  This is filled in
 * probe_memory() and is later used by mem_init() to set up
 * mem_map[].  We statically allocate SPARC_PHYS_BANKS of
 * these structs; the number is arbitrary.  The entry after
 * the last valid one has num_bytes == 0.
 */

struct sparc_phys_banks {
	unsigned long base_addr;
	unsigned long num_bytes;
};

#define SPARC_PHYS_BANKS 32

extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
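
/* A minimal sketch of how sp_banks[] is meant to be consumed: walk the
 * array until the zero-length sentinel entry.  This helper is purely
 * illustrative and not part of the original interface.
 */
extern __inline__ unsigned long sparc_phys_mem_size(void)
{
	unsigned long total = 0;
	int i;

	/* The entry after the last valid bank has num_bytes == 0. */
	for (i = 0; i < SPARC_PHYS_BANKS && sp_banks[i].num_bytes != 0; i++)
		total += sp_banks[i].num_bytes;

	return total;
}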

#define CONFIG_STRICT_MM_TYPECHECKS

#ifdef CONFIG_STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)      ((x).pgd)
#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)      (x)
#define pmd_val(x)      (x)
#define pgd_val(x)      (x)
#define pgprot_val(x)   (x)

#define __pte(x)        (x)
#define __pmd(x)        (x)
#define __pgd(x)        (x)
#define __pgprot(x)     (x)

#endif
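
/* With CONFIG_STRICT_MM_TYPECHECKS the table-entry types are distinct
 * structs, so raw longs cannot be mixed up with ptes by accident; values
 * must go through __pte()/pte_val() and friends.  A hypothetical sketch
 * (the helper below is for illustration only and is not used anywhere):
 */
extern __inline__ pte_t example_pte_set_bits(pte_t pte, unsigned long bits)
{
	/* Works under both typedef flavours above. */
	return __pte(pte_val(pte) | bits);
}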

/* The current va context is global and known, so all that is needed to
 * do an invalidate is flush the VAC on a sun4c or call the ASI flushing
 * routines on a SRMMU.
 */

extern void (*invalidate)(void);
extern void (*set_pte)(pte_t *ptep, pte_t entry);
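
/* So, illustratively, code that rewrites a mapping would do something like
 *
 *	set_pte(ptep, entry);
 *	invalidate();
 *
 * to make sure no stale translation survives (a sketch, not a rule this
 * header enforces).
 */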

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)  (((addr)+PAGE_SIZE-1)&PAGE_MASK)

/* We now put the free page pool mapped contiguously in high memory above
 * the kernel.
 */
#define MAP_NR(addr) ((((unsigned long)addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define MAP_PAGE_RESERVED (1<<15)
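
/* Worked example with the 4K pages defined above: PAGE_ALIGN(0x1234) is
 * 0x2000, an address already on a page boundary is left alone, and
 * MAP_NR(PAGE_OFFSET + 0x5000) is 5, i.e. the sixth entry of mem_map[].
 */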


#endif /* !(__ASSEMBLY__) */

/* The rest is kind of funky because on the sparc, the offsets into the mmu
 * entries are encoded in magic alternate address space tables.  I will
 * probably find some nifty inline assembly routines to do the equivalent.
 * Much thought must go into this code. (davem@caip.rutgers.edu)
 */

/* Bitfields within a Sparc sun4c PTE (page table entry). */

#define PTE_V     0x80000000   /* valid bit */
#define PTE_ACC   0x60000000   /* access bits */
#define PTE_W     0x40000000   /* writable bit */
#define PTE_P     0x20000000   /* privileged page */
#define PTE_NC    0x10000000   /* page is non-cacheable */
#define PTE_TYP   0x0c000000   /* page type field */
#define PTE_RMEM  0x00000000   /* type == on board real memory */
#define PTE_IO    0x04000000   /* type == i/o area */
#define PTE_VME16 0x08000000   /* type == 16-bit VME area */
#define PTE_VME32 0x0c000000   /* type == 32-bit VME area */
#define PTE_R     0x02000000   /* page has been referenced */
#define PTE_M     0x01000000   /* page has been modified */
#define PTE_RESV  0x00f80000   /* reserved bits */
#define PTE_PHYPG 0x0007ffff   /* phys pg number, sun4c only uses 16 bits */
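
/* For instance, a valid, writable, privileged, cacheable page of on-board
 * memory at physical page frame 'pfn' would be encoded roughly as
 *
 *	PTE_V | PTE_W | PTE_P | PTE_RMEM | (pfn & PTE_PHYPG)
 *
 * (an illustrative composition, not a macro this file provides).
 */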

/* SRMMU defines */
/* The fields in an srmmu virtual address when it gets translated.
 *
 * -------------------------------------------------------------
 * |  INDEX 1  |  INDEX 2  |  INDEX 3  |     PAGE OFFSET       |
 * -------------------------------------------------------------
 *  31       24 23       18 17       12 11                    0
 */
#define SRMMU_IDX1_SHIFT     24
#define SRMMU_IDX1_MASK      0xff000000
#define SRMMU_IDX2_SHIFT     18
#define SRMMU_IDX2_MASK      0x00fc0000
#define SRMMU_IDX3_SHIFT     12
#define SRMMU_IDX3_MASK      0x0003f000
#define SRMMU_PGOFFSET_MASK  0x00000fff

/* The page table sizes for the various levels in bytes. */
#define SRMMU_LV1_PTSIZE     1024
#define SRMMU_LV2_PTSIZE     256
#define SRMMU_LV3_PTSIZE     256
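
/* So, for example, the level-1 table index of a virtual address va is
 * (va & SRMMU_IDX1_MASK) >> SRMMU_IDX1_SHIFT: an 8-bit value selecting one
 * of the 256 four-byte entries in the 1024-byte level-1 table, while the
 * 6-bit level-2 and level-3 indexes each select one of 64 entries in the
 * 256-byte lower-level tables.
 */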

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_INVALID   0x0
#define SRMMU_ET_PTD       0x1
#define SRMMU_ET_PTE       0x2
#define SRMMU_ET_RESV      0x3
#define SRMMU_ET_PTDBAD    0x3    /* Upward compatibility my butt. */

/* Page table directory bits.
 *
 * ----------------
 * |  PTP    | ET |
 * ----------------
 *  31      2 1  0
 *
 * PTP: The physical page table pointer.  This value appears on
 *      bits 35->6 of the physical address bus during translation.
 *
 * ET:  Entry type field.  Must be 1 for a PTD.
 */

#define SRMMU_PTD_PTP_SHIFT        0x2
#define SRMMU_PTD_PTP_MASK         0xfffffffc
#define SRMMU_PTD_PTP_PADDR_SHIFT  0x4
#define SRMMU_PTD_ET_SHIFT         0x0
#define SRMMU_PTD_ET_MASK          0x00000003
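
/* Thus, illustratively, the physical base of the next-level table pointed
 * to by a PTD is
 *
 *	(ptd & SRMMU_PTD_PTP_MASK) << SRMMU_PTD_PTP_PADDR_SHIFT
 *
 * which lands the PTP field on physical address bits 35->6 as described
 * above (a sketch, not a macro provided by this file).
 */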

/* Page table entry bits.
 *
 * -------------------------------------------------
 * |  Physical Page Number  | C | M | R | ACC | ET |
 * -------------------------------------------------
 *  31                     8  7   6   5  4   2 1  0
 *
 * PPN: Physical page number, the high order 24 bits of the 36-bit
 *      physical address; thus if you mask off all the non-PPN bits
 *      and shift left by SRMMU_PTE_PPN_PADDR_SHIFT you have the
 *      physical address of your page.
 *
 * C:   Whether the page is cacheable in the mmu TLB or not.  If it is
 *      not set the CPU will not cache accesses to these addresses.  For
 *      IO space translations this bit should be clear.
 *
 * M:   Modified.  This tells whether the page has been written to
 *      since the bit was last cleared.  NOTE: this does not include
 *      accesses via the ASI physical page pass through since that does
 *      not use the MMU.
 *
 * R:   Referenced.  This tells whether the page has been referenced
 *      in any way, shape or form since the last clearing of the bit.
 *      NOTE: this does not include accesses via the ASI physical page
 *      pass through since that does not use the MMU.
 *
 * ACC: Access permissions for this page.  This is further explained below
 *      with appropriate macros.
 */

#define SRMMU_PTE_PPN_SHIFT        0x8
#define SRMMU_PTE_PPN_MASK         0xffffff00
#define SRMMU_PTE_PPN_PADDR_SHIFT  0x4
#define SRMMU_PTE_C_SHIFT          0x7
#define SRMMU_PTE_C_MASK           0x00000080
#define SRMMU_PTE_M_SHIFT          0x6
#define SRMMU_PTE_M_MASK           0x00000040
#define SRMMU_PTE_R_SHIFT          0x5
#define SRMMU_PTE_R_MASK           0x00000020
#define SRMMU_PTE_ACC_SHIFT        0x2
#define SRMMU_PTE_ACC_MASK         0x0000001c
#define SRMMU_PTE_ET_SHIFT         0x0
#define SRMMU_PTE_ET_MASK          0x00000003
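
/* So, as with PTDs, the physical page address behind a PTE can be formed
 * (illustratively) as
 *
 *	(pte & SRMMU_PTE_PPN_MASK) << SRMMU_PTE_PPN_PADDR_SHIFT
 *
 * putting the 24-bit PPN on bits 35->12 of the 36-bit physical address
 * (a sketch, not a macro provided by this file).
 */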

/* SRMMU pte access bits.
 *
 * ACC   USER ACCESS            SUPERVISOR ACCESS
 * ---   --------------         -----------------
 * 0x0   read only              read only
 * 0x1   read&write             read&write
 * 0x2   read&execute           read&execute
 * 0x3   read&write&execute     read&write&execute
 * 0x4   execute only           execute only
 * 0x5   read only              read&write
 * 0x6   ACCESS DENIED          read&execute
 * 0x7   ACCESS DENIED          read&write&execute
 *
 * In the macros below these values are already shifted left by two bits
 * (SRMMU_PTE_ACC_SHIFT) into their place in the pte.
 */

#define SRMMU_ACC_US_RDONLY    0x00
#define SRMMU_ACC_US_RDWR      0x04
#define SRMMU_ACC_US_RDEXEC    0x08
#define SRMMU_ACC_US_RDWREXEC  0x0c
#define SRMMU_ACC_US_EXECONLY  0x10
#define SRMMU_ACC_U_RDONLY     0x14
#define SRMMU_ACC_S_RDWR       0x14
#define SRMMU_ACC_U_ACCDENIED  0x18
#define SRMMU_ACC_S_RDEXEC     0x18
#define SRMMU_ACC_U_ACCDENIED2 0x1c
#define SRMMU_ACC_S_RDWREXEC   0x1c
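
/* For example, SRMMU_ACC_US_RDWR is ACC code 0x1 shifted into place:
 * (0x1 << SRMMU_PTE_ACC_SHIFT) == 0x04, and masking a pte with
 * SRMMU_PTE_ACC_MASK recovers the shifted ACC field directly.
 */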

#ifndef __ASSEMBLY__

/* SUN4C pte, segmap, and context manipulation */
extern __inline__ unsigned long get_segmap(unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("lduba [%1] %2, %0" :
			     "=r" (entry) :
			     "r" (addr), "i" (ASI_SEGMAP));

	return (entry & 0xff);
}

extern __inline__ void put_segmap(unsigned long addr, unsigned long entry)
{
	__asm__ __volatile__("stba %1, [%0] %2" : :
			     "r" (addr), "r" (entry & 0xff),
			     "i" (ASI_SEGMAP));

	return;
}

extern __inline__ unsigned long get_pte(unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("lda [%1] %2, %0" :
			     "=r" (entry) :
			     "r" (addr), "i" (ASI_PTE));

	return entry;
}

extern __inline__ void put_pte(unsigned long addr, unsigned long entry)
{
	__asm__ __volatile__("sta %1, [%0] %2" : :
			     "r" (addr),
			     "r" (entry), "i" (ASI_PTE));

	return;
}
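
/* A hypothetical usage sketch of the two pte accessors above (illustration
 * only, not part of the original interface): mark the sun4c pte currently
 * mapping 'addr' non-cacheable by reading it, or-ing in PTE_NC, and
 * writing it back.
 */
extern __inline__ void example_make_noncacheable(unsigned long addr)
{
	put_pte(addr, get_pte(addr) | PTE_NC);
}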

extern void (*switch_to_context)(int);

extern __inline__ int get_context(void)
{
	register int ctx;

	__asm__ __volatile__("lduba [%1] %2, %0" :
			     "=r" (ctx) :
			     "r" (AC_CONTEXT), "i" (ASI_CONTROL));

	return ctx;
}
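
/* Illustrative sketch of pairing get_context() with the switch_to_context
 * hook above (hypothetical helper, not used elsewhere): peek at a pte in
 * another context, then restore the original context.
 */
extern __inline__ unsigned long example_get_pte_in_context(unsigned long addr,
							   int ctx)
{
	int octx = get_context();
	unsigned long entry;

	switch_to_context(ctx);
	entry = get_pte(addr);
	switch_to_context(octx);

	return entry;
}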

typedef unsigned short mem_map_t;

#endif /* !(__ASSEMBLY__) */

#endif /* __KERNEL__ */

#endif /* _SPARC_PAGE_H */