This source file includes the following definitions:
- mk_pte
- pte_modify
- pmd_set
- pgd_set
- pte_page
- pmd_page
- pgd_page
- pte_none
- pte_present
- pte_inuse
- pte_clear
- pte_reuse
- pmd_none
- pmd_bad
- pmd_present
- pmd_inuse
- pmd_clear
- pmd_reuse
- pgd_none
- pgd_bad
- pgd_present
- pgd_inuse
- pgd_clear
- pgd_reuse
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_cow
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_uncow
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- pte_mkcow
- SET_PAGE_DIR
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache
- mk_swap_pte
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify
 * and use the Alpha page table tree.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines the size of the area a top-level entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
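
/*
 * A worked example (illustrative only), assuming the usual Alpha 8KB
 * page size, i.e. PAGE_SHIFT == 13, so each table page holds
 * 2^(13-3) = 1024 eight-byte entries:
 *
 *   PMD_SHIFT   = 13 + 10   = 23  ->  PMD_SIZE   = 8MB  per second-level entry
 *   PGDIR_SHIFT = 13 + 2*10 = 33  ->  PGDIR_SIZE = 8GB  per top-level entry
 *
 * Other page sizes from <asm/page.h> scale these accordingly.
 */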

/*
 * Entries per page table level. The last top-level slot is reserved
 * for the kernel mapping of the page directory itself (see
 * SET_PAGE_DIR below), hence the -1 in PTRS_PER_PGD.
 */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)

/* the number of pointers that fit on a page */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))

#define VMALLOC_START 0xFFFFFE0000000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits.
 */
#define _PAGE_VALID 0x0001
#define _PAGE_FOR 0x0002	/* fault on read */
#define _PAGE_FOW 0x0004	/* fault on write */
#define _PAGE_FOE 0x0008	/* fault on execute */
#define _PAGE_ASM 0x0010	/* address space match */
#define _PAGE_KRE 0x0100	/* kernel read enable */
#define _PAGE_URE 0x0200	/* user read enable */
#define _PAGE_KWE 0x1000	/* kernel write enable */
#define _PAGE_UWE 0x2000	/* user write enable */

/* software bits: the Alpha has no hardware dirty or accessed bits */
#define _PAGE_COW 0x10000
#define _PAGE_DIRTY 0x20000
#define _PAGE_ACCESSED 0x40000

/*
 * Dirty and accessed are emulated in software: making a pte "clean"
 * drops the write-enable bits along with _PAGE_DIRTY, so the next
 * write faults and the fault handler can set the dirty bit; making it
 * "old" works the same way with the read-enable bits and
 * _PAGE_ACCESSED.
 */
#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

/* the page frame number lives in bits 32 and up */
#define _PFN_MASK 0xFFFFFFFF00000000

#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_COW)
#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

/*
 * _PAGE_P() builds a private (copy-on-write) protection: any mapping
 * that is not already write-protected gets _PAGE_FOW | _PAGE_COW
 * added. _PAGE_S() builds the corresponding shared protection.
 */
#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:(_PAGE_FOW | _PAGE_COW)))
#define _PAGE_S(x) _PAGE_NORMAL(x)
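
/*
 * For instance (an illustrative expansion, not an extra definition):
 * the private write-only combination __P010 below expands to
 *
 *	_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW | _PAGE_COW
 *
 * i.e. it starts out write-protected and marked copy-on-write, while
 * the shared flavour __S010 omits both _PAGE_FOW and _PAGE_COW, so a
 * write fault on it can simply mark the page dirty instead of copying.
 */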

/*
 * Mapping from Linux protection combinations (in execute/write/read
 * bit order) to Alpha protections. The private mappings (__Pxxx) go
 * through _PAGE_P and thus become copy-on-write when writable.
 */
#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010 _PAGE_P(_PAGE_FOE)
#define __P011 _PAGE_P(_PAGE_FOE)
#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101 _PAGE_P(_PAGE_FOW)
#define __P110 _PAGE_P(0)
#define __P111 _PAGE_P(0)

#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010 _PAGE_S(_PAGE_FOE)
#define __S011 _PAGE_S(_PAGE_FOE)
#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101 _PAGE_S(_PAGE_FOW)
#define __S110 _PAGE_S(0)
#define __S111 _PAGE_S(0)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page. ZERO_PAGE is a global shared
 * page that is always zero: used for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
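
/*
 * A note on the PFN encoding (illustrative arithmetic only): for a
 * page-aligned address, the low PAGE_SHIFT bits are zero, so
 *
 *	((page - PAGE_OFFSET) >> PAGE_SHIFT) << 32
 *	  == (page - PAGE_OFFSET) << (32 - PAGE_SHIFT)
 *
 * which places the frame number in _PFN_MASK; pte_page() below
 * recovers the kernel virtual address with the inverse shift.
 */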

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
extern inline int pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)] != 1; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
	if (!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(ptep)]++;
}

extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
extern inline int pmd_inuse(pmd_t *pmdp) { return mem_map[MAP_NR(pmdp)] != 1; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)
{
	if (!(mem_map[MAP_NR(pmdp)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(pmdp)]++;
}

extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline int pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)] != 1; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
extern inline void pgd_reuse(pgd_t * pgdp)
{
	if (!(mem_map[MAP_NR(pgdp)] & MAP_PAGE_RESERVED))
		mem_map[MAP_NR(pgdp)]++;
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte) { return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_uncow(pte_t pte) { pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkcow(pte_t pte) { pte_val(pte) |= _PAGE_COW; return pte; }
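
/*
 * These compose in the obvious way; for example, a hypothetical
 * copy-on-write break might install the copied page with
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte_uncow(pte)));
 *
 * clearing _PAGE_COW and _PAGE_FOW and setting the software dirty bit
 * (plus the write enables) in one go.
 */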

/*
 * To set the page-dir: the last top-level slot is made to map the
 * page directory itself (see PTRS_PER_PGD above), and the new page
 * table base is stored in the thread's ptbr. If the task is the one
 * currently running, the PALcode swpctx call makes the switch take
 * effect immediately.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
	tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
	if (tsk == current)
		__asm__ __volatile__(
			"bis %0,%0,$16\n\t"
			"call_pal %1"
			: /* no outputs */
			: "r" (&tsk->tss), "i" (PAL_swpctx)
			: "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct task_struct * tsk, unsigned long address)
{
	return (pgd_t *) ((tsk->tss.ptbr << PAGE_SHIFT) + PAGE_OFFSET) +
		((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* to find an entry in the second-level page table */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* to find an entry in the third-level page table */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
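
/*
 * Putting the three together: a hypothetical lookup of the page
 * backing `address` in task `tsk` (a sketch for illustration only;
 * real callers also check pgd_none()/pmd_none()/pte_present() before
 * dereferencing each level):
 *
 *	pgd_t *pgd = pgd_offset(tsk, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *	unsigned long page = pte_page(*pte) + (address & ~PAGE_MASK);
 */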

/*
 * Allocate and free page tables. The xxx_kernel() versions are used
 * for kernel page tables: their backing pages are marked reserved in
 * mem_map, and the reservation is dropped again when they are freed.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
	mem_map[MAP_NR(pte)] = 1;
	free_page((unsigned long) pte);
}
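
/*
 * A note on the pattern below (an observation, not new behaviour):
 * the entry is tested with pmd_none()/pgd_none() both before and
 * after get_free_page(), because a GFP_KERNEL allocation may sleep;
 * if another process installed the page table in the meantime, the
 * freshly allocated page is simply released again.
 */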
extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
	mem_map[MAP_NR(pmd)] = 1;
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages encode swap entries: the swap type and the
 * offset into the swap area.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
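
/*
 * Layout of a swap entry, as implied by the definitions above
 * (illustrative): bits 32..39 hold the swap type, bits 40 and up the
 * offset. So SWP_ENTRY(1, 2) == (1UL << 32) | (2UL << 40), and
 * SWP_TYPE()/SWP_OFFSET() recover 1 and 2 again. Bit 0 (_PAGE_VALID)
 * always stays clear, so a swap entry is never mistaken for a
 * present pte.
 */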

#endif /* _ALPHA_PGTABLE_H */