This source file includes the following definitions:
- invalidate
- invalidate_all
- invalidate_mm
- invalidate_page
- invalidate_range
- mk_pte
- pte_modify
- pmd_set
- pgd_set
- pte_page
- pmd_page
- pgd_page
- pte_none
- pte_present
- pte_inuse
- pte_clear
- pte_reuse
- pmd_none
- pmd_bad
- pmd_present
- pmd_inuse
- pmd_clear
- pmd_reuse
- pgd_none
- pgd_bad
- pgd_present
- pgd_inuse
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_cow
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_uncow
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- pte_mkcow
- SET_PAGE_DIR
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache
- mk_swap_pte
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H
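
/*
 * This file contains the functions and defines necessary to modify and
 * use the Alpha page-table tree.  It is written against the standard
 * Alpha page size defined in <asm/page.h> (8KB, i.e. PAGE_SHIFT == 13).
 */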
extern void tbi(long type, ...);

#define tbisi(x) tbi(1,(x))
#define tbisd(x) tbi(2,(x))
#define tbis(x) tbi(3,(x))
#define tbiap() tbi(-1)
#define tbia() tbi(-2)
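
/*
 * "tbi" is the translation-buffer invalidate PALcode call.  The first
 * argument selects the flavour: 1 (tbisi) drops a single instruction-
 * stream translation, 2 (tbisd) a single data-stream translation, 3
 * (tbis) both for one address, -1 (tbiap) every non-ASM entry for the
 * current address space, and -2 (tbia) the entire translation buffer.
 */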

static inline void invalidate(void)
{
	tbiap();
}

static inline void invalidate_all(void)
{
	tbia();
}

static inline void invalidate_mm(struct mm_struct *mm)
{
	tbiap();
}

static inline void invalidate_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}

static inline void invalidate_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	tbiap();
}
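
/*
 * Note that invalidate_mm() and invalidate_range() fall back to flushing
 * the whole per-process translation buffer (tbiap): the PALcode interface
 * only offers single-address or whole-TB invalidates, so there is no
 * cheaper way to drop an arbitrary range.  invalidate_page() picks the
 * data-only (type 2) or combined (type 3) invalidate depending on whether
 * the vma is executable.
 */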

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
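
/*
 * Worked out for the standard 8KB page (PAGE_SHIFT == 13): each page
 * table holds 2^(13-3) = 1024 8-byte entries, so PMD_SHIFT is
 * 13 + 10 = 23 (a second-level table maps 8MB) and PGDIR_SHIFT is
 * 13 + 20 = 33 (one top-level entry maps 8GB).
 */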

#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	((1UL << (PAGE_SHIFT-3))-1)

#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
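
/*
 * PTRS_PER_PGD is one less than the 1024 entries that fit in a page:
 * the top slot of the page directory is reserved for the self-mapping
 * entry that SET_PAGE_DIR() installs below.
 */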

#define VMALLOC_START	0xFFFFFE0000000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
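
/*
 * Hardware bits in an Alpha PTE: _PAGE_VALID enables translation, the
 * FO{R,W,E} bits force a fault on read, write or execute, _PAGE_ASM
 * marks an address-space-match (global) mapping, and KRE/URE/KWE/UWE
 * grant kernel/user read and write access.  The physical page frame
 * number lives in the high 32 bits (_PFN_MASK).  _PAGE_COW, _PAGE_DIRTY
 * and _PAGE_ACCESSED are software bits kept in a region the hardware
 * ignores.
 */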
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002
#define _PAGE_FOW	0x0004
#define _PAGE_FOE	0x0008
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100
#define _PAGE_URE	0x0200
#define _PAGE_KWE	0x1000
#define _PAGE_UWE	0x2000

#define _PAGE_COW	0x10000
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000
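
/*
 * NOTE: the Alpha MMU has no hardware dirty or accessed bits, so both
 * are emulated.  A clean pte has the write-enable bits (KWE/UWE) clear,
 * so the first store faults and the handler sets _PAGE_DIRTY together
 * with the enable bits; an "old" pte likewise has the read-enable bits
 * clear.  That is why the groups below are set and cleared as a whole
 * by pte_mkdirty()/pte_mkclean() and pte_mkyoung()/pte_mkold().
 */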
#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_COW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:(_PAGE_FOW | _PAGE_COW)))
#define _PAGE_S(x) _PAGE_NORMAL(x)
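
/*
 * The __P??? table below gives the protections for private (copy-on-
 * write) mappings and __S??? for shared ones; the three digits are the
 * xwr (execute/write/read) permission combinations requested by mmap().
 * For a private mapping that allows writing, _PAGE_P() substitutes
 * _PAGE_FOW | _PAGE_COW, so the first store traps and the fault handler
 * can copy the page.
 */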

#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)

extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE	0xfffffc000030A000

#define BITS_PER_PTR	(8*sizeof(unsigned long))

#define PTR_MASK	(~(sizeof(void*)-1))

#define SIZEOF_PTR_LOG2	3

#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
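
/*
 * The page frame number sits in bits 32..63 of a PTE, so converting a
 * kernel virtual address to the PFN field is a single shift:
 * (page - PAGE_OFFSET) >> PAGE_SHIFT is the PFN, and shifting that left
 * by 32 is the same as ((page - PAGE_OFFSET) << (32 - PAGE_SHIFT)).
 * E.g. with 8KB pages, page == PAGE_OFFSET + 0x4000 is PFN 2, and
 * 0x4000 << 19 == 2UL << 32.
 */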

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
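
/*
 * The predicates below test page-table entries at each level.  The
 * *_inuse() and *_reuse() helpers consult the mem_map reference counts,
 * allowing page-table pages to be shared, and *_bad() checks that an
 * entry carries exactly the _PAGE_TABLE bits and points below
 * high_memory.
 */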

extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
extern inline int pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
	if (!mem_map[MAP_NR(ptep)].reserved)
		mem_map[MAP_NR(ptep)].count++;
}

extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
extern inline int pmd_inuse(pmd_t *pmdp) { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)
{
	if (!mem_map[MAP_NR(pmdp)].reserved)
		mem_map[MAP_NR(pmdp)].count++;
}

extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline int pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }

extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte) { return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_uncow(pte_t pte) { pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkcow(pte_t pte) { pte_val(pte) |= _PAGE_COW; return pte; }
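
/*
 * An illustrative sketch (not part of the original file): to mark a pte
 * copy-on-write, one would strip write permission and set the software
 * COW bit, e.g.
 *
 *	pte = pte_wrprotect(pte_mkcow(pte));
 *	set_pte(ptep, pte);
 *
 * Because pte_write() tests _PAGE_FOW, the next store through this
 * mapping faults and the handler can copy the page.
 */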
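
/*
 * SET_PAGE_DIR installs a new page directory for a task: it writes a
 * self-referencing kernel PTE into the reserved top slot of the pgd,
 * loads the task's ptbr with the directory's page frame number, and, if
 * the task is the one currently running, swaps context via PAL_swpctx
 * so the new ptbr takes effect immediately.
 */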
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
	tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
	if (tsk == current)
		__asm__ __volatile__(
			"bis %0,%0,$16\n\t"
			"call_pal %1"
			:
			: "r" (&tsk->tss), "i" (PAL_swpctx)
			: "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
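
/*
 * A minimal sketch (not part of the original interface) of how the three
 * helpers above compose into a full walk of the three-level tree.  The
 * name walk_page_table is purely illustrative, and real callers must
 * check pgd_none()/pgd_bad() and pmd_none()/pmd_bad() at each step
 * before dereferencing.
 */
extern inline pte_t * walk_page_table(struct mm_struct * mm, unsigned long address)
{
	pgd_t * pgd = pgd_offset(mm, address);	/* level 1: page directory */
	pmd_t * pmd = pmd_offset(pgd, address);	/* level 2: page middle directory */
	return pte_offset(pmd, address);	/* level 3: page table entry */
}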

extern inline void pte_free_kernel(pte_t * pte)
{
	mem_map[MAP_NR(pte)].reserved = 0;
	free_page((unsigned long) pte);
}
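
/*
 * Note the allocate-then-recheck pattern in the *_alloc() functions
 * below: get_free_page(GFP_KERNEL) may sleep, so the table slot is
 * tested again after it returns.  If another thread of control installed
 * a table in the meantime, the freshly allocated page is simply freed.
 */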
extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				mem_map[MAP_NR(page)].reserved = 1;
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
	mem_map[MAP_NR(pmd)].reserved = 0;
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				mem_map[MAP_NR(page)].reserved = 1;
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
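
/*
 * The non-"_kernel" variants below are identical except that they do not
 * mark the allocated page reserved in mem_map; user page tables are
 * reference counted through mem_map rather than pinned.
 */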

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];
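
/*
 * The Alpha keeps no external MMU information: the PALcode refills the
 * translation buffer straight from the page tables, so update_mmu_cache()
 * has nothing to do.
 */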
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
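
/*
 * Worked example: SWP_ENTRY(1, 0x20) encodes type 1 and offset 0x20 as
 * (1UL << 32) | (0x20UL << 40); SWP_TYPE() recovers the 1 and
 * SWP_OFFSET() the 0x20.  Bit 0 (_PAGE_VALID) is left clear, so a
 * swapped-out pte never looks present to pte_present().
 */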

#endif /* _ALPHA_PGTABLE_H */