This source file includes the following definitions:
- flush_tlb
- flush_tlb_all
- flush_tlb_mm
- flush_tlb_page
- flush_tlb_range
- mk_pte
- pte_modify
- pmd_set
- pgd_set
- pte_page
- pmd_page
- pgd_page
- pte_none
- pte_present
- pte_clear
- pmd_none
- pmd_bad
- pmd_present
- pmd_clear
- pgd_none
- pgd_bad
- pgd_present
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- SET_PAGE_DIR
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache
- mk_swap_pte

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

#include <asm/system.h>

/* Caches aren't brain-dead on the alpha, so these are all no-ops. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)

/*
 * Flush the current user mapping.
 */
static inline void flush_tlb(void)
{
	tbiap();
}

/*
 * Flush everything (the kernel mapping may also have
 * changed due to vmalloc/vfree).
 */
static inline void flush_tlb_all(void)
{
	tbia();
}

/*
 * Flush a specified user mapping.  If it isn't the current one,
 * clearing its context is enough: the stale entries are discarded
 * the next time the mm is switched in.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm != current->mm)
		mm->context = 0;
	else
		tbiap();
}

/*
 * Page-granular tlb flush.
 *
 * Do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does an
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	struct mm_struct * mm = vma->vm_mm;

	if (mm != current->mm)
		mm->context = 0;
	else
		tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}

/*
 * Flush a specified range of user mappings: on the
 * alpha we flush the whole user tlb.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	if (mm != current->mm)
		mm->context = 0;
	else
		tbiap();
}
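
/*
 * Illustrative sketch, not part of the original header: choosing among
 * the primitives above.  A single page is flushed precisely with
 * flush_tlb_page(); anything larger falls back to a full user-TLB
 * flush, since flush_tlb_range() ignores its start/end arguments
 * anyway.  The helper name is hypothetical; PAGE_SIZE is assumed to
 * come from <asm/page.h>, as elsewhere in this file.
 */
static inline void example_flush(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		flush_tlb_page(vma, start);
	else
		flush_tlb_range(vma->vm_mm, start, end);
}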

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the alpha is three-level, with
 * all levels being one page in size.  The PGD is special in that its
 * last entry is reserved for the self-mapping set up by SET_PAGE_DIR()
 * below, so it has one less usable slot.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	((1UL << (PAGE_SHIFT-3))-1)

/* the number of pointers that fit on a page */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
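
/*
 * Worked example, illustrative only: with the standard 8 KB alpha page
 * (PAGE_SHIFT == 13), each table holds 2^(13-3) = 1024 eight-byte
 * entries, so one pte page maps 1024 * 8 KB = 8 MB (PMD_SHIFT == 23)
 * and one pmd page maps 1024 * 8 MB = 8 GB (PGDIR_SHIFT == 33).  The
 * helper below is hypothetical; it just decomposes an address into its
 * three table indices and page offset.
 */
static inline void example_decompose(unsigned long address,
	unsigned long *pgd_i, unsigned long *pmd_i,
	unsigned long *pte_i, unsigned long *offset)
{
	*pgd_i  = (address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1);
	*pmd_i  = (address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1);
	*pte_i  = (address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1);
	*offset = address & ~PAGE_MASK;
}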

#define VMALLOC_START		0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)	((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* kernel read enable */
#define _PAGE_URE	0x0200	/* user read enable */
#define _PAGE_KWE	0x1000	/* kernel write enable */
#define _PAGE_UWE	0x2000	/* user write enable */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/*
 * NOTE! The hardware has no "dirty" or "accessed" bits of its own:
 * both are emulated in software.  A clean page has KWE/UWE clear, so
 * the first write faults and the handler sets _PAGE_DIRTY together
 * with the write-enable bits, letting the retried write go through.
 * "Accessed" is tracked the same way via KRE/URE.  The kernel only
 * uses the accessed bit to decide whether to page something out, so
 * it doesn't have to be exact anyway.
 */
#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

/* the page frame number lives in the upper 32 bits of the pte */
#define _PFN_MASK	0xFFFFFFFF00000000

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

/*
 * _PAGE_P() forces FOW on for the private (copy-on-write) cases;
 * _PAGE_S() leaves a shared mapping writable as requested.
 */
#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The protection table, indexed by the xwr bits of vm_flags:
 * __P### for private mappings, __S### for shared ones.  The hardware
 * could handle write-only mappings, but as the alpha does byte-wide
 * writes with a read-modify-write sequence, write-only pages are not
 * practical: they end up readable as well.
 */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
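
/*
 * Illustrative example, not in the original header: for a read-write
 * mapping, the private variant still faults on write so copy-on-write
 * can happen, while the shared variant writes straight through.
 */
#if 0	/* example only, never compiled */
	pgprot_t private_rw = __P011;	/* VALID, access bits, FOE, FOW: first write faults (COW) */
	pgprot_t shared_rw  = __S011;	/* VALID, access bits, FOE: writes go through */
#endif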

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE	0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == (1UL << SIZEOF_PTR_LOG2) */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
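
/*
 * Illustrative sketch, not part of the original header: mk_pte() and
 * pte_page() are inverses on the PFN field.  For a page-aligned kernel
 * virtual address, the PFN is stored in the upper 32 bits of the pte,
 * so converting there and back recovers the original address.  The
 * helper name is hypothetical.
 */
static inline int example_pte_roundtrip(unsigned long page)
{
	/* `page' must be page-aligned for the round trip to be exact */
	return pte_page(mk_pte(page, PAGE_KERNEL)) == page;
}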

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
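
/*
 * Illustrative sketch, not part of the original header: how the
 * software dirty-bit emulation uses the helpers above.  On a
 * fault-on-write to a writable pte, the handler marks it dirty (which
 * also sets KWE/UWE, so the retried write goes through) and accessed.
 * The helper name is hypothetical.
 */
static inline void example_handle_fow(pte_t *ptep)
{
	set_pte(ptep, pte_mkdirty(pte_mkyoung(*ptep)));
}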

/*
 * To set the page directory.  Note the self-mapping in the last pgd
 * entry, and that if the task being updated is the current one, the
 * new page table base must also be loaded into the hardware PCB via
 * the PAL_swpctx call.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
	tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
	if (tsk == current)
		__asm__ __volatile__(
			"bis %0,%0,$16\n\t"
			"call_pal %1"
			: /* no outputs */
			: "r" (&tsk->tss), "i" (PAL_swpctx)
			: "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
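
/*
 * Illustrative sketch, not in the original header: a complete
 * three-level walk from an mm and a virtual address down to the pte,
 * bailing out if an intermediate level is absent or corrupt.  The
 * helper name is hypothetical.
 */
static inline pte_t * example_pte_lookup(struct mm_struct *mm,
	unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset(pmd, address);
}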

/*
 * Allocate and free page tables.  The xxx_kernel() versions are
 * used to allocate a kernel page table; on the alpha they are
 * currently identical to the user versions.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		/* re-check: get_free_page() may have slept */
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		/* re-check: get_free_page() may have slept */
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		/* re-check: get_free_page() may have slept */
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		/* re-check: get_free_page() may have slept */
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}
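
/*
 * Illustrative sketch, not part of the original header: how the
 * allocators above nest when the mm layer needs a pte for a user
 * address, creating the intermediate pmd level on demand.  The helper
 * name is hypothetical.
 */
static inline pte_t * example_pte_alloc_walk(struct mm_struct *mm,
	unsigned long address)
{
	pmd_t *pmd = pmd_alloc(pgd_offset(mm, address), address);

	return pmd ? pte_alloc(pmd, address) : NULL;
}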

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages encode the swap entry in an otherwise invalid
 * pte (_PAGE_VALID clear): bits 32-39 hold the swap type and bits
 * 40 and up the swap offset.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry)		(((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry)	((entry) >> 40)
#define SWP_ENTRY(type,offset)	pte_val(mk_swap_pte((type),(offset)))
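
/*
 * Illustrative example, not in the original header: SWP_ENTRY() and
 * SWP_TYPE()/SWP_OFFSET() round-trip, e.g. for swap type 3 at page
 * offset 0x1234.  The helper name is hypothetical.
 */
static inline int example_swap_roundtrip(void)
{
	unsigned long entry = SWP_ENTRY(3, 0x1234);

	return SWP_TYPE(entry) == 3 && SWP_OFFSET(entry) == 0x1234;
}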

#endif /* _ALPHA_PGTABLE_H */