This source file includes the following definitions:
- reload_context
- flush_tlb_current_page
- flush_tlb
- flush_tlb_all
- flush_tlb_mm
- flush_tlb_page
- flush_tlb_range
- mk_pte
- pte_modify
- pmd_set
- pgd_set
- pte_page
- pmd_page
- pgd_page
- pte_none
- pte_present
- pte_clear
- pmd_none
- pmd_bad
- pmd_present
- pmd_clear
- pgd_none
- pgd_bad
- pgd_present
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- SET_PAGE_DIR
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache
- mk_swap_pte
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

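/*
 * This file contains the functions and defines necessary to modify
 * and use the Alpha page table tree, for any standard page size as
 * defined in <asm/page.h>.
 */
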
#include <asm/system.h>
#include <asm/mmu_context.h>

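/* Caches aren't brain-dead on the Alpha, so all cache flushes are no-ops. */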
#define flush_cache_all()                 do { } while (0)
#define flush_cache_mm(mm)                do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr)     do { } while (0)
#define flush_page_to_ram(page)           do { } while (0)

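/*
 * Force a context reload: pass the task's hardware context (tss) in
 * $16 (a0) to the swpctx PALcode call, so that a new page-table
 * pointer or ASN takes effect immediately.
 */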
static inline void reload_context(struct task_struct *task)
{
	__asm__ __volatile__(
		"bis %0,%0,$16\n\t"
		"call_pal %1"
		: /* no outputs */
		: "r" (&task->tss), "i" (PAL_swpctx)
		: "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

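/*
 * Helpers to hide the broken ASN handling on older Alphas: with
 * BROKEN_ASN defined, flushing the current context means flushing
 * the whole user TLB (tbiap); otherwise a fresh ASN is assigned and
 * other contexts are merely marked stale.
 */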
#ifdef BROKEN_ASN

#define flush_tlb_current(x) tbiap()
#define flush_tlb_other(x)   do { } while (0)

#else

extern void get_new_asn_and_reload(struct task_struct *, struct mm_struct *);

#define flush_tlb_current(mm) get_new_asn_and_reload(current, mm)
#define flush_tlb_other(mm)   do { (mm)->context = 0; } while (0)

#endif

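/*
 * Flush just one page in the current TLB set.  We have to be very
 * careful about the icache here: there is no way to invalidate a
 * specific icache page, so executable mappings need more than a
 * data-only TLB invalidate.
 */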
static inline void flush_tlb_current_page(struct mm_struct * mm,
	struct vm_area_struct *vma,
	unsigned long addr)
{
#ifdef BROKEN_ASN
	/* tbi type 2 is tbisd (data only), type 3 is tbis (itlb + dtlb) */
	tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
#else
	if (vma->vm_flags & VM_EXEC)
		flush_tlb_current(mm);
	else
		tbi(2, addr);
#endif
}

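/* Flush the current user mapping. */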
static inline void flush_tlb(void)
{
	flush_tlb_current(current->mm);
}

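/*
 * Flush everything: the kernel mapping may also have changed
 * due to vmalloc/vfree.
 */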
static inline void flush_tlb_all(void)
{
	tbia();
}

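/* Flush a specified user mapping. */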
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm != current->mm)
		flush_tlb_other(mm);
	else
		flush_tlb_current(mm);
}

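/*
 * Page-granular TLB flush: normally a data-only tbi (type 2, tbisd)
 * suffices; executable mappings go through flush_tlb_current_page(),
 * which knows how to handle the icache.
 */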
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	struct mm_struct * mm = vma->vm_mm;

	if (mm != current->mm)
		flush_tlb_other(mm);
	else
		flush_tlb_current_page(mm, vma, addr);
}

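/*
 * Flush a specified range of user mappings: on the Alpha we just
 * flush the whole user TLB.
 */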
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

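/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */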
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map. */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map. */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page-table level: the Alpha is three-level, with all
 * levels having a one-page page table.  The last PGD entry is
 * reserved for the self-mapping set up by SET_PAGE_DIR() below.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	((1UL << (PAGE_SHIFT-3))-1)

/* The number of pointers that fit on a page. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#define VMALLOC_START	0xFFFFFE0000000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

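/*
 * OSF/1 PAL-code-imposed page table bits.  The fault-on-{read,write,
 * execute} bits implement page protection; the read/write-enable
 * bits double as software dirty/accessed tracking (see below).
 */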
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* fault on read */
#define _PAGE_FOW	0x0004	/* fault on write */
#define _PAGE_FOE	0x0008	/* fault on execute */
#define _PAGE_ASM	0x0010	/* address space match */
#define _PAGE_KRE	0x0100	/* kernel read enable */
#define _PAGE_URE	0x0200	/* user read enable */
#define _PAGE_KWE	0x1000	/* kernel write enable */
#define _PAGE_UWE	0x2000	/* user write enable */

/* ... and these are the software-defined bits. */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

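/*
 * NOTE: the "accessed" bit isn't exact.  It is faked in software by
 * watching the KRE/URE read-enable bits, while the KWE/UWE
 * write-enable bits carry the dirty state.  The kernel only uses
 * "accessed" as a page-out heuristic, so this is good enough.
 */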
#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

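/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */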
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

/* Private mappings must always fault on write, so COW can kick in. */
#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW) ? 0 : _PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

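/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-only pages: both the
 * private and shared protection tables below therefore allow reads
 * whenever writes are allowed.
 */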
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)

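/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.  ZERO_PAGE is a global shared
 * page that is always zero: used for zero-mapped memory areas etc.
 */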
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE	0xfffffc000030A000

/* Number of bits that fit into a memory pointer. */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* To align a pointer to a pointer address. */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2	3

/* To find an entry in a page table. */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

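/*
 * Conversion functions: convert a page and protection to a page
 * entry, and a page entry and page directory to the page they
 * refer to.
 */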
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)      { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)   { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)  { pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)      { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)       { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)   { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)      { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)       { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)   { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }

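/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */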
extern inline int pte_read(pte_t pte)   { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)  { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)   { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)    { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)    { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= __ACCESS_BITS; return pte; }

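/*
 * To set the page directory.  Note the self-mapping in the last PGD
 * entry; also note that if the task being updated is the current
 * one, the context must be reloaded for the new page-table pointer
 * to take effect.
 */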
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
	tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
	if (tsk == current)
		reload_context(tsk);
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* To find an entry in the page-table directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}

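/*
 * Allocate and free page tables.  The xxx_kernel() versions are used
 * for kernel mappings.  Note the double pmd_none()/pgd_none() check:
 * get_free_page() may sleep, so another thread may have installed
 * the page table in the meantime, in which case ours is freed again.
 */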
extern inline void pte_free_kernel(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
	free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

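/*
 * The Alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */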
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

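/*
 * Non-present PTEs encode swap entries: bits 32-39 hold the swap
 * type and the bits from 40 up hold the offset into the swap area.
 */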
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry)        (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry)      ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))

#endif /* _ALPHA_PGTABLE_H */