This source file includes the following definitions:
- invalidate_mm
- invalidate_page
- invalidate_range
- invalidate_mm
- invalidate_page
- invalidate_range
- pte_none
- pte_present
- pte_inuse
- pte_clear
- pte_reuse
- pmd_none
- pmd_bad
- pmd_present
- pmd_inuse
- pmd_inuse
- pmd_clear
- pmd_reuse
- pgd_none
- pgd_bad
- pgd_present
- pgd_inuse
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- mk_pte
- pte_modify
- pte_page
- pmd_page
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache
#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * USE_PENTIUM_MM enables the Pentium 4MB-page handling: see the
 * _PAGE_4M bit and pmd_inuse() below.
 */
#define USE_PENTIUM_MM 1

/*
 * TLB invalidation:
 *
 *  - invalidate() invalidates the current mm's TLB entries
 *  - invalidate_all() invalidates all processes' TLB entries
 *  - invalidate_mm(mm) invalidates the specified mm context
 *  - invalidate_page(vma, addr) invalidates one page
 *  - invalidate_range(mm, start, end) invalidates a range of pages
 *
 * The i386 has no selective TLB-flush instruction: __invalidate()
 * flushes the whole TLB by reloading %cr3. The page-granular "invlpg"
 * instruction exists only on the i486 and later, so a plain 80386
 * (CONFIG_M386) falls back to a full flush.
 */
#define __invalidate() \
__asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")

#ifdef CONFIG_M386
#define __invalidate_one(addr) invalidate()
#else
#define __invalidate_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif

#ifndef __SMP__

#define invalidate() __invalidate()
#define invalidate_all() __invalidate()

static inline void invalidate_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                __invalidate();
}

static inline void invalidate_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->mm)
                __invalidate_one(addr);
}

static inline void invalidate_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm == current->mm)
                __invalidate();
}

#else

/*
 * On SMP we are not yet clever about TLB invalidation: every flavour
 * simply does a global cross-CPU flush via smp_invalidate(). Only
 * local_invalidate() restricts itself to the current CPU.
 */
#include <asm/smp.h>

#define local_invalidate() \
        __invalidate()

#define invalidate() \
        smp_invalidate()

#define invalidate_all() invalidate()

static inline void invalidate_mm(struct mm_struct *mm)
{
        invalidate();
}

static inline void invalidate_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        invalidate();
}

static inline void invalidate_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        invalidate();
}

#endif

/*
 * Certain architectures need to do special things when ptes within a
 * page table are directly modified. Few do; the i386 does not, so a
 * plain assignment suffices.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the i386 is two-level, so the pmd
 * is folded into the pgd and has no physical presence of its own,
 * hence PTRS_PER_PMD is 1.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
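
/*
 * Illustration, not part of the original header: with PGDIR_SHIFT ==
 * PMD_SHIFT == 22 and 4KB pages (PAGE_SHIFT == 12), a 32-bit virtual
 * address splits into a 10-bit directory index, a 10-bit table index
 * and a 12-bit byte offset. A hypothetical decomposition:
 *
 *      unsigned long dir = address >> PGDIR_SHIFT;                      bits 31..22
 *      unsigned long pte = (address >> PAGE_SHIFT) & (PTRS_PER_PTE-1);  bits 21..12
 *      unsigned long off = address & ~PAGE_MASK;                        bits 11..0
 */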

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value simply means that there will be an 8MB "hole"
 * after physical memory, so that out-of-bounds accesses are hopefully
 * caught.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) (TASK_SIZE + (unsigned long)(x))

/*
 * Hardware page table entry bits. _PAGE_4M in a page directory entry
 * marks a Pentium 4MB page (see USE_PENTIUM_MM above).
 */
#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_4M 0x080

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Write permissions imply read permissions. The __P
 * table is for private (copy-on-write) mappings, __S for shared ones;
 * the three digits are exec, write and read.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'.
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

extern unsigned long empty_zero_page[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page. ZERO_PAGE is a global shared
 * page that is always zero: used for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
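
/*
 * Illustration, not part of the original header: PAGE_PTR() yields the
 * byte offset, within a page-table page, of the pointer-sized entry
 * for an address. Shifting by PAGE_SHIFT-SIZEOF_PTR_LOG2 scales the
 * pte index to a byte offset, PTR_MASK aligns it down to a pointer
 * boundary, and ~PAGE_MASK confines it to a single page.
 */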

/* to set the page-dir: remember the pgd in the task state and, if the
   task is the one currently running, load it into %cr3 right away */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
        (tsk)->tss.cr3 = (unsigned long) (pgdir); \
        if ((tsk) == current) \
                __asm__ __volatile__("movl %0,%%cr3": :"a" ((tsk)->tss.cr3)); \
} while (0)

extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline int pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
        if (!mem_map[MAP_NR(ptep)].reserved)
                mem_map[MAP_NR(ptep)].count++;
}

extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE || pmd_val(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_PRESENT; }
#ifdef USE_PENTIUM_MM
/* a 4MB page directory entry has no separate page table behind it */
extern inline int pmd_inuse(pmd_t *pmdp) { return (pmd_val(*pmdp) & _PAGE_4M) != 0; }
#else
extern inline int pmd_inuse(pmd_t *pmdp) { return 0; }
#endif
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp) { }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
extern inline int pgd_none(pgd_t pgd) { return 0; }
extern inline int pgd_bad(pgd_t pgd) { return 0; }
extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline int pgd_inuse(pgd_t * pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp) { }

/*
 * The following only work if pte_present() is true. Undefined
 * behaviour if not. Note that the hardware has no "execute" and no
 * separate "read" bit: both map onto _PAGE_USER.
 */
extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline unsigned long pte_page(pte_t pte)
{ return pte_val(pte) & PAGE_MASK; }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }
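
/*
 * Illustration, not part of the original header: mk_pte() combines a
 * page-aligned physical address with a protection value, and
 * pte_modify() changes the protection while _PAGE_CHG_MASK preserves
 * the page frame plus the accessed and dirty bits. A hypothetical use:
 *
 *      pte_t pte = mk_pte(page, PAGE_COPY);     "page" assumed page-aligned
 *      pte = pte_modify(pte, PAGE_READONLY);    same frame, new protection
 */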

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table: a no-op, as the pmd
   is folded into the pgd entry it lives in */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are used
 * to allocate a kernel page table, which additionally gets marked
 * reserved in the mem_map.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        mem_map[MAP_NR(pte)].reserved = 0;
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                /* get_free_page() may sleep: re-check the pmd afterwards */
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
                                mem_map[MAP_NR(page)].reserved = 1;
                                return page + address;
                        }
                        pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is inside
 * the pgd, so it has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
        pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}
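
/*
 * Illustration, not part of the original header: a hypothetical routine
 * establishing a kernel mapping would chain the allocators top-down,
 * with "mm", "address" and "page" supplied by the caller:
 *
 *      pgd_t *pgd = pgd_offset(mm, address);
 *      pmd_t *pmd = pmd_alloc_kernel(pgd, address);    trivial on i386
 *      pte_t *pte = pte_alloc_kernel(pmd, address);
 *      if (pte)
 *              set_pte(pte, mk_pte(page, PAGE_KERNEL));
 */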

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                /* get_free_page() may sleep: re-check the pmd afterwards */
                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
                                return page + address;
                        }
                        pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * As above: freeing and allocating a pmd is trivial, since it is
 * folded into the pgd entry.
 */
extern inline void pmd_free(pmd_t * pmd)
{
        pmd_val(*pmd) = 0;
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The i386 doesn't have any external MMU info: the kernel page tables
 * contain all the necessary information, so this is a no-op.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Swap entries are stored in non-present ptes: bit 0 (_PAGE_PRESENT)
 * stays clear, bits 1-7 hold the swap type and bits 8 and up the
 * offset within the swap area.
 */
#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
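
/*
 * Illustration, not part of the original header: SWP_ENTRY() packs a
 * type and offset so that SWP_TYPE()/SWP_OFFSET() recover them, e.g.
 *
 *      unsigned long e = SWP_ENTRY(3, 1234);    hypothetical values
 *      SWP_TYPE(e) == 3         bits 1..7
 *      SWP_OFFSET(e) == 1234    bits 8..31
 *
 * Since bit 0 stays clear, pte_present() is false for a swap entry.
 */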

#endif