This source file includes the following definitions:
- pte_none
- pte_present
- pte_inuse
- pte_clear
- pte_reuse
- pmd_none
- pmd_bad
- pmd_present
- pmd_inuse
- pmd_clear
- pmd_reuse
- pgd_none
- pgd_bad
- pgd_present
- pgd_inuse
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_cow
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_uncow
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- pte_mkcow
- mk_pte
- pte_modify
- pte_page
- pmd_page
- pgd_offset
- pmd_offset
- pte_offset
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pgd_free
- pgd_alloc
- update_mmu_cache

#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: this setup is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
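
/*
 * Illustrative sketch (not part of the original header), assuming the
 * constants above: PGDIR_SHIFT == PMD_SHIFT == 22 folds the middle
 * level away (PTRS_PER_PMD == 1), so a 32-bit virtual address splits
 * into a 10-bit pgd index, a 10-bit pte index and a 12-bit offset.
 */
static inline void example_split_vaddr(unsigned long addr)
{
	unsigned long pgd_index = addr >> PGDIR_SHIFT;			/* top 10 bits */
	unsigned long pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* middle 10 bits */
	unsigned long offset = addr & ~PAGE_MASK;			/* low 12 bits */
	/* e.g. addr == 0xC0123ABC -> pgd_index 0x300, pte_index 0x123, offset 0xABC */
}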

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_COW 0x200

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_COW)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
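
/*
 * Illustrative sketch (not part of the original header): the __Pxwr /
 * __Sxwr entries above are indexed by a mapping's exec/write/read bits,
 * private vs. shared.  Private writable mappings get PAGE_COPY so the
 * first store triggers copy-on-write; shared writable ones get
 * PAGE_SHARED.  A protection_map-style lookup (hypothetical names,
 * assuming VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8):
 */
static pgprot_t example_protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

static inline pgprot_t example_vm_prot(unsigned long vm_flags)
{
	return example_protection_map[vm_flags & 0x0f];
}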

#undef CONFIG_TEST_VERIFY_AREA

extern unsigned long pg0[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

#define SET_PAGE_DIR(tsk,pgdir) \
do { \
	(tsk)->tss.pg_tables = (unsigned long *)(pgdir); \
	if ((tsk) == current) \
	{ \
	} \
} while (0)

extern unsigned long high_memory;

extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline int pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)].reserved; }

extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
	if (!mem_map[MAP_NR(ptep)].reserved)
		mem_map[MAP_NR(ptep)].count++;
}

extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_PRESENT; }
extern inline int pmd_inuse(pmd_t *pmdp) { return 0; }
extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp) { }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
extern inline int pgd_none(pgd_t pgd) { return 0; }
extern inline int pgd_bad(pgd_t pgd) { return 0; }
extern inline int pgd_present(pgd_t pgd) { return 1; }
extern inline int pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp) { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte) { return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_uncow(pte_t pte) { pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkcow(pte_t pte) { pte_val(pte) |= _PAGE_COW; return pte; }
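
/*
 * Illustrative sketch (not part of the original header): at fork()
 * time a private writable page is typically downgraded with the
 * helpers above -- write permission is dropped and the _PAGE_COW
 * software bit is set, so the next store faults and the page can be
 * copied (hypothetical helper name).
 */
static inline pte_t example_make_cow(pte_t pte)
{
	return pte_mkcow(pte_wrprotect(pte));	/* clear _PAGE_RW, set _PAGE_COW */
}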

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
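
/*
 * Illustrative sketch (not part of the original header): build an entry
 * for a page-aligned physical page, then change its protection;
 * _PAGE_CHG_MASK makes pte_modify() keep the page address and the
 * accumulated accessed/dirty bits (hypothetical helper name).
 */
static inline pte_t example_remap_shared(unsigned long page)
{
	pte_t pte = mk_pte(page, PAGE_READONLY);
	return pte_modify(pte, PAGE_SHARED);	/* _PAGE_ACCESSED/_PAGE_DIRTY survive */
}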

extern inline unsigned long pte_page(pte_t pte)
{ return pte_val(pte) & PAGE_MASK; }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd) & PAGE_MASK; }

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;	/* pmd is folded into the pgd */
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
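
/*
 * Illustrative sketch (not part of the original header): a complete
 * software walk from a virtual address to its pte, chaining the three
 * offset helpers above (hypothetical function, no locking shown).
 */
static inline pte_t * example_pte_walk(struct mm_struct * mm, unsigned long address)
{
	pgd_t * pgd = pgd_offset(mm, address);
	pmd_t * pmd = pmd_offset(pgd, address);	/* degenerate: same as pgd */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset(pmd, address);
}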

/*
 * Allocate and free page tables.  The xxx_kernel() versions are used
 * for kernel page tables.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
	mem_map[MAP_NR(pte)].reserved = 0;	/* unreserve so free_page() actually frees it */
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {	/* recheck: get_free_page() may have slept */
			if (page) {
				pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
				mem_map[MAP_NR(page)].reserved = 1;
				return page + address;
			}
			pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
			return NULL;
		}
		free_page((unsigned long) page);	/* someone else installed a table meanwhile */
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
		pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

extern inline void pte_free(pte_t * pte)
{
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {	/* recheck: get_free_page() may have slept */
			if (page) {
				pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
				return page + address;
			}
			pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
			return NULL;
		}
		free_page((unsigned long) page);	/* lost the race; drop our copy */
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}
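
/*
 * Illustrative sketch (not part of the original header): how a fault
 * handler might grow the page tables for a faulting address by
 * chaining the allocation helpers above (hypothetical function).
 */
static inline pte_t * example_grow_tables(struct mm_struct * mm, unsigned long address)
{
	pgd_t * pgd = pgd_offset(mm, address);
	pmd_t * pmd = pmd_alloc(pgd, address);	/* trivial here: returns pgd */
	if (!pmd)
		return NULL;
	return pte_alloc(pmd, address);		/* may allocate a new pte page */
}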

extern inline void pgd_free(pgd_t * pgd)
{
	free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024*8];

/*
 * On PowerPC the hardware hashed page table is effectively a cache of
 * the Linux page tables, so a changed pte must be (re)entered into the
 * hash table here.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t _pte)
{
#if 0
	printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
	_printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
#endif
	MMU_hash_page(&(current)->tss, address & PAGE_MASK, (pte *)&_pte);
}

#ifdef _SCHED_INIT_
#define INIT_MMAP { &init_task, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
#endif

#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
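
/*
 * Illustrative sketch (not part of the original header): a swapped-out
 * pte keeps _PAGE_PRESENT (bit 0) clear; bits 1-7 hold the swap "type"
 * (up to 128 swap devices) and bits 8 and up the page offset within it.
 * The fields round-trip through SWP_ENTRY() (hypothetical helper):
 */
static inline int example_swp_roundtrip(void)
{
	unsigned long entry = SWP_ENTRY(3, 0x1234);	/* == 0x00123406 */
	return SWP_TYPE(entry) == 3			/* type survives */
	    && SWP_OFFSET(entry) == 0x1234		/* offset survives */
	    && !(entry & _PAGE_PRESENT);		/* never looks present */
}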

#endif