This source file includes the following definitions:
- __flush_tlb_one
- flush_tlb_mm
- flush_tlb_page
- flush_tlb_range
- mk_pte
- pte_modify
- pmd_set
- pmd_set_et
- pgd_set
- pte_page
- pmd_page2
- pgd_page
- pte_none
- pte_present
- pte_clear
- pmd_none2
- pmd_bad2
- pmd_present2
- pmd_clear
- pgd_none
- pgd_bad
- pgd_present
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- pte_mknocache
- pte_mkcache
- SET_PAGE_DIR
- pgd_offset
- pgd_offset_k
- pmd_offset
- pte_offset
- nocache_page
- cache_page
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pgd_free
- pgd_alloc
- update_mmu_cache
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

/*
 * Flush the whole TLB.  On the 68040/060 the hand-coded opcode 0xf510 is
 * 'pflushan' (flush all non-global entries); earlier MMUs use pflusha.
 */
#define __flush_tlb() \
do { \
	if (m68k_is040or060) \
		__asm__ __volatile__(".word 0xf510\n"::); \
	else \
		__asm__ __volatile__("pflusha\n"::); \
} while (0)

/* Flush the TLB entry for a single page (0xf508 is 'pflush (%a0)' on the 040/060). */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (m68k_is040or060) {
		register unsigned long a0 __asm__ ("a0") = addr;
		__asm__ __volatile__(".word 0xf508"
				     : : "a" (a0));
	} else
		__asm__ __volatile__("pflush #0,#0,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->mm)
		__flush_tlb_one(addr);
}

/* Flushing a range currently just means flushing the whole TLB. */
static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	if (mm == current->mm)
		__flush_tlb();
}

#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 25
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the m68k is configured as three-level,
 * so we do have a PMD level physically.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 8
#define PTRS_PER_PGD 128

/* The number of pointers that fit in one page. */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))
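
/*
 * Worked example (illustrative, not part of the original header), assuming
 * 4K pages (PAGE_SHIFT == 12): with the shifts above, a 32-bit virtual
 * address splits as
 *
 *	bits 31-25  pgd index   (PTRS_PER_PGD =  128 entries)
 *	bits 24-22  pmd index   (PTRS_PER_PMD =    8 entries)
 *	bits 21-12  pte index   (PTRS_PER_PTE = 1024 entries)
 *	bits 11-0   byte offset within the page
 *
 * so e.g. address 0x0345A678 uses pgd slot 0x01, pmd slot 5 and pte slot 0x05A.
 */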

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a page gap between each vmalloced area for
 * the same reason.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

/* Definitions for the page descriptor bits used by the m68k MMUs. */
#define _PAGE_PRESENT 0x001	/* descriptor type: resident page */
#define _PAGE_SHORT 0x002	/* descriptor type: short (4-byte) table descriptor */
#define _PAGE_RONLY 0x004	/* write protect */
#define _PAGE_ACCESSED 0x008	/* used ("young") bit */
#define _PAGE_DIRTY 0x010	/* modified bit */
#define _PAGE_GLOBAL040 0x400	/* 68040 global bit */
#define _PAGE_COW 0x800		/* software copy-on-write bit */
#define _PAGE_NOCACHE030 0x040	/* 68030 no-cache mode */
#define _PAGE_NOCACHE 0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S 0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040 0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W 0x000	/* 68040 cache mode, cachable, write-through */

#define _DESCTYPE_MASK 0x003

#define _CACHEMASK040 (~0x060)
#define _TABLE_MASK (0xfffffff0)

#define _PAGE_TABLE (_PAGE_SHORT)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_CACHE040)

/*
 * The m68k can't do page protection for execute, and considers that the
 * same as read.  Also, write permissions imply read permissions.  The
 * index is the "xwr" permission bits of the mapping (__P for private
 * copy-on-write mappings, __S for shared ones).
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
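
/*
 * Illustrative note (not from the original header): mmap() protection bits
 * select an entry above, e.g. a private PROT_READ|PROT_WRITE mapping uses
 * __P011 == PAGE_COPY (kept read-only so the first write faults and the page
 * can be copied), while the shared equivalent uses __S011 == PAGE_SHARED.
 */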

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE empty_zero_page

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/* Translate a kernel virtual address to a physical address and back. */
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#define VTOP(addr) (mm_vtop((unsigned long)(addr)))
#define PTOV(addr) (mm_ptov((unsigned long)(addr)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = VTOP(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
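
/*
 * Illustrative sketch (not part of the original header): an mprotect()-style
 * protection change keeps the page frame plus the accessed/dirty/cache bits
 * (everything in _PAGE_CHG_MASK) and only swaps the protection.  With `ptep',
 * `vma' and `addr' being whatever the caller already looked up:
 *
 *	pte_t old = *ptep;
 *	set_pte(ptep, pte_modify(old, PAGE_READONLY));
 *	flush_tlb_page(vma, addr);
 */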

/*
 * A pmd_t holds 16 consecutive hardware pointer-table descriptors, each
 * pointing at one 64-entry chunk of the 1024-entry pte page, so a single
 * pmd maps PTRS_PER_PTE pages.
 */
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	int i;

	ptep = (pte_t *) VTOP(ptep);
	for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
		pmdp->pmd[i] = _PAGE_TABLE | (unsigned long)ptep;
}

/* Same, but mark the descriptors as early-termination page descriptors. */
extern inline void pmd_set_et(pmd_t * pmdp, pte_t * ptep)
{
	int i;

	ptep = (pte_t *) VTOP(ptep);
	for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
		pmdp->pmd[i] = _PAGE_PRESENT | (unsigned long)ptep;
}

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | VTOP(pmdp); }

/* Table entries hold physical addresses; convert back to kernel virtual. */
extern inline unsigned long pte_page(pte_t pte)
{ return PTOV(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page2(pmd_t *pmd)
{ return PTOV(pmd_val(*pmd) & _TABLE_MASK); }
#define pmd_page(pmd) pmd_page2(&(pmd))

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PTOV(pgd_val(pgd) & _TABLE_MASK); }

extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }

extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
extern inline int pmd_bad2(pmd_t *pmd) { return (pmd_val(*pmd) & _DESCTYPE_MASK) != _PAGE_TABLE || pmd_page(*pmd) > high_memory; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
extern inline int pmd_present2(pmd_t *pmd) { return pmd_val(*pmd) & _PAGE_TABLE; }
#define pmd_present(pmd) pmd_present2(&(pmd))
extern inline void pmd_clear(pmd_t * pmdp)
{
	int i;

	for (i = 0; i < 16; i++)
		pmdp->pmd[i] = 0;
}

extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_TABLE; }

extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 */
extern inline int pte_read(pte_t pte) { return 1; }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte) { return 1; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
extern inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_CACHE040; return pte; }

/*
 * Install a new page directory for `tsk'.  The CPU root pointer (crp) is a
 * two-long descriptor; if the task is current, the MMU root pointer is
 * reloaded immediately (on the 040/060, 0xf510 is 'pflushan' and
 * 0x4e7b0806 is 'movec %d0,%urp'; on earlier CPUs the caches are cleared
 * via the CACR and the crp is loaded with pmove).
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	tsk->tss.pagedir_v = (unsigned long *)pgdir;
	tsk->tss.pagedir_p = VTOP(pgdir);
	tsk->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
	tsk->tss.crp[1] = tsk->tss.pagedir_p;
	if (tsk == current) {
		if (m68k_is040or060)
			__asm__ __volatile__ (".word 0xf510\n\t"
					      "movel %0@,%/d0\n\t"
					      ".long 0x4e7b0806\n\t"
					      : : "a" (&tsk->tss.crp[1])
					      : "d0");
		else
			__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
					      "oriw #0x0808,%/d0\n\t"
					      "movec %/d0,%/cacr\n\t"
					      "pmove %0@,%/crp\n\t"
					      : : "a" (&tsk->tss.crp[0])
					      : "d0");
	}
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

extern pgd_t swapper_pg_dir[128];
extern pgd_t kernel_pg_dir[128];

/* to find an entry in the kernel page-table-directory */
extern inline pgd_t * pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table. */
extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
{
	return (pte_t *) pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * Page-table pages must not be cached on the 68040/060: the MMU table walk
 * does not look into the copyback data cache, so pages backing page tables
 * are switched to a non-cached mode (m68k_pgtable_cachemode) while in use.
 */
extern inline void nocache_page (unsigned long vaddr)
{
	if (m68k_is040or060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir,vaddr);
		ptep = pte_offset(pmdp,vaddr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page (unsigned long vaddr)
{
	if (m68k_is040or060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir,vaddr);
		ptep = pte_offset(pmdp,vaddr);
		*ptep = pte_mkcache(*ptep);
	}
}

extern inline void pte_free(pte_t * pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long) pte);
}

/*
 * Allocate (if necessary) and return the pte for `address'.  get_free_page()
 * may sleep, so the pmd is rechecked after the allocation in case somebody
 * else installed a page table in the meantime.
 */
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd,page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long)page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * Pointer tables are much smaller than a page, so several of them are
 * allocated from a dedicated pool rather than one full page each.
 */
extern pmd_t *get_pointer_table (void);
extern void free_pointer_table (pmd_t *);
extern pmd_t *get_kpointer_table (void);
extern void free_kpointer_table (pmd_t *);

extern inline void pmd_free(pmd_t * pmd)
{
	free_pointer_table (pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_pointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_pointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
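
/*
 * Illustrative sketch (not part of the original header) of how the allocation
 * helpers chain together when mapping a page at `address' in `mm'; `page' is
 * the kernel virtual address of the page being mapped and error handling is
 * reduced to NULL checks:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(pgd, address);
 *	pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 *	if (pte)
 *		set_pte(pte, mk_pte(page, PAGE_SHARED));
 */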

extern inline void pte_free_kernel(pte_t * pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_kpointer_table(pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_kpointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_kpointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
	free_pointer_table ((pmd_t *) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *)get_pointer_table ();
}
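
/*
 * Illustrative sketch (not from the original header): setting up a fresh
 * address space for a task `tsk' would pair the allocator with SET_PAGE_DIR
 * roughly like this (the real fork/exec paths also copy or clear the entries):
 *
 *	pgd_t *new_pgd = pgd_alloc();
 *	if (!new_pgd)
 *		return -ENOMEM;
 *	SET_PAGE_DIR(tsk, new_pgd);
 */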

/*
 * Cache handling functions
 */

/* Invalidate the instruction cache: 0xf498 is 'cinva ic' on the 040/060,
   otherwise the clear-instruction-cache bit (FLUSH_I) is set in the CACR. */
#define flush_icache() \
do { \
	if (m68k_is040or060) \
		asm (".word 0xf498"); \
	else \
		asm ("movec %/cacr,%/d0;" \
		     "oriw %0,%/d0;" \
		     "movec %/d0,%/cacr" \
		     : \
		     : "i" (FLUSH_I) \
		     : "d0"); \
} while (0)

/*
 * invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_clear (unsigned long paddr, int len);

/*
 * push any dirty cache in the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 */
extern void cache_push (unsigned long paddr, int len);

/*
 * push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v (unsigned long vaddr, int len);

extern void flush_cache_all(void);
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_range(mm, start, end) flush_cache_all()
#define flush_cache_page(vma, addr) flush_cache_all()
extern void flush_page_to_ram(unsigned long addr);

/* CACR bits used on the 68030 and earlier: clear instruction and/or data cache. */
#define FLUSH_I_AND_D (0x00000808)
#define FLUSH_I (0x00000008)

/* Non-zero if the range [addr, addr+len) extends past the end of a contiguous memory chunk. */
int mm_end_of_chunk (unsigned long addr, int len);

/*
 * Map a physical address range into the kernel virtual address space and
 * return the virtual address of the mapping.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/* Set the cache mode of a range of kernel memory. */
extern void kernel_set_cachemode( unsigned long address, unsigned long size,
				  unsigned cmode );

/* Values for `nocacheflag' and `cmode' */
#define KERNELMAP_FULL_CACHING 0
#define KERNELMAP_NOCACHE_SER 1
#define KERNELMAP_NOCACHE_NONSER 2
#define KERNELMAP_NO_COPYBACK 3

/*
 * The m68k doesn't have any external MMU info: the kernel page tables
 * contain all the necessary information, so this is a no-op.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
				    unsigned long address, pte_t pte)
{
}

/*
 * Swap entries are encoded in an (invalid) pte: the swap type lives in
 * bits 2-8 and the swap offset above that.
 */
#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
#if 0
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
#else
#define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
#endif
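
/*
 * Worked example (illustrative, not part of the original header), assuming
 * PAGE_SHIFT == 12: SWP_ENTRY(5, 123) gives (5 << 2) | (123 << 12) =
 * 0x0007B014, from which SWP_TYPE() recovers 5 and SWP_OFFSET() recovers 123.
 */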

#endif /* _M68K_PGTABLE_H */