This source file includes following definitions.
- __flush_tlb_one
- flush_tlb_mm
- flush_tlb_page
- flush_tlb_range
- mk_pte
- pte_modify
- pmd_set
- pmd_set_et
- pgd_set
- pte_page
- pmd_page2
- pgd_page
- pte_none
- pte_present
- pte_clear
- pmd_none2
- pmd_bad2
- pmd_present2
- pmd_clear
- pgd_none
- pgd_bad
- pgd_present
- pgd_clear
- pte_read
- pte_write
- pte_exec
- pte_dirty
- pte_young
- pte_wrprotect
- pte_rdprotect
- pte_exprotect
- pte_mkclean
- pte_mkold
- pte_mkwrite
- pte_mkread
- pte_mkexec
- pte_mkdirty
- pte_mkyoung
- pte_mknocache
- pte_mkcache
- SET_PAGE_DIR
- pgd_offset
- pgd_offset_k
- pmd_offset
- pte_offset
- nocache_page
- cache_page
- pte_free
- pte_alloc
- pmd_free
- pmd_alloc
- pte_free_kernel
- pte_alloc_kernel
- pmd_free_kernel
- pmd_alloc_kernel
- pgd_free
- pgd_alloc
- update_mmu_cache
1 #ifndef _M68K_PGTABLE_H
2 #define _M68K_PGTABLE_H
3
4
5
6
7
8
/*
 * Flush the entire ATC (TLB).  On the '040/'060 the flush instruction
 * is emitted as a raw opcode (0xf510 — presumably pflushan; old
 * assemblers lack the mnemonic, verify against the MC68040 manual);
 * the '020/'030 use the plain pflusha instruction.
 */
#define __flush_tlb() \
do { \
	if (m68k_is040or060) \
		__asm__ __volatile__(".word 0xf510\n"::); \
	else \
		__asm__ __volatile__("pflusha\n"::); \
} while (0)
16
#if 1
/*
 * Flush the ATC entry mapping a single virtual address.
 * The '040/'060 opcode (0xf508) implicitly operates on (a0), so the
 * address is pinned into a0 with a register variable; the '020/'030
 * pflush form takes any address register.
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (m68k_is040or060) {
		register unsigned long a0 __asm__ ("a0") = addr;
		__asm__ __volatile__(".word 0xf508"
				     : : "a" (a0));
	} else
		__asm__ __volatile__("pflush #0,#0,(%0)" : : "a" (addr));
}
#else
/* Fallback when single-entry flushing is disabled: flush everything. */
#define __flush_tlb_one(addr) __flush_tlb()
#endif
30
/* the generic flush interfaces all map onto a full ATC flush */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() flush_tlb()
33
34 static inline void flush_tlb_mm(struct mm_struct *mm)
35 {
36 if (mm == current->mm)
37 __flush_tlb();
38 }
39
40 static inline void flush_tlb_page(struct vm_area_struct *vma,
41 unsigned long addr)
42 {
43 if (vma->vm_mm == current->mm)
44 __flush_tlb_one(addr);
45 }
46
47 static inline void flush_tlb_range(struct mm_struct *mm,
48 unsigned long start, unsigned long end)
49 {
50 if (mm == current->mm)
51 __flush_tlb();
52 }
53
54
55
56
57
/* Store a pte: a plain assignment, the entry is a single word. */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))


/* PMD_SHIFT determines the size of the area a second-level page table maps */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	25
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* entries per level: 128 pgd x 8 pmd x 1024 pte covers the 4GB space */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	8
#define PTRS_PER_PGD	128

/* number of pointers that fit on a page */
#define PTRS_PER_PAGE	(PAGE_SIZE/sizeof(void*))

/*
 * Offset to the start of the vmalloc area above physical memory:
 * the 8MB value leaves an 8MB "hole" (aligned to the offset) between
 * the end of physical memory and the kernel vmalloc mappings.
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
91
92
93
94
/* bits in an m68k short-format page/table descriptor */
#define _PAGE_PRESENT	0x001	/* descriptor type: resident page */
#define _PAGE_SHORT	0x002	/* descriptor type: short-format table */
#define _PAGE_RONLY	0x004	/* write-protected */
#define _PAGE_ACCESSED	0x008	/* "used" bit, set by the MMU */
#define _PAGE_DIRTY	0x010	/* "modified" bit, set by the MMU */
#define _PAGE_GLOBAL040	0x400	/* '040 global bit */
#define _PAGE_COW	0x800	/* software bit; presumably marks
				   copy-on-write pages — confirm with users */
#define _PAGE_NOCACHE030 0x040	/* '030 cache-inhibit */
#define _PAGE_NOCACHE	0x060	/* '040 cache mode: noncachable */
#define _PAGE_NOCACHE_S	0x040	/* '040 cache mode: noncachable, serialized */
#define _PAGE_CACHE040	0x020	/* '040 cache mode: cachable, copyback */
#define _PAGE_CACHE040W	0x000	/* '040 cache mode: cachable, write-through */

#define _DESCTYPE_MASK	0x003	/* the descriptor-type field */

#define _CACHEMASK040	(~0x060)	/* strips the '040 cache-mode bits */
#define _TABLE_MASK	(0xfffffff0)	/* address bits of a table descriptor */

#define _PAGE_TABLE	(_PAGE_SHORT)
/* bits preserved by pte_modify() */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_CACHE040)

/*
 * Protection map, indexed by mmap prot bits (xwr); __P* is private
 * (copy-on-write), __S* shared.  The hardware has no execute
 * protection and write implies read, hence the repeated entries.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
143
144
/* page of zeroes, exported as ZERO_PAGE below */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page table, and BAD_PAGE
 * when we need a bogus page; both are provided by the mm code.
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE empty_zero_page

/* number of bits in a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to mask away the intra-pointer bits */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*) == (1 << SIZEOF_PTR_LOG2) */
#define SIZEOF_PTR_LOG2			2

/* byte offset of the page-table slot for an address within its table page */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* first address past the usable memory */
extern unsigned long high_memory;

/*
 * Virtual <-> physical address translation; out of line in the mm
 * code since m68k physical memory may be non-contiguous.
 */
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#define VTOP(addr)  (mm_vtop((unsigned long)(addr)))
#define PTOV(addr)  (mm_ptov((unsigned long)(addr)))
182
183
184
185
186
187 extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
188 { pte_t pte; pte_val(pte) = VTOP(page) | pgprot_val(pgprot); return pte; }
189
190 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
191 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
192
/*
 * An m68k pmd entry is really an array of 16 table descriptors:
 * each one points (physically) at a successive 64-entry
 * (PTRS_PER_PTE/16) chunk of the 1024-entry page table, typed as a
 * short-format table descriptor (_PAGE_TABLE).
 */
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	int i;

	ptep = (pte_t *) VTOP(ptep);
	for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
		pmdp->pmd[i] = _PAGE_TABLE | (unsigned long)ptep;
}

/*
 * Same as pmd_set(), but the descriptors are typed _PAGE_PRESENT
 * instead of _PAGE_TABLE — presumably for early/bootstrap tables;
 * confirm with the callers.
 */
extern inline void pmd_set_et(pmd_t * pmdp, pte_t * ptep)
{
	int i;

	ptep = (pte_t *) VTOP(ptep);
	for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
		pmdp->pmd[i] = _PAGE_PRESENT | (unsigned long)ptep;
}
211
/* point a pgd entry at a pmd table (physical address, table type) */
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | VTOP(pmdp); }

/* virtual address of the page a pte maps */
extern inline unsigned long pte_page(pte_t pte)
{ return PTOV(pte_val(pte) & PAGE_MASK); }

/* virtual address of the page table a pmd entry points at */
extern inline unsigned long pmd_page2(pmd_t *pmd)
{ return PTOV(pmd_val(*pmd) & _TABLE_MASK); }
#define pmd_page(pmd) pmd_page2(&(pmd))

/* virtual address of the pmd table a pgd entry points at */
extern inline unsigned long pgd_page(pgd_t pgd)
{ return PTOV(pgd_val(pgd) & _TABLE_MASK); }
224
/* an entry is "none" when no bits at all are set */
extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none2(pmd_t *pmd)		{ return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
/* bad: not a short-format table descriptor, or table beyond high_memory */
extern inline int pmd_bad2(pmd_t *pmd)		{ return (pmd_val(*pmd) & _DESCTYPE_MASK) != _PAGE_TABLE || pmd_page(*pmd) > high_memory; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
extern inline int pmd_present2(pmd_t *pmd)	{ return pmd_val(*pmd) & _PAGE_TABLE; }
#define pmd_present(pmd) pmd_present2(&(pmd))
/* clear all 16 sub-descriptors that make up the pmd entry (cf. pmd_set) */
extern inline void pmd_clear(pmd_t * pmdp)
{
	int i;

	for (i = 0; i < 16; i++)
		pmdp->pmd[i] = 0;
}

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_TABLE; }

extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
248
249
250
251
252
/*
 * The following only work if pte_present() is true; the hardware has
 * no separate read or execute protection, so those are always set
 * and the corresponding protect/mk functions are no-ops.
 */
extern inline int pte_read(pte_t pte)		{ return 1; }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte)		{ return 1; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/* use the nocache mode held in m68k_pgtable_cachemode (set up elsewhere,
   presumably at boot depending on CPU type — confirm) */
extern inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
/* switch the page back to '040 copyback caching */
extern inline pte_t pte_mkcache(pte_t pte)	{ pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_CACHE040; return pte; }
275
276
/*
 * Install a new page directory for a task.  Records the pgd's
 * virtual and physical addresses in the thread struct and builds a
 * short-format root pointer (crp) from the physical address
 * (0x80000000 | _PAGE_SHORT in the first word — verify against the
 * '030 CRP format).  If the task is the current one, the MMU is
 * reloaded immediately:
 *  - '040/'060: raw opcodes because old assemblers lack them —
 *    0xf510 flushes the ATC and 0x4e7b0806 is presumably
 *    "movec %d0,%urp" (load user root pointer); confirm encodings
 *    against the MC68040 manual;
 *  - '020/'030: flush both caches via cacr (0x0808 is the I+D
 *    flush mask, cf. FLUSH_I_AND_D below) and load crp with pmove.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
	tsk->tss.pagedir_v = (unsigned long *)pgdir;
	tsk->tss.pagedir_p = VTOP(pgdir);
	tsk->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
	tsk->tss.crp[1] = tsk->tss.pagedir_p;
	if (tsk == current) {
		if (m68k_is040or060)
			__asm__ __volatile__ (".word 0xf510\n\t"
					      "movel %0@,%/d0\n\t"
					      ".long 0x4e7b0806\n\t"
					      : : "a" (&tsk->tss.crp[1])
					      : "d0");
		else
			__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
					      "oriw #0x0808,%/d0\n\t"
					      "movec %/d0,%/cacr\n\t"
					      "pmove %0@,%/crp\n\t"
					      : : "a" (&tsk->tss.crp[0])
					      : "d0");
	}
}
300
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* the boot-time page directories (PTRS_PER_PGD entries each) */
extern pgd_t swapper_pg_dir[128];
extern pgd_t kernel_pg_dir[128];

/* to find an entry in the kernel's page-table-directory */
extern inline pgd_t * pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}
316
317
318
319 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
320 {
321 return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
322 }
323
324
325 extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
326 {
327 return (pte_t *) pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
328 }
329
330
331
332
333
334
335
336 extern inline void nocache_page (unsigned long vaddr)
337 {
338 if (m68k_is040or060) {
339 pgd_t *dir;
340 pmd_t *pmdp;
341 pte_t *ptep;
342
343 dir = pgd_offset_k(vaddr);
344 pmdp = pmd_offset(dir,vaddr);
345 ptep = pte_offset(pmdp,vaddr);
346 *ptep = pte_mknocache(*ptep);
347 }
348 }
349
350 static inline void cache_page (unsigned long vaddr)
351 {
352 if (m68k_is040or060) {
353 pgd_t *dir;
354 pmd_t *pmdp;
355 pte_t *ptep;
356
357 dir = pgd_offset_k(vaddr);
358 pmdp = pmd_offset(dir,vaddr);
359 ptep = pte_offset(pmdp,vaddr);
360 *ptep = pte_mkcache(*ptep);
361 }
362 }
363
364
365 extern inline void pte_free(pte_t * pte)
366 {
367 cache_page((unsigned long)pte);
368 free_page((unsigned long) pte);
369 }
370
/*
 * Return (allocating if necessary) the pte slot for `address` under
 * `pmd`.  get_free_page() may sleep, so the pmd is re-tested after
 * the allocation: if another process installed a table meanwhile,
 * our fresh page is released again.  New page tables are made
 * noncachable (see nocache_page) before being hooked in.  On
 * allocation failure, or on a corrupt pmd, the pmd is pointed at
 * BAD_PAGETABLE and NULL is returned.
 */
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd,page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long)page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/* pointer-table (pmd) allocators, implemented in the arch mm code */
extern pmd_t *get_pointer_table (void);
extern void free_pointer_table (pmd_t *);
extern pmd_t *get_kpointer_table (void);
extern void free_kpointer_table (pmd_t *);
399
/* release a pmd (pointer table) back to the pointer-table allocator */
extern inline void pmd_free(pmd_t * pmd)
{
	free_pointer_table (pmd);
}

/*
 * Return (allocating if necessary) the pmd slot for `address` under
 * `pgd`.  Same pattern as pte_alloc(): get_pointer_table() may
 * sleep, so the pgd is re-tested afterwards and a table installed by
 * someone else wins; failures point the pgd at BAD_PAGETABLE and
 * return NULL.
 */
extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_pointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_pointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}
427
428 extern inline void pte_free_kernel(pte_t * pte)
429 {
430 cache_page((unsigned long)pte);
431 free_page((unsigned long) pte);
432 }
433
/*
 * Kernel-table variant of pte_alloc(): identical logic (allocate,
 * re-check the pmd after the possibly-sleeping allocation, mark the
 * new table noncachable, fall back to BAD_PAGETABLE on failure).
 */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
		if (pmd_none(*pmd)) {
			if (page) {
				nocache_page((unsigned long)page);
				pmd_set(pmd, page);
				return page + address;
			}
			pmd_set(pmd, BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/* release a kernel pmd back to the kernel pointer-table allocator */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
	free_kpointer_table(pmd);
}

/*
 * Kernel-table variant of pmd_alloc(): same allocate / re-check /
 * BAD_PAGETABLE-on-failure pattern, using the kernel pointer-table
 * allocator.
 */
extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_kpointer_table();
		if (pgd_none(*pgd)) {
			if (page) {
				pgd_set(pgd, page);
				return page + address;
			}
			pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
			return NULL;
		}
		free_kpointer_table(page);
	}
	if (pgd_bad(*pgd)) {
		printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

/* a pgd is taken from the same pool as the pointer tables */
extern inline void pgd_free(pgd_t * pgd)
{
	free_pointer_table ((pmd_t *) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
	return (pgd_t *)get_pointer_table ();
}
495
/*
 * Invalidate the whole instruction cache: raw opcode 0xf498
 * (presumably "cinva %ic" — confirm against the MC68040 manual) on
 * the '040/'060, otherwise set the FLUSH_I bit in cacr.
 */
#define flush_icache() \
do { \
	if (m68k_is040or060) \
		asm (".word 0xf498"); \
	else \
		asm ("movec %/cacr,%/d0;" \
		     "oriw %0,%/d0;" \
		     "movec %/d0,%/cacr" \
		     : \
		     : "i" (FLUSH_I) \
		     : "d0"); \
} while (0)
508
509
510
511
512
513
/*
 * Cache handling for a physical address range; implemented in the
 * arch mm code (see there for exact invalidate/push semantics).
 */
extern void cache_clear (unsigned long paddr, int len);

/* push (write back) cache contents for a physical address range */
extern void cache_push (unsigned long paddr, int len);

/* same as cache_push(), but takes a virtual address */
extern void cache_push_v (unsigned long vaddr, int len);

/*
 * Generic cache-flush interface: no ranged flushing is implemented,
 * the mm/range/page variants all fall back to a full cache flush.
 */
extern void flush_cache_all(void);
#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_range(mm, start, end)	flush_cache_all()
#define flush_cache_page(vma, addr)	flush_cache_all()
extern void flush_page_to_ram(unsigned long addr);

/* cacr bits used to flush the '020/'030 caches */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

/* tells whether addr+len runs past a memory chunk — see the mm code */
int mm_end_of_chunk (unsigned long addr, int len);

/*
 * Map a physical address range into kernel virtual memory with the
 * given cache mode (KERNELMAP_* below).  memavailp, if non-NULL,
 * presumably points at the boot-time memory-allocation cursor —
 * confirm with the implementation.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp );

/* change the cache mode of an already-mapped kernel address range */
extern void kernel_set_cachemode( unsigned long address, unsigned long size,
				  unsigned cmode );

/* cache-mode arguments for kernel_map()/kernel_set_cachemode() */
#define KERNELMAP_FULL_CACHING		0
#define KERNELMAP_NOCACHE_SER		1
#define KERNELMAP_NOCACHE_NONSER	2
#define KERNELMAP_NO_COPYBACK		3
564
565
566
567
568
/*
 * The m68k page tables are walked directly by the hardware, so there
 * is no external MMU state to update after a pte change: this hook
 * is deliberately a no-op.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
573
574
575
576
577
578
/*
 * Swap-entry encoding (the #else branch is the active one): the swap
 * type occupies bits 2..8 and the offset sits above PAGE_SHIFT, so
 * bits 0-1 stay clear and the entry never looks present
 * (_PAGE_PRESENT is bit 0).
 */
#if 0
#define SWP_TYPE(entry)  (((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
#else
#define SWP_TYPE(entry)  (((entry) & 0x1fc) >> 2)
#define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
#endif
588
589 #endif