This source file includes the following definitions:
- get_pointer_table
- free_pointer_table
- get_kpointer_table
- free_kpointer_table
- mm_vtop
- mm_ptov
- cache_clear
- cache_push
- cache_push_v
- flush_cache_all
- flush_cache_mm
- flush_cache_range
- flush_cache_page
- flush_page_to_ram
- mm_phys_to_virt
- mm_end_of_chunk
- kernel_map
- set_cmode_pte
- set_cmode_pmd
- kernel_set_cachemode
/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/amigahw.h>
#include <asm/bootinfo.h>

extern pte_t *kernel_page_table (unsigned long *memavailp);

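/* Descriptor for one page of user pointer tables.  Such a page holds
 * eight tables; 'alloced' is a bitmap of the slots in use.  The dummy
 * list head is marked 0xff so that the first allocation immediately
 * grabs a fresh page. */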
static struct ptable_desc {
        struct ptable_desc *prev;
        struct ptable_desc *next;
        unsigned long      page;
        unsigned char      alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

#define PD_NONEFREE(dp)    ((dp)->alloced == 0xff)
#define PD_ALLFREE(dp)     ((dp)->alloced == 0)
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i)  ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i)  ((dp)->alloced &= ~(1<<(i)))

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

pmd_t *get_pointer_table (void)
{
        pmd_t *pmdp = NULL;
        unsigned long flags;
        struct ptable_desc *dp = ptable_list.next;
        int i;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables.  The page is remapped in
         * virtual address space to be noncached.
         */
        if (PD_NONEFREE (dp)) {

                if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
                        return 0;
                }

                if (!(dp->page = __get_free_page (GFP_KERNEL))) {
                        kfree (dp);
                        return 0;
                }

                nocache_page (dp->page);

                dp->alloced = 0;
                /* put at head of list */
                save_flags(flags);
                cli();
                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }

        for (i = 0; i < 8; i++)
                if (PD_TABLEFREE (dp, i)) {
                        PD_MARKUSED (dp, i);
                        pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
                        break;
                }

        if (PD_NONEFREE (dp)) {
                /* put at end of list */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next->prev;
                dp->prev = ptable_list.prev;
                ptable_list.prev->next = dp;
                ptable_list.prev = dp;
                restore_flags(flags);
        }

        memset (pmdp, 0, PTABLE_SIZE);

        return pmdp;
}

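/* Release one user pointer table; the whole page goes back to the system
 * once its last table is freed. */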
void free_pointer_table (pmd_t *ptable)
{
        struct ptable_desc *dp;
        unsigned long page = (unsigned long)ptable & PAGE_MASK;
        int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
        unsigned long flags;

        for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
                ;

        if (!dp->page)
                panic ("unable to find desc for ptable %p on list!", ptable);

        if (PD_TABLEFREE (dp, index))
                panic ("table already free!");

        PD_MARKFREE (dp, index);

        if (PD_ALLFREE (dp)) {
                /* all tables in page are free, free page */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;
                restore_flags(flags);
                cache_page (dp->page);
                free_page (dp->page);
                kfree (dp);
                return;
        } else {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                save_flags(flags);
                cli();
                dp->prev->next = dp->next;
                dp->next->prev = dp->prev;

                dp->next = ptable_list.next;
                dp->prev = ptable_list.next->prev;
                ptable_list.next->prev = dp;
                ptable_list.next = dp;
                restore_flags(flags);
        }
}

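/* Kernel pointer tables are handed out from the single page reserved for
 * this purpose at boot (kernel_pmd_table); 'alloced' is the bitmap of its
 * slots. */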
static unsigned char alloced = 0;
extern pmd_t (*kernel_pmd_table)[PTRS_PER_PMD];

pmd_t *get_kpointer_table (void)
{
        /* Pointer tables for the kernel virtual address space come from
         * slots in kernel_pmd_table.  One page holds
         * PAGE_SIZE/PTABLE_SIZE (i.e. 8) such tables, tracked by the
         * 'alloced' bitmap above.
         */
        pmd_t *ptable;
        int i;

        for (i = 0; i < PAGE_SIZE/(PTRS_PER_PMD*sizeof(pmd_t)); i++)
                if ((alloced & (1 << i)) == 0) {
                        ptable = kernel_pmd_table[i];
                        memset (ptable, 0, PTRS_PER_PMD*sizeof(pmd_t));
                        alloced |= (1 << i);
                        return ptable;
                }
        printk ("no space for kernel pointer table\n");
        return NULL;
}

void free_kpointer_table (pmd_t *pmdp)
{
        int index = (pmd_t (*)[PTRS_PER_PMD])pmdp - kernel_pmd_table;

        if (index < 0 || index > 7 ||
            /* not at the start of a table slot? */
            ((unsigned long)pmdp & (sizeof(pmd_t) * PTRS_PER_PMD - 1)))
                panic("attempt to free invalid kernel pointer table");
        else
                alloced &= ~(1 << index);
}

/*
 * The following two routines translate between kernel virtual addresses
 * and physical addresses.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
        int i;
        unsigned long voff = vaddr;
        unsigned long offset = 0;

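        /* Kernel virtual addresses within RAM are offsets into the boot
         * memory chunks as if those chunks were laid out back to back;
         * walk the chunk list to find the one this address falls in. */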
        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
                        printk ("VTOP(%lx)=%lx\n", vaddr,
                                boot_info.memory[i].addr + voff - offset);
#endif
                        return boot_info.memory[i].addr + voff - offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /* not in one of the memory chunks; get the physical address
         * from the MMU instead.
         */
        if (m68k_is040or060 == 6) {
                unsigned long fs = get_fs();
                unsigned long paddr;

                set_fs (SUPER_DATA);

                /* The PLPAR instruction causes an access error if the
                 * translation is not possible.  We don't catch that here,
                 * so a bad kernel access will be reported as a bus error.
                 */
                asm volatile ("movel %1,%/a0\n\t"
                              ".word 0xf5c8\n\t"	/* plpar (a0) */
                              "movel %/a0,%0"
                              : "=g" (paddr)
                              : "g" (vaddr)
                              : "a0" );
                set_fs (fs);

                return paddr;

        } else if (m68k_is040or060 == 4) {
                unsigned long mmusr;
                unsigned long fs = get_fs();

                set_fs (SUPER_DATA);

                asm volatile ("movel %1,%/a0\n\t"
                              ".word 0xf568\n\t"	/* ptestr (a0) */
                              ".long 0x4e7a8805\n\t"	/* movec mmusr,a0 */
                              "movel %/a0,%0"
                              : "=g" (mmusr)
                              : "g" (vaddr)
                              : "a0", "d0");
                set_fs (fs);

                if (mmusr & MMU_R_040)
                        return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

                panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
        } else {
                volatile unsigned short temp;
                unsigned short mmusr;
                unsigned long *descaddr;

                asm volatile ("ptestr #5,%2@,#7,%0\n\t"
                              "pmove %/psr,%1@"
                              : "=a&" (descaddr)
                              : "a" (&temp), "a" (vaddr));
                mmusr = temp;

                if (mmusr & (MMU_I|MMU_B|MMU_L))
                        panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

                descaddr = (unsigned long *)PTOV(descaddr);

                switch (mmusr & MMU_NUM) {
                case 1:		/* early termination at root level: 32M */
                        return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
                case 2:		/* early termination at pointer level: 256K */
                        return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
                case 3:		/* a normal page descriptor */
                        return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
                default:
                        panic ("VTOP: bad levels (%u) for virtual address %08lx",
                               mmusr & MMU_NUM, vaddr);
                }
        }

        panic ("VTOP: bad virtual address %08lx", vaddr);
}

unsigned long mm_ptov (unsigned long paddr)
{
        int i;
        unsigned long offset = 0;

        for (i = 0; i < boot_info.num_memory; i++)
        {
                if (paddr >= boot_info.memory[i].addr &&
                    paddr < (boot_info.memory[i].addr
                             + boot_info.memory[i].size)) {
#ifdef DEBUGPV
                        printk ("PTOV(%lx)=%lx\n", paddr,
                                (paddr - boot_info.memory[i].addr) + offset);
#endif
                        return (paddr - boot_info.memory[i].addr) + offset;
                } else
                        offset += boot_info.memory[i].size;
        }

        /*
         * assume that the kernel virtual address is the same as the
         * physical address.
         *
         * This should be reasonable in most situations:
         *  1) They shouldn't be dereferencing the virtual address
         *     unless they are sure that it is valid from kernel space.
         *  2) The only usage I see so far is converting a page table
         *     reference to some non-FASTMEM address space when freeing
         *     mmaped "/dev/mem" pages.  These addresses are just passed
         *     to "free_page", which ignores addresses that aren't in
         *     the memory list anyway.
         */

        /*
         * if on an amiga and address is in first 16M, move it
         * to the ZTWO_VADDR range
         */
        if (MACH_IS_AMIGA && paddr < 16*1024*1024)
                return ZTWO_VADDR(paddr);
        return paddr;
}

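/*
 * The '040/'060 cache instructions below are encoded as raw opcodes
 * (.word/.long), presumably because the assemblers of the time did not
 * accept the mnemonics.  The decoded instruction is noted next to each
 * opcode word.
 */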
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              ".word 0xf4d0"\
                                              /* CINVP I/D (a0) */\
                                              : : "g" ((paddr))\
                                              : "a0")

#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                             ".word 0xf4f0"\
                                             /* CPUSHP I/D (a0) */\
                                             : : "g" ((paddr))\
                                             : "a0")

#define pushcl040(paddr) do { push040((paddr));\
                              if (m68k_is040or060 == 6) clear040((paddr));\
                         } while(0)

#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* ptestr (a0) */\
                                              ".word 0xf568\n\t"\
                                              /* movec mmusr,d0 */\
                                              ".long 0x4e7a0805\n\t"\
                                              "andw #0xf000,%/d0\n\t"\
                                              "movel %/d0,%/a0\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0", "d0")

#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
                                              /* plpar (a0) */\
                                              ".word 0xf5c8\n\t"\
                                              /* CPUSHP I/D (a0) */\
                                              ".word 0xf4f0"\
                                              : : "g" ((vaddr))\
                                              : "a0")

/*
 * 040: Hit every page containing an address in the range
 * paddr..paddr+len-1.  (Low order bits of the ea of a CINVP/CPUSHP are
 * "don't care"s, so one hit per page covers the whole page.)  Hit every
 * page until there is a page or less to go.  Hit the next page, and the
 * one after that if the range hits it.
 *
 * Note that CINVP invalidates cache lines WITHOUT writing dirty data
 * back first, so it must not touch partial pages at the ends of the
 * range, or valid data could be thrown away.  That is why cache_clear()
 * uses pushcl040(): CPUSHP writes dirty lines back before invalidating
 * them on the '040, and on the '060 pushcl040() adds the CINVP that
 * CPUSHP alone does not perform there.
 */

void cache_clear (unsigned long paddr, int len)
{
        if (m68k_is040or060) {
                /*
                 * push and invalidate whole pages until a page or less
                 * remains, then take care of the tail (see the comment
                 * above on why CPUSHP is used rather than CINVP)
                 */
                while (len > PAGE_SIZE) {
                        pushcl040(paddr);
                        len -= PAGE_SIZE;
                        paddr += PAGE_SIZE;
                }
                if (len > 0) {
                        pushcl040(paddr);
                        if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                                /* a page boundary gets crossed at the end */
                                pushcl040(paddr + len - 1);
                        }
                }
        }
#if 0
        /* older variant using CINVP only; could discard dirty lines */
        while (len > PAGE_SIZE) {
                clear040(paddr);
                len -= PAGE_SIZE;
                paddr += PAGE_SIZE;
        }
        if (len > 0) {
                clear040(paddr);
                if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
                        /* a page boundary gets crossed at the end */
                        clear040(paddr + len - 1);
                }
        }
#endif
        else
                /* 020/030: invalidate both caches via the CACR */
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I_AND_D)
                              : "d0");
}

void cache_push (unsigned long paddr, int len)
{
        if (m68k_is040or060) {
                /*
                 * on the '040/'060, CPUSHP pushes dirty cache lines to
                 * memory a page at a time; as above, every page
                 * overlapping the range gets hit
                 */
                while (len > PAGE_SIZE) {
                        push040(paddr);
                        len -= PAGE_SIZE;
                        paddr += PAGE_SIZE;
                }
                if (len > 0) {
                        push040(paddr);
#if 0
                        if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
#endif
                        if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
                                /* a page boundary gets crossed at the end */
                                push040(paddr + len - 1);
                        }
                }
        }
        /*
         * 68030/68020 have no writeback cache.  On the other hand,
         * cache_push is actually a superset of cache_clear (the lines
         * get written back and invalidated), so we should make sure to
         * perform the corresponding actions.  After all, this is getting
         * called in places where we've just loaded code, or whatever, so
         * flushing the icache is appropriate; flushing the dcache
         * shouldn't be required.
         */
        else
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I)
                              : "d0");
}

void cache_push_v (unsigned long vaddr, int len)
{
        if (m68k_is040or060 == 4) {
                /* '040: push (and invalidate) the pages by virtual address */
                while (len > PAGE_SIZE) {
                        pushv040(vaddr);
                        len -= PAGE_SIZE;
                        vaddr += PAGE_SIZE;
                }
                if (len > 0) {
                        pushv040(vaddr);
#if 0
                        if (((vaddr + len - 1) / PAGE_SIZE) != (vaddr / PAGE_SIZE)) {
#endif
                        if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                                /* a page boundary gets crossed at the end */
                                pushv040(vaddr + len - 1);
                        }
                }
        }
        else if (m68k_is040or060 == 6) {
                /* '060: pushv060() translates with PLPAR before pushing */
                while (len > PAGE_SIZE) {
                        pushv060(vaddr);
                        len -= PAGE_SIZE;
                        vaddr += PAGE_SIZE;
                }
                if (len > 0) {
                        pushv060(vaddr);
                        if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
                                /* a page boundary gets crossed at the end */
                                pushv060(vaddr + len - 1);
                        }
                }
        }
        /* 020/030: see cache_push() */
        else
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I)
                              : "d0");
}
#if 1
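/*
 * Generic cache flushing entry points called by the mm code.  On the
 * '040/'060 these use the page-granular instructions above; on the
 * '020/'030 the whole cache is flushed via the CACR.
 */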
void flush_cache_all(void)
{
        if (m68k_is040or060 >= 4)
                __asm__ __volatile__ (".word 0xf478\n" ::);	/* cpusha %dc */
        else
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I_AND_D)
                              : "d0");
}

void flush_cache_mm(struct mm_struct *mm)
{
        if (mm == current->mm)
                flush_cache_all();
}

void flush_cache_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end)
{
        if (mm == current->mm)
                cache_push_v(start, end - start);
}

void flush_cache_page (struct vm_area_struct *vma, unsigned long vaddr)
{
        if (m68k_is040or060 == 4)
                pushv040(vaddr);
        else if (m68k_is040or060 == 6)
                /* the '060 has no PTEST, so use the PLPAR-based push */
                pushv060(vaddr);
        else
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I_AND_D)
                              : "d0");
}

void flush_page_to_ram (unsigned long vaddr)
{
        if (m68k_is040or060 >= 4)
                /* push the page at its physical address; on the '060
                 * pushcl040() also invalidates it */
                pushcl040(VTOP(vaddr));
        else
                asm volatile ("movec %/cacr,%/d0\n\t"
                              "oriw %0,%/d0\n\t"
                              "movec %/d0,%/cacr"
                              : : "i" (FLUSH_I_AND_D)
                              : "d0");
}
#endif

#undef clear040
#undef push040
#undef pushv040
#undef pushv060

unsigned long mm_phys_to_virt (unsigned long addr)
{
        return PTOV (addr);
}

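/* Nonzero when addr+len ends exactly at the end of one of the boot
 * memory chunks. */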
int mm_end_of_chunk (unsigned long addr, int len)
{
        int i;

        for (i = 0; i < boot_info.num_memory; i++)
                if (boot_info.memory[i].addr + boot_info.memory[i].size
                    == addr + len)
                        return 1;
        return 0;
}

/*
 * Map some physical address range into the kernel address space.
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
                         int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE (256*1024)

        static unsigned long vaddr = 0xe0000000;
        unsigned long physaddr, retaddr;
        pte_t *ktablep = NULL;
        pmd_t *kpointerp;
        pgd_t *page_dir;
        int pindex;
        int prot;

        /* Round down 'paddr' to 256 KB and adjust size */
        physaddr = paddr & ~(STEP_SIZE-1);
        size += paddr - physaddr;
        retaddr = vaddr + (paddr - physaddr);
        paddr = physaddr;

        /* Round up the size to 256 KB. It doesn't hurt if too much
         * gets mapped... */
        size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);
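
        /* Worked example with made-up numbers: paddr = 0xdff000 and
         * size = 0x2000 become physaddr = 0xdc0000, size = 0x41000,
         * rounded up to 0x80000 (two 256 KB steps); retaddr points
         * 0x3f000 into the new mapping, i.e. back at 0xdff000. */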

        if (m68k_is040or060) {
                prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
                switch( nocacheflag ) {
                case KERNELMAP_FULL_CACHING:
                        prot |= _PAGE_CACHE040;
                        break;
                case KERNELMAP_NOCACHE_SER:
                default:
                        prot |= _PAGE_NOCACHE_S;
                        break;
                case KERNELMAP_NOCACHE_NONSER:
                        prot |= _PAGE_NOCACHE;
                        break;
                case KERNELMAP_NO_COPYBACK:
                        prot |= _PAGE_CACHE040W;
                        /* write-through cached */
                        break;
                }
        } else
                prot = _PAGE_PRESENT |
                       ((nocacheflag == KERNELMAP_FULL_CACHING ||
                         nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

        page_dir = pgd_offset_k(vaddr);
        if (pgd_present(*page_dir)) {
                kpointerp = (pmd_t *)pgd_page(*page_dir);
                pindex = (vaddr >> 18) & 0x7f;
                if (pindex != 0 && m68k_is040or060) {
                        if (pmd_present(*kpointerp))
                                ktablep = (pte_t *)pmd_page(*kpointerp);
                        else {
                                ktablep = kernel_page_table (memavailp);
                                /* clear the new page table */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                                pmd_set(kpointerp,ktablep);
                        }
                        ktablep += (pindex & 15)*64;
                }
        }
        else {
                /* we need a new pointer table */
                kpointerp = get_kpointer_table ();
                pgd_set(page_dir, (pmd_t *)kpointerp);
                memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                pindex = 0;
        }

        for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

                if (pindex > 127) {
                        /* we need a new pointer table every 32M */
                        kpointerp = get_kpointer_table ();
                        pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
                        memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
                        pindex = 0;
                }

                if (m68k_is040or060) {
                        int i;
                        unsigned long ktable;

                        /*
                         * 68040/68060: the pointer table entries point
                         * at 64-entry sections of a page table; a fresh
                         * page table is needed every 16 entries (4M)
                         */
                        if ((pindex & 15) == 0) {
                                /* need a new page table every 4M on the '040 */
                                ktablep = kernel_page_table (memavailp);
                                /* clear the new page table */
                                memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
                        }

                        ktable = VTOP(ktablep);

                        /*
                         * initialize the section of the page table
                         * mapping this 256K portion
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(*ktablep++) = physaddr | prot;
                                physaddr += PAGE_SIZE;
                        }

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table
                         */
                        ((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

                } else {
                        /*
                         * 68030: use early termination page descriptors;
                         * each one maps 64 pages (= 256K) directly
                         */
                        ((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
                        physaddr += 64 * PAGE_SIZE;
                }
        }

        return( retaddr );
}
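/*
 * Helpers for kernel_set_cachemode(): walk the kernel page tables for a
 * range and rewrite the cache-mode bits in each descriptor.
 */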
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pte_t *pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;

        pte = pte_offset( pmd, address );
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;

        for( ; address < end; pte++ ) {
                pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
                address += PAGE_SIZE;
        }
}

static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
                                  unsigned long size, unsigned cmode )
{
        pmd_t *pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;

        pmd = pmd_offset( dir, address );
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                /* 68030 early termination descriptor: the cache mode
                 * sits in the pmd entry itself */
                pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
                return;
        }
        else {
                /* "normal" tables: set the bits in every pte */
                for( ; address < end; pmd++ ) {
                        set_cmode_pte( pmd, address, end - address, cmode );
                        address = (address + PMD_SIZE) & PMD_MASK;
                }
        }
}

/*
 * Change the cache mode of an already-mapped kernel address range.
 * 'cmode' takes the same KERNELMAP_* values as kernel_map().
 */
void kernel_set_cachemode( unsigned long address, unsigned long size,
                           unsigned cmode )
{
        pgd_t *dir = pgd_offset_k( address );
        unsigned long end = address + size;

        if (m68k_is040or060) {
                switch( cmode ) {
                case KERNELMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case KERNELMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case KERNELMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case KERNELMAP_NO_COPYBACK:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else
                cmode = ((cmode == KERNELMAP_FULL_CACHING ||
                          cmode == KERNELMAP_NO_COPYBACK) ?
                         0 : _PAGE_NOCACHE030);

        for( ; address < end; dir++ ) {
                set_cmode_pmd( dir, address, end - address, cmode );
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        flush_tlb_all();
}
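
/* For instance (hypothetical use), a driver that mapped a frame buffer
 * with kernel_map() could later do
 *     kernel_set_cachemode(vaddr, size, KERNELMAP_NO_COPYBACK);
 * to switch the mapping to write-through caching on the '040/'060. */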