This source file includes following definitions.
- get_pointer_table
- free_pointer_table
- get_kpointer_table
- free_kpointer_table
- mm_vtop
- mm_ptov
- cache_clear
- cache_push
- cache_push_v
- flush_cache_all
- flush_page_to_ram
- mm_phys_to_virt
- mm_end_of_chunk
- kernel_map
- set_cmode_pte
- set_cmode_pmd
- kernel_set_cachemode
1
2
3
4
5
6
7 #include <linux/mm.h>
8 #include <linux/kernel.h>
9 #include <linux/string.h>
10 #include <linux/types.h>
11 #include <linux/malloc.h>
12
13 #include <asm/segment.h>
14 #include <asm/page.h>
15 #include <asm/pgtable.h>
16 #include <asm/system.h>
17 #include <asm/traps.h>
18 #include <asm/amigahw.h>
19 #include <asm/bootinfo.h>
20
21 extern pte_t *kernel_page_table (unsigned long *memavailp);
22
/* Descriptor for one page holding up to eight pointer tables.
 * `alloced' is a bitmap of the slots in use (bit i == slot i).
 * ptable_list is a circular doubly-linked list of these descriptors;
 * the static head doubles as a sentinel: page == 0 terminates
 * searches, and alloced == 0xff makes it look "full" so the
 * allocator grabs a fresh page when the list is empty. */
static struct ptable_desc {
	struct ptable_desc *prev;
	struct ptable_desc *next;
	unsigned long page;
	unsigned char alloced;
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };
29
/* Helpers on the ptable_desc allocation bitmap. */
#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)		/* all 8 slots used */
#define PD_ALLFREE(dp) ((dp)->alloced == 0)		/* no slot used */
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))	/* slot i free? */
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

/* Size in bytes of one pointer table (1/8 of a page here). */
#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
37
/* Allocate one pointer table (a PTABLE_SIZE block of pmd entries).
 * Tables are packed eight to a page, tracked by a ptable_desc; pages
 * with free slots are kept at the front of ptable_list so the head is
 * always the best candidate.  Returns a zeroed table, or 0 when out
 * of memory. */
pmd_t *get_pointer_table (void)
{
	pmd_t *pmdp = NULL;
	unsigned long flags;
	struct ptable_desc *dp = ptable_list.next;
	int i;

	/* If even the head page is full (or the list is empty -- the
	 * sentinel reads as full), a new backing page is needed. */
	if (PD_NONEFREE (dp)) {

		if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
			return 0;
		}

		if (!(dp->page = __get_free_page (GFP_KERNEL))) {
			kfree (dp);
			return 0;
		}

		/* presumably marks the page cache-inhibited for the
		 * '040/'060 table walker -- see cache_page() on free */
		nocache_page (dp->page);

		dp->alloced = 0;

		/* insert the all-free page at the head of the list */
		save_flags(flags);
		cli();
		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}

	/* claim the first free slot in this page */
	for (i = 0; i < 8; i++)
		if (PD_TABLEFREE (dp, i)) {
			PD_MARKUSED (dp, i);
			pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
			break;
		}

	if (PD_NONEFREE (dp)) {
		/* page is now completely used up: move its descriptor
		 * to the tail so full pages are not searched first */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next->prev;
		dp->prev = ptable_list.prev;
		ptable_list.prev->next = dp;
		ptable_list.prev = dp;
		restore_flags(flags);
	}

	memset (pmdp, 0, PTABLE_SIZE);

	return pmdp;
}
100
/* Return a pointer table allocated by get_pointer_table().  When the
 * last slot of the backing page is freed, the page and its descriptor
 * are released; otherwise the descriptor moves to the head of the
 * list, since the page now has at least one free slot. */
void free_pointer_table (pmd_t *ptable)
{
	struct ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
	unsigned long flags;

	/* the sentinel head has page == 0, so this always terminates */
	for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
		;

	if (!dp->page)
		panic ("unable to find desc for ptable %p on list!", ptable);

	if (PD_TABLEFREE (dp, index))
		panic ("table already free!");

	PD_MARKFREE (dp, index);

	if (PD_ALLFREE (dp)) {
		/* all eight tables in the page are free: unlink the
		 * descriptor and give the page back */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;
		restore_flags(flags);
		cache_page (dp->page);	/* undo nocache_page() */
		free_page (dp->page);
		kfree (dp);
		return;
	} else {
		/* move the descriptor to the front of the list so its
		 * free slot is found first by get_pointer_table() */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}
}
147
/* Bitmap of the statically reserved kernel pointer tables in use;
 * kernel_pmd_table is one page of such tables, defined elsewhere. */
static unsigned char alloced = 0;
extern pmd_t (*kernel_pmd_table)[PTRS_PER_PMD];
150
151 pmd_t *get_kpointer_table (void)
152 {
153
154
155
156
157
158 pmd_t *ptable;
159 int i;
160
161 for (i = 0; i < PAGE_SIZE/(PTRS_PER_PMD*sizeof(pmd_t)); i++)
162 if ((alloced & (1 << i)) == 0) {
163 ptable = kernel_pmd_table[i];
164 memset (ptable, 0, PTRS_PER_PMD*sizeof(pmd_t));
165 alloced |= (1 << i);
166 return ptable;
167 }
168 printk ("no space for kernel pointer table\n");
169 return NULL;
170 }
171
172 void free_kpointer_table (pmd_t *pmdp)
173 {
174 int index = (pmd_t (*)[PTRS_PER_PMD])pmdp - kernel_pmd_table;
175
176 if (index < 0 || index > 7 ||
177
178 ((unsigned long)pmdp & (sizeof(pmd_t) * PTRS_PER_PMD - 1)))
179 panic("attempt to free invalid kernel pointer table");
180 else
181 alloced &= ~(1 << index);
182 }
183
184
185
186
187
/*
 * Translate a kernel virtual address to a physical one.
 *
 * Normal RAM is handled by table lookup: the boot_info memory chunks
 * are mapped virtually contiguous in order, so walking the chunk list
 * turns the virtual offset back into chunk address + offset.  Anything
 * outside the chunks (e.g. kernel_map()ed space) is translated by
 * asking the MMU, which is CPU specific.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	for (i = 0; i < boot_info.num_memory; i++)
	{
		if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				boot_info.memory[i].addr + voff - offset);
#endif
			return boot_info.memory[i].addr + voff - offset;
		} else
			offset += boot_info.memory[i].size;
	}

	/* not in a RAM chunk -- ask the MMU directly */
	if (m68k_is040or060 == 6) {		/* '060 */
		unsigned long fs = get_fs();
		unsigned long paddr;

		set_fs (SUPER_DATA);

		/* plpa (%a0) -- opcode 0xf5c8: the '060 replaces %a0
		 * with the translated physical address */
		asm volatile ("movel %1,%/a0\n\t"
			      ".word 0xf5c8\n\t"
			      "movel %/a0,%0"
			      : "=g" (paddr)
			      : "g" (vaddr)
			      : "a0" );
		set_fs (fs);

		return paddr;

	} else if (m68k_is040or060 == 4) {	/* '040 */
		unsigned long mmusr;
		unsigned long fs = get_fs();

		set_fs (SUPER_DATA);

		/* ptestr (%a0) -- 0xf568, then movec %mmusr,%a0 --
		 * 0x4e7a8805: the MMUSR holds the translation result */
		asm volatile ("movel %1,%/a0\n\t"
			      ".word 0xf568\n\t"
			      ".long 0x4e7a8805\n\t"
			      "movel %/a0,%0"
			      : "=g" (mmusr)
			      : "g" (vaddr)
			      : "a0", "d0");
		set_fs (fs);

		/* R bit set => translation succeeded; MMUSR carries
		 * the physical page frame */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

		panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {				/* '020/'030 */
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		/* ptestr leaves the address of the last descriptor
		 * fetched in %0; the PSR reports status + level count */
		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

		descaddr = (unsigned long *)PTOV(descaddr);

		/* mask the descriptor according to the level it came
		 * from: shallower (early-termination) descriptors map
		 * correspondingly larger regions */
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
		default:
			panic ("VTOP: bad levels (%u) for virtual address %08lx",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	panic ("VTOP: bad virtual address %08lx", vaddr);
}
278
279 unsigned long mm_ptov (unsigned long paddr)
280 {
281 int i;
282 unsigned long offset = 0;
283
284 for (i = 0; i < boot_info.num_memory; i++)
285 {
286 if (paddr >= boot_info.memory[i].addr &&
287 paddr < (boot_info.memory[i].addr
288 + boot_info.memory[i].size)) {
289 #ifdef DEBUGPV
290 printk ("PTOV(%lx)=%lx\n", paddr,
291 (paddr - boot_info.memory[i].addr) + offset);
292 #endif
293 return (paddr - boot_info.memory[i].addr) + offset;
294 } else
295 offset += boot_info.memory[i].size;
296 }
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317 if (MACH_IS_AMIGA && paddr < 16*1024*1024)
318 return ZTWO_VADDR(paddr);
319 return paddr;
320 }
321
/* clear040(paddr): invalidate (discard without writing back) the
 * cache lines of the page containing physical address paddr. */
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      ".word 0xf4d0"\
					      /* cinvp %%bc,(%%a0) */\
					      : : "g" ((paddr))\
					      : "a0")

/* push040(paddr): write back the cache lines of the page containing
 * physical address paddr. */
#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					     ".word 0xf4f0"\
					     /* cpushp %%bc,(%%a0) */\
					     : : "g" ((paddr))\
					     : "a0")

/* push one physical page; on the '060 additionally invalidate it */
#define pushcl040(paddr) do { push040((paddr));\
			      if (m68k_is040or060 == 6) clear040((paddr));\
			 } while(0)

/* pushv040(vaddr): '040 page push by *virtual* address -- translate
 * with ptestr, extract the page frame from the MMUSR, then cpushp. */
#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* ptestr (%%a0) */\
					      ".word 0xf568\n\t"\
					      /* movec %%mmusr,%%d0 */\
					      ".long 0x4e7a0805\n\t"\
					      "andw #0xf000,%/d0\n\t"\
					      "movel %/d0,%/a0\n\t"\
					      /* cpushp %%bc,(%%a0) */\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0", "d0")

/* pushv060(vaddr): '060 page push by virtual address -- plpa
 * translates %a0 in place, then cpushp. */
#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* plpa (%%a0) */\
					      ".word 0xf5c8\n\t"\
					      /* cpushp %%bc,(%%a0) */\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0")
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
/*
 * cache_clear() -- discard any cached data for the physical region
 * [paddr, paddr+len) without writing it back, e.g. after a device has
 * DMAed new data into it.  On the '040/'060 the region is walked page
 * by page with pushcl040() (push, plus invalidate on the '060); on
 * the '020/'030 the whole I+D cache is flushed via the CACR instead.
 */
void cache_clear (unsigned long paddr, int len)
{
	if (m68k_is040or060) {
		/* whole pages first ... */
		while (len > PAGE_SIZE) {
			pushcl040(paddr);
			len -= PAGE_SIZE;
			paddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushcl040(paddr);
			/* ... and the trailing page, if the remainder
			 * straddles a page boundary */
			if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
				pushcl040(paddr + len - 1);
			}
		}
	}
#if 0
	/* dead earlier version, kept disabled */
	while (len > PAGE_SIZE) {
		clear040(paddr);
		len -= PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
	if (len > 0) {
		clear040(paddr);
		if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
			clear040(paddr + len - 1);
		}
	}
#endif
	else
		/* '020/'030: flush both caches through the CACR */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
}
420
421
422
/*
 * cache_push() -- write back any cached data for the physical region
 * [paddr, paddr+len) so RAM is current, e.g. before a device DMAs the
 * region out.  On the '040/'060 the region is walked page by page
 * with push040(); on the '020/'030 only the I-cache is flushed via
 * the CACR.
 */
void cache_push (unsigned long paddr, int len)
{
	if (m68k_is040or060) {
		/* whole pages first ... */
		while (len > PAGE_SIZE) {
			push040(paddr);
			len -= PAGE_SIZE;
			paddr += PAGE_SIZE;
		}
		if (len > 0) {
			push040(paddr);
#if 0
			/* older form of the same boundary test */
			if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
#endif
			/* ... plus the trailing page when the remainder
			 * straddles a page boundary */
			if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
				push040(paddr + len - 1);
			}
		}
	}
	/* '020/'030: flush the I-cache through the CACR */
	else
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
465
/*
 * cache_push_v() -- like cache_push() but for a *virtual* region.
 * The '040 variant translates inside pushv040() (ptestr + MMUSR), the
 * '060 uses plpa in pushv060(); the '020/'030 just flushes the
 * I-cache via the CACR.
 */
void cache_push_v (unsigned long vaddr, int len)
{
	if (m68k_is040or060 == 4) {	/* '040 */
		while (len > PAGE_SIZE) {
			pushv040(vaddr);
			len -= PAGE_SIZE;
			vaddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushv040(vaddr);
#if 0
			/* older form of the same boundary test */
			if (((vaddr + len - 1) / PAGE_SIZE) != (vaddr / PAGE_SIZE)) {
#endif
			/* trailing page when the remainder straddles a
			 * page boundary */
			if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
				pushv040(vaddr + len - 1);
			}
		}
	}
	else if (m68k_is040or060 == 6) {	/* '060 */
		while (len > PAGE_SIZE) {
			pushv060(vaddr);
			len -= PAGE_SIZE;
			vaddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushv060(vaddr);
			if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
				pushv060(vaddr + len - 1);
			}
		}
	}
	/* '020/'030 */
	else
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
509
/* Flush the entire CPU cache: cpusha (opcode 0xf478) on the
 * '040/'060, an I-cache flush through the CACR on the '020/'030. */
void flush_cache_all(void)
{
	if (m68k_is040or060 >= 4)
		__asm__ __volatile__ (".word 0xf478\n" ::);	/* cpusha */
	else
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
521
/* Write the cache lines of one page at virtual address `addr' back to
 * RAM.  '040: pushv040() translates the virtual address itself;
 * '060: translate with VTOP() first, then push the physical page;
 * '020/'030: flush the I-cache via the CACR. */
void flush_page_to_ram (unsigned long addr)
{
	if (m68k_is040or060 == 4)
		pushv040(addr);

	else if (m68k_is040or060 == 6)
		push040(VTOP(addr));

	else
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
538
539 #undef clear040
540 #undef push040
541 #undef pushv040
542 #undef pushv060
543
/* Exported wrapper around the PTOV() translation macro. */
unsigned long mm_phys_to_virt (unsigned long addr)
{
	return PTOV (addr);
}
548
549 int mm_end_of_chunk (unsigned long addr, int len)
550 {
551 int i;
552
553 for (i = 0; i < boot_info.num_memory; i++)
554 if (boot_info.memory[i].addr + boot_info.memory[i].size
555 == addr + len)
556 return 1;
557 return 0;
558 }
559
560
561
562
563
/*
 * Map the physical region [paddr, paddr+size) into kernel virtual
 * space.  Mappings are carved from a private virtual area starting at
 * 0xe0000000 in 256KB (STEP_SIZE) granules, so the request is first
 * aligned out to that size.  `nocacheflag' (a KERNELMAP_* constant)
 * selects the cache mode of the new pages; `memavailp' is passed
 * through to kernel_page_table() for boot-time allocation.  Returns
 * the virtual address that corresponds to the original `paddr'.
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
			 int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE	(256*1024)

	static unsigned long vaddr = 0xe0000000;	/* next free vaddr */
	unsigned long physaddr, retaddr;
	pte_t *ktablep = NULL;
	pmd_t *kpointerp;
	pgd_t *page_dir;
	int pindex;	/* index into pointer table (0..127) */
	int prot;

	/* align the physical region down to a STEP_SIZE granule and
	 * remember where the caller's paddr lands virtually */
	physaddr = paddr & ~(STEP_SIZE-1);
	size += paddr - physaddr;
	retaddr = vaddr + (paddr - physaddr);
	paddr = physaddr;

	/* round the size up to a whole number of granules */
	size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);

	/* translate the cache-mode flag into CPU-specific page bits */
	if (m68k_is040or060) {
		prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
		switch( nocacheflag ) {
		case KERNELMAP_FULL_CACHING:
			prot |= _PAGE_CACHE040;
			break;
		case KERNELMAP_NOCACHE_SER:
		default:
			prot |= _PAGE_NOCACHE_S;
			break;
		case KERNELMAP_NOCACHE_NONSER:
			prot |= _PAGE_NOCACHE;
			break;
		case KERNELMAP_NO_COPYBACK:
			prot |= _PAGE_CACHE040W;
			break;
		}
	} else
		/* the '030 only knows cached vs. cache-inhibited */
		prot = _PAGE_PRESENT |
		       ((nocacheflag == KERNELMAP_FULL_CACHING ||
			 nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

	/* pick up (or create) the pointer table for vaddr */
	page_dir = pgd_offset_k(vaddr);
	if (pgd_present(*page_dir)) {
		kpointerp = (pmd_t *)pgd_page(*page_dir);
		pindex = (vaddr >> 18) & 0x7f;
		if (pindex != 0 && m68k_is040or060) {
			/* continue in a partially used page table */
			if (pmd_present(*kpointerp))
				ktablep = (pte_t *)pmd_page(*kpointerp);
			else {
				ktablep = kernel_page_table (memavailp);
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
				pmd_set(kpointerp,ktablep);
			}
			ktablep += (pindex & 15)*64;
		}
	}
	else {
		/* no pointer table for this vaddr yet: allocate one */
		kpointerp = get_kpointer_table ();
		pgd_set(page_dir, (pmd_t *)kpointerp);
		memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
		pindex = 0;
	}

	/* map one STEP_SIZE granule per iteration */
	for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

		if (pindex > 127) {
			/* pointer table exhausted: start a new one */
			kpointerp = get_kpointer_table ();
			pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
			memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
			pindex = 0;
		}

		if (m68k_is040or060) {
			int i;
			unsigned long ktable;

			/* each pointer-table entry addresses a block of
			 * 64 ptes (one granule); a fresh page of ptes is
			 * needed every 16 entries */
			if ((pindex & 15) == 0) {
				ktablep = kernel_page_table (memavailp);
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
			}

			ktable = VTOP(ktablep);

			/* fill the 64 ptes covering this granule */
			for (i = 0; i < 64; i++) {
				pte_val(*ktablep++) = physaddr | prot;
				physaddr += PAGE_SIZE;
			}

			/* hook the page table into the pointer table */
			((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

		} else {
			/* '030: one early-termination descriptor maps
			 * the whole 256KB granule directly */
			((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
			physaddr += 64 * PAGE_SIZE;
		}
	}

	return( retaddr );
}
689
690
/* Apply the (already CPU-translated) cache-mode bits `cmode' to every
 * pte under `pmd' that falls in [address, address+size), clipped to
 * this pmd's range.  The old cache bits are masked off with
 * ~_PAGE_NOCACHE before the new ones are or-ed in. */
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
				  unsigned long size, unsigned cmode )
{	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;

	pte = pte_offset( pmd, address );
	address &= ~PMD_MASK;	/* offset within this pmd's range */
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;

	for( ; address < end; pte++ ) {
		pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
		address += PAGE_SIZE;
	}
}
710
711
/* Walk the pmds under `dir' that cover [address, address+size) and
 * apply the cache-mode bits `cmode'.  A pmd whose descriptor type is
 * _PAGE_PRESENT is an early-termination descriptor and is rewritten
 * in place; otherwise each page table is visited via
 * set_cmode_pte(). */
static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
				  unsigned long size, unsigned cmode )
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;

	pmd = pmd_offset( dir, address );
	address &= ~PGDIR_MASK;	/* offset within this pgd's range */
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
		/* early termination descriptor: change it directly */
		pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
		return;
	}
	else {
		/* descend into each page table in the range */
		for( ; address < end; pmd++ ) {
			set_cmode_pte( pmd, address, end - address, cmode );
			address = (address + PMD_SIZE) & PMD_MASK;
		}
	}
}
740
741
742
743
744
745
746
747
/*
 * Change the cache mode of an already mapped kernel virtual region
 * [address, address+size).  `cmode' is a KERNELMAP_* constant; it is
 * first translated to the CPU-specific _PAGE_* bits (the '030 only
 * distinguishes cached from cache-inhibited), then the page tables
 * are rewritten via set_cmode_pmd()/set_cmode_pte(), and finally the
 * TLB is flushed so the new modes take effect.
 */
void kernel_set_cachemode( unsigned long address, unsigned long size,
			   unsigned cmode )
{
	pgd_t *dir = pgd_offset_k( address );
	unsigned long end = address + size;

	if (m68k_is040or060) {
		switch( cmode ) {
		case KERNELMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case KERNELMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case KERNELMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case KERNELMAP_NO_COPYBACK:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else
		cmode = ((cmode == KERNELMAP_FULL_CACHING ||
			  cmode == KERNELMAP_NO_COPYBACK) ?
			 0 : _PAGE_NOCACHE030);

	/* one pgd entry per iteration */
	for( ; address < end; dir++ ) {
		set_cmode_pmd( dir, address, end - address, cmode );
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
	}
	flush_tlb_all();
}
781
782