This source file includes the following definitions.
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
- MMU_free_item
- MMU_get_item
- MMU_init
- MMU_get_page
- MMU_map_page
- MMU_hash_page
- invalidate
- cache_mode
1
2
3
4
5
6
7
8
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/head.h>
13 #include <linux/kernel.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/types.h>
17 #include <linux/ptrace.h>
18 #include <linux/mman.h>
19 #include <linux/mm.h>
20
21 #include <asm/pgtable.h>
22
23
24
25
26
27
28
29
30
/* Top-level page directory for the kernel ("swapper") context. */
pgd_t swapper_pg_dir[1024];

/* Allocates a zeroed MMU page-table page from the pool below. */
pte *MMU_get_page(void);
35
36
37 #if 0
38 #include <asm/system.h>
39 #include <asm/segment.h>
40 #include <asm/mipsconfig.h>
41
42 extern unsigned long pg0[1024];
43 #endif
44
45 #ifdef CONFIG_DESKSTATION_TYNE
46 extern void deskstation_tyne_dma_init(void);
47 #endif
48 #ifdef CONFIG_SOUND
49 extern void sound_mem_init(void);
50 #endif
51 extern void die_if_kernel(char *,struct pt_regs *,long);
52 extern void show_net_buffers(void);
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67 pte_t * __bad_pagetable(void)
68 {
69 panic("__bad_pagetable");
70 #if 0
71 extern char empty_bad_page_table[PAGE_SIZE];
72 unsigned long dummy;
73
74 __asm__ __volatile__(
75 ".set\tnoreorder\n\t"
76 "1:\tsw\t%2,(%0)\n\t"
77 "subu\t%1,%1,1\n\t"
78 "bne\t$0,%1,1b\n\t"
79 "addiu\t%0,%0,1\n\t"
80 ".set\treorder"
81 :"=r" (dummy),
82 "=r" (dummy)
83 :"r" (pte_val(BAD_PAGE)),
84 "0" ((long) empty_bad_page_table),
85 "1" (PTRS_PER_PAGE));
86
87 return (pte_t *) empty_bad_page_table;
88 #endif
89 }
90
91 pte_t __bad_page(void)
92 {
93 panic("__bad_page");
94 #if 0
95 extern char empty_bad_page[PAGE_SIZE];
96 unsigned long dummy;
97
98 __asm__ __volatile__(
99 ".set\tnoreorder\n\t"
100 "1:\tsw\t$0,(%0)\n\t"
101 "subu\t%1,%1,1\n\t"
102 "bne\t$0,%1,1b\n\t"
103 "addiu\t%0,%0,1\n\t"
104 ".set\treorder"
105 :"=r" (dummy),
106 "=r" (dummy)
107 :"0" ((long) empty_bad_page),
108 "1" (PTRS_PER_PAGE));
109
110 return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
111 #endif
112 }
113
114 unsigned long __zero_page(void)
115 {
116 #if 0
117 panic("__zero_page");
118 #else
119 extern char empty_zero_page[PAGE_SIZE];
120 bzero(empty_zero_page, PAGE_SIZE);
121 return (unsigned long) empty_zero_page;
122 #endif
123 }
124
125 void show_mem(void)
126 {
127 int i,free = 0,total = 0,reserved = 0;
128 int shared = 0;
129
130 printk("Mem-info:\n");
131 show_free_areas();
132 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
133 i = high_memory >> PAGE_SHIFT;
134 while (i-- > 0) {
135 total++;
136 if (mem_map[i].reserved)
137 reserved++;
138 else if (!mem_map[i].count)
139 free++;
140 else
141 shared += mem_map[i].count-1;
142 }
143 printk("%d pages of RAM\n",total);
144 printk("%d free pages\n",free);
145 printk("%d reserved pages\n",reserved);
146 printk("%d pages shared\n",shared);
147 show_buffers();
148 #ifdef CONFIG_NET
149 show_net_buffers();
150 #endif
151 }
152
153 extern unsigned long free_area_init(unsigned long, unsigned long);
154
155
156
157
158
159
160
161
/*
 * paging_init() - early paging setup.
 *
 * On this port the real MMU bring-up happens in MMU_init() below; the
 * large #if 0 block is a disabled identity-mapping loop inherited from
 * the MIPS port (note the KSEG0/cacheflush references).  The only live
 * work here is handing the managed range to the free-area allocator.
 *
 * Returns the updated start_mem after free_area_init() has carved out
 * its bookkeeping structures.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{

#if 0
	pgd_t * pg_dir;
	pte_t * pg_table;
	unsigned long tmp;
	unsigned long address;

	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		if (pgd_none(pg_dir[0])) {
			pgd_set(pg_dir, (pte_t *) start_mem);
			start_mem += PAGE_SIZE;
		}

		/* Map (or clear) one directory's worth of pages. */
		pg_table = (pte_t *) pgd_page(pg_dir[0]);
		pgd_set(pg_dir, pg_table);
		pg_dir++;
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = mk_pte(address, PAGE_SHARED);
			else
				pte_clear(pg_table);
			address += PAGE_SIZE;
		}
	}
#if KERNELBASE == KSEG0
	cacheflush();
#endif
	invalidate();
#endif
	return free_area_init(start_mem, end_mem);
}
200
/*
 * mem_init() - release boot-time memory to the page allocator and
 * print the memory banner.
 *
 * Clears the 'reserved' flag on every page between start_mem and
 * high_memory, then walks all pages from KERNELBASE up: pages still
 * marked reserved are tallied, everything else is handed to
 * free_page() with a reference count of 1.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* Mark everything above start_mem as usable. */
	start_mem = PAGE_ALIGN(start_mem);

#if 0
	printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)].reserved = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_DESKSTATION_TYNE
	deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
	{
		if (mem_map[MAP_NR(tmp)].reserved)
		{
			/*
			 * NOTE(review): the 'if (0)' disables the reserved
			 * counter, so reservedpages is always 0 in the banner
			 * and reserved pages are attributed to code or data
			 * instead.  Looks like a debugging leftover -- confirm
			 * intended classification before changing it.
			 */
			if (0)
			{
				reservedpages++;
			} else if (tmp < (unsigned long) &etext)
			{
				codepages++;
			} else
			{
				datapages++;
			}
			continue;
		}
		/* Page is usable: give it one reference and free it. */
		mem_map[MAP_NR(tmp)].count = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		((int)high_memory - (int)KERNELBASE) >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));
	invalidate();
	return;
}
261
/*
 * si_meminfo() - fill in *val for the sysinfo()/meminfo interfaces.
 *
 * NOTE(review): the whole body is compiled out, so this is currently a
 * stub -- *val is returned to the caller completely untouched.  The
 * disabled code indexes mem_map[] as a plain integer array (the old
 * MAP_PAGE_RESERVED bitmask format), which no longer matches the
 * struct-based mem_map used by show_mem() above; presumably that is
 * why it was disabled.  Confirm callers can tolerate an unmodified
 * struct sysinfo before relying on this.
 */
void si_meminfo(struct sysinfo *val)
{
#if 0
	int i;

	i = high_memory >> PAGE_SHIFT;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (mem_map[i] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		if (!mem_map[i])
			continue;
		val->sharedram += mem_map[i]-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
#endif
}
285
286
287
288
289
290
291
292
/*
 * Block Address Translation (BAT) register images (PowerPC).
 *
 * Each initializer pairs an upper BAT word with a lower BAT word; the
 * effective/physical block index is the address shifted right by 17
 * (128KB granularity) and BL_256M selects a 256MB block.
 *
 * NOTE(review): the field order is assumed to be
 *   upper: { BEPI, BL, Vs, Vp }   lower: { BRPN, W/I/M/G-style bits, PP }
 * -- confirm against the BAT type in the headers before editing.
 */

/* BAT0: identity-map the 256MB block at 0x80000000, read/write. */
BAT BAT0 =
{
	{
		0x80000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x80000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
/* BAT1: identity-map the 256MB block at 0xC0000000, read/write. */
BAT BAT1 =
{
	{
		0xC0000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0xC0000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
/* BAT2: active variant is a not-valid placeholder at 0; the #else
   variant (disabled) mapped 0x90000000 onto physical 0. */
BAT BAT2 =
{
#if 1
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
#else
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
#endif
};
/* BAT3: unused / not valid. */
BAT BAT3 =
{
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
};
/* TMP_BAT2: temporary mapping of 0x90000000 (presumably KERNELBASE)
   onto physical 0 -- used during MMU bring-up.  TODO confirm. */
BAT TMP_BAT2 =
{
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
};
396
/* Value destined for the SDR1 register: hash-table base OR'd with the
   hash mask (see MMU_init below). */
unsigned long _SDR1;
/* Hardware hash table: base pointer, size in bytes, and hash mask. */
PTE *Hash;
int Hash_size, Hash_mask;
/* Cache policy knobs; cache_is_copyback is settable via cache_mode(). */
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

/* Ring buffer of the most recent hash-table mappings, dumped when the
   hash table overflows (diagnostics only).
   NOTE(review): .task stores the 'current' task pointer in an int. */
#define NUM_MAPPINGS 8
struct
{
	int va, pa, task;
} last_mappings[NUM_MAPPINGS];
int next_mapping = 0;
409
410
/* Minimal singly-linked free-list node; free pages are threaded
   through their first word. */
struct item
{
	struct item *next;
};

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS 16
#define MAX_MMU_PAGES 8

/* Head of the free list of MMU page-table pages. */
static struct item _free_pages;
/* Static pool backing the free list; one extra page so the pool can
   be aligned up to an MMU_PAGE_SIZE boundary in MMU_init(). */
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];
425
426
427
428
429
430 MMU_free_item(struct item *hdr, struct item *elem)
431 {
432 if (hdr->next == (struct item *)NULL)
433 {
434 elem->next = (struct item *)NULL;
435 } else
436 {
437 elem->next = hdr->next;
438 }
439 hdr->next = elem;
440 }
441
442 struct item *
443 MMU_get_item(struct item *hdr)
444 {
445 struct item *item;
446 if ((item = hdr->next) != (struct item *)NULL)
447 {
448 item = hdr->next;
449 hdr->next = item->next;
450 }
451 return (item);
452 }
453
454
455
456
457
458
459
460
461 extern char _start[], _end[];
462
/*
 * MMU_init() - bring up the software side of the MMU:
 *  - size memory and place the hash table (find_end_of_memory(),
 *    which presumably sets Hash/Hash_size/Hash_mask -- used right
 *    after, so it must);
 *  - compute the SDR1 image (physical hash base | mask; the
 *    & 0x00FFFFFF strips the kernel virtual offset -- assumes
 *    virt = phys within the low 16MB, TODO confirm);
 *  - build the free pool of page-table pages;
 *  - set up the init task's page-table pointer and the 16 kernel
 *    segment registers (vsid == segment number);
 *  - map kernel text/data up to the hash table, then the hash table
 *    itself, 1:1 into the init task's tables.
 */
void MMU_init(void)
{
	int i, p;
	SEGREG *segs;

	find_end_of_memory();

	_SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
	/* Align the static pool up to a page boundary, then free each page. */
	p = (int)mmu_pages;
	p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
	_free_pages.next = (struct item *)NULL;
	for (i = 0; i < MAX_MMU_PAGES; i++)
	{
		MMU_free_item(&_free_pages, (struct item *)p);
		p += MMU_PAGE_SIZE;
	}

	init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

	/* Kernel segment registers: supervisor-valid, vsid = segment. */
	segs = (SEGREG *)init_task.tss.segs;
	for (i = 0; i < 16; i++)
	{
		segs[i].ks = 0;
		segs[i].kp = 1;
		segs[i].vsid = i;
	}

#if 0
	for (i = (int)_start; i <= (int)_end; i += MMU_PAGE_SIZE)
#else
	/* Map only up to the hash table rather than all of _end. */
	for (i = (int)_start; i <= (int)Hash; i += MMU_PAGE_SIZE)
#endif
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}

	/* Map the hash table itself. */
	for (i = (int)Hash; i < (int)Hash+Hash_size; i += MMU_PAGE_SIZE)
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}

}
508
509 pte *
510 MMU_get_page(void)
511 {
512 pte *pg;
513 if ((pg = (pte *)MMU_get_item(&_free_pages)))
514 {
515 bzero((char *)pg, MMU_PAGE_SIZE);
516 }
517
518 return(pg);
519 }
520
/*
 * MMU_map_page() - install a mapping va -> pa with the given flags in
 * the two-level software page tables hanging off 'tss', then mirror
 * it into the hardware hash table via MMU_hash_page().
 *
 * Allocates the page directory and the page table on demand from the
 * MMU page pool; out of pool is fatal.
 *
 * NOTE(review): '(pte **)tss->pg_tables = ...' relies on the old GCC
 * cast-as-lvalue extension (invalid in standard C and removed from
 * later GCCs).
 * NOTE(review): the directory entry is stored with _PAGE_TABLE OR'd
 * into the pointer, but 'pd' is used below without masking those low
 * bits off -- confirm _PAGE_TABLE cannot perturb the address.
 * Implicit int return type; no value is ever returned.
 */
MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
	pte *pd, *pg;
	/* Debug trace for everything below the kernel window. */
	if (va < (unsigned long)0x90000000)
		_printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
	if ((pte **)tss->pg_tables == (pte **)NULL)
	{
		/* First mapping for this context: allocate the directory. */
		(pte **)tss->pg_tables = (pte **)MMU_get_page();
		if ((pte **)tss->pg_tables == (pte **)NULL)
		{
			_panic("Out of MMU pages (PD)\n");
		}
	}

	pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
	if (pd == (pte *)NULL)
	{
		/* No page table for this directory slot yet: allocate one. */
		pd = (pte *)MMU_get_page();
		if (pd == (pte *)NULL)
		{
			_panic("Out of MMU pages (PG)\n");
		}
		((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
	}

	/* Fill in the software pte, then push it into the hash table. */
	pg = &pd[(va>>PT_SHIFT)&PT_MASK];
	*(long *)pg = 0;
	pg->page_num = pa>>PG_SHIFT;
	pg->flags = flags;
	MMU_hash_page(tss, va, pg);
}
552
553
554
555
/*
 * MMU_hash_page() - insert or remove a translation in the hardware
 * hash table (HTAB).
 *
 * With pg != NULL: find (or claim) a slot for 'va' in the primary or
 * secondary hash group and fill in a hardware PTE from the software
 * pte; returns 0.
 * With pg == NULL: look up the existing entry for 'va'; if found,
 * invalidate it and return the referenced/changed state translated to
 * _PAGE_ACCESSED/_PAGE_DIRTY software flags; returns 0 if not found.
 *
 * Implicit int return type (returns flags as described above).
 */
MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
	int hash, page_index, segment, i, h, _h, api, vsid, perms;
	PTE *_pte, *empty, *slot;
	PTE *slot0, *slot1;
	extern char _etext;

	/* Record this call in the diagnostics ring buffer.
	   NOTE(review): .task is an int holding the 'current' pointer. */
	last_mappings[next_mapping].va = va;
	last_mappings[next_mapping].pa = pg?*(int *)pg:0;
	last_mappings[next_mapping].task = current;
	if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;

	/* Decompose the address: 16-bit page index within the segment,
	   segment number, abbreviated page index, and the segment's VSID. */
	page_index = ((int)va & 0x0FFFF000) >> 12;
	segment = (unsigned int)va >> 28;
	api = page_index >> 10;
	vsid = ((SEGREG *)tss->segs)[segment].vsid;
	empty = slot = (PTE *)NULL;
	/* Probe the primary (_h == 0) then secondary (_h == 1) hash group. */
	for (_h = 0; _h < 2; _h++)
	{
		hash = page_index ^ vsid;
		if (_h)
		{
			hash = ~hash;	/* secondary hash is the complement */
		}
		hash &= 0x3FF | (Hash_mask << 10);
		hash *= 8;	/* 8 PTEs per group */
		_pte = &Hash[hash];

		if (_h)
		{
			slot1 = _pte;
		} else
		{
			slot0 = _pte;
		}
		for (i = 0; i < 8; i++, _pte++)
		{
			/* Match on valid + VSID + hash function + API. */
			if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
			{
				h = _h;
				slot = _pte;
				goto found_it;
			}
			/* Remember the first invalid slot as a fallback. */
			if ((empty == (PTE *)NULL) && !_pte->v)
			{
				h = _h;
				empty = _pte;
			}
		}
	}
	if (slot == (PTE *)NULL)
	{
		if (pg == (pte *)NULL)
		{
			/* Removal requested but no entry exists: nothing to do. */
			return (0);
		}
		if (empty == (PTE *)NULL)
		{
			/* Both groups full: dump state and give up.
			   NOTE(review): 'h' is uninitialized here when no empty
			   slot was ever recorded -- the printed H value is junk. */
			printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
			printk("Slot0:\n");
			_pte = slot0;
			for (i = 0; i < 8; i++, _pte++)
			{
				printk(" V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
			}
			printk("Slot1:\n");
			_pte = slot1;
			for (i = 0; i < 8; i++, _pte++)
			{
				printk(" V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
			}
			cnpause();
			printk("Last mappings:\n");
			for (i = 0; i < NUM_MAPPINGS; i++)
			{
				printk(" VA: %08x, PA: %08X, TASK: %08X\n",
					last_mappings[next_mapping].va,
					last_mappings[next_mapping].pa,
					last_mappings[next_mapping].task);
				if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
			}
			cnpause();
			_panic("Hash table full!\n");
		}
		slot = empty;
	}
found_it:
#if 0
	_printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
	_tlbie(va);	/* flush any stale TLB entry for va */
	if (pg)
	{
		/* Fill in the hardware PTE from the software pte. */
		slot->v = 1;
		slot->vsid = vsid;
		slot->h = h;
		slot->api = api;
		if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
		{
			/* Kernel virtual page: strip the KERNELBASE offset to
			   get the physical page number. */
			slot->rpn = pg->page_num - (KERNELBASE>>12);
		} else
		{
			slot->rpn = pg->page_num;
		}
		slot->r = 0;
		slot->c = 0;
		slot->i = 0;
		slot->g = 0;
		/* Cache policy: copyback (w=0,m=1) vs write-through (w=1,m=0). */
		if (cache_is_copyback)
		{
			if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
			{
				slot->w = 0;
				slot->m = 1;
			} else
			{
				slot->w = 1;
				slot->m = 0;
			}
		} else
		{
			slot->w = 1;
			slot->m = 0;
		}
		if (pg->flags & _PAGE_USER)
		{
			if (pg->flags & _PAGE_RW)
			{
				perms = PP_RWRW;
			} else
			{
				perms = PP_RWRX;
			}
		} else
		{
			/* NOTE(review): the first assignment is a dead store --
			   kernel-only pages always end up PP_RWXX.  Confirm which
			   permission was intended. */
			perms = PP_RWRW;
			perms = PP_RWXX;
		}
#ifdef SHOW_FAULTS
		if (va < KERNELBASE)
			_printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
#endif
		slot->pp = perms;
		return (0);
	} else
	{
		/* Removal: harvest R/C bits, then invalidate the entry. */
		int flags = 0;
		if (slot->r) flags |= _PAGE_ACCESSED;
		if (slot->c) flags |= _PAGE_DIRTY;
		slot->v = 0;
#ifdef SHOW_FAULTS
		_printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
		return (flags);
	}
}
716
717
718
719
720 void
721 invalidate(void)
722 {
723 int i, j, flags;
724 unsigned long address;
725 pgd_t *pgd;
726 pte_t *_pte;
727 #if 0
728 _tlbia();
729 #endif
730 pgd = pgd_offset(current->mm, 0);
731 if (!pgd) return;
732 address = 0;
733 for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
734 {
735 if (*(long *)pgd)
736 {
737
738 _pte = pte_offset(pmd_offset(pgd,0),0);
739 if (_pte)
740 {
741 for (j = 0; j < PTRS_PER_PTE; j++)
742 {
743 if (pte_present(*_pte))
744 {
745 flags = MMU_hash_page(¤t->tss, address, 0);
746 ((pte *)_pte)->flags |= flags;
747 }
748 _pte++;
749 address += PAGE_SIZE;
750 }
751 } else
752 {
753 address += PAGE_SIZE*PTRS_PER_PTE;
754 }
755 } else
756 {
757 address += PAGE_SIZE*PTRS_PER_PTE;
758 }
759 pgd++;
760 }
761 }
762
/*
 * cache_mode() - boot-argument handler selecting the cache policy:
 * nonzero enables copyback caching, zero write-through (consumed by
 * MMU_hash_page() when setting the W/M bits).
 *
 * NOTE(review): kernel setup handlers conventionally receive the
 * argument count in ints[0] and the first value in ints[1]; confirm
 * that reading ints[0] here is intended and not an off-by-one.
 */
void
cache_mode(char *str, int *ints)
{
	cache_is_copyback = ints[0];
}
768
769