This source file includes following definitions.
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
- MMU_free_item
- MMU_get_item
- MMU_init
- MMU_get_page
- MMU_map_page
- MMU_hash_page
- invalidate
- cache_mode
1
2
3
4
5
6
7
8 #include <linux/config.h>
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/head.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19
20 #include <asm/pgtable.h>
21
22
23
24
25
26
27
28
29
/* Master kernel page directory, installed for init_task in MMU_init().
   Sized 1024*8 entries -- why 8x the usual 1024 is not stated here. */
pgd_t swapper_pg_dir[1024*8];

/* Allocator for page-table pages from the MMU pool (defined below). */
pte *MMU_get_page(void);

/* Disabled leftovers from the MIPS version this file was derived from
   (note <asm/mipsconfig.h>). */
#if 0
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];
#endif

/* Optional per-driver memory initialization hooks, called from mem_init(). */
#ifdef CONFIG_DESKSTATION_TYNE
extern void deskstation_tyne_dma_init(void);
#endif
#ifdef CONFIG_SCSI
extern void scsi_mem_init(unsigned long);
#endif
#ifdef CONFIG_SOUND
extern void sound_mem_init(void);
#endif
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
55
56
57
58
59
60
61
62
63
64
65
66
67
68
/*
 * Return a dummy page table for out-of-memory situations.
 *
 * Unimplemented on this port: the MIPS inline assembly that used to
 * fill empty_bad_page_table with BAD_PAGE entries is compiled out
 * below, so reaching this function simply panics.
 */
pte_t * __bad_pagetable(void)
{
	panic("__bad_pagetable");
#if 0
	extern char empty_bad_page_table[PAGE_SIZE];
	unsigned long dummy;

	/* MIPS assembly: store BAD_PAGE into each of PTRS_PER_PAGE slots */
	__asm__ __volatile__(
	".set\tnoreorder\n\t"
	"1:\tsw\t%2,(%0)\n\t"
	"subu\t%1,%1,1\n\t"
	"bne\t$0,%1,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\treorder"
	:"=r" (dummy),
	"=r" (dummy)
	:"r" (pte_val(BAD_PAGE)),
	"0" ((long) empty_bad_page_table),
	"1" (PTRS_PER_PAGE));

	return (pte_t *) empty_bad_page_table;
#endif
}
92
/*
 * Return a dummy PTE for out-of-memory situations.
 *
 * Unimplemented on this port: the MIPS inline assembly that used to
 * zero empty_bad_page is compiled out below, so reaching this
 * function simply panics.
 */
pte_t __bad_page(void)
{
	panic("__bad_page");
#if 0
	extern char empty_bad_page[PAGE_SIZE];
	unsigned long dummy;

	/* MIPS assembly: zero the page one word at a time */
	__asm__ __volatile__(
	".set\tnoreorder\n\t"
	"1:\tsw\t$0,(%0)\n\t"
	"subu\t%1,%1,1\n\t"
	"bne\t$0,%1,1b\n\t"
	"addiu\t%0,%0,1\n\t"
	".set\treorder"
	:"=r" (dummy),
	"=r" (dummy)
	:"0" ((long) empty_bad_page),
	"1" (PTRS_PER_PAGE));

	return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
#endif
}
115
116 unsigned long __zero_page(void)
117 {
118 #if 0
119 panic("__zero_page");
120 #else
121 extern char empty_zero_page[PAGE_SIZE];
122 bzero(empty_zero_page, PAGE_SIZE);
123 return (unsigned long) empty_zero_page;
124 #endif
125 }
126
127 void show_mem(void)
128 {
129 int i,free = 0,total = 0,reserved = 0;
130 int shared = 0;
131
132 printk("Mem-info:\n");
133 show_free_areas();
134 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
135 i = high_memory >> PAGE_SHIFT;
136 while (i-- > 0) {
137 total++;
138 if (mem_map[i].reserved)
139 reserved++;
140 else if (!mem_map[i].count)
141 free++;
142 else
143 shared += mem_map[i].count-1;
144 }
145 printk("%d pages of RAM\n",total);
146 printk("%d free pages\n",free);
147 printk("%d reserved pages\n",reserved);
148 printk("%d pages shared\n",shared);
149 show_buffers();
150 #ifdef CONFIG_NET
151 show_net_buffers();
152 #endif
153 }
154
155 extern unsigned long free_area_init(unsigned long, unsigned long);
156
157
158
159
160
161
162
163
/*
 * Initialize paging and hand the free range [start_mem, end_mem) to
 * the page allocator.
 *
 * The page-table construction below is compiled out -- it is a MIPS
 * leftover (note the KSEG0/cacheflush references).  On this port the
 * function only delegates to free_area_init().
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{

#if 0
	pgd_t * pg_dir;
	pte_t * pg_table;
	unsigned long tmp;
	unsigned long address;

	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		/* Allocate a page table from the bump pointer if needed */
		if (pgd_none(pg_dir[0])) {
			pgd_set(pg_dir, (pte_t *) start_mem);
			start_mem += PAGE_SIZE;
		}



		pg_table = (pte_t *) pgd_page(pg_dir[0]);
		pgd_set(pg_dir, pg_table);
		pg_dir++;
		/* Identity-map up to end_mem, clear the rest of the table */
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = mk_pte(address, PAGE_SHARED);
			else
				pte_clear(pg_table);
			address += PAGE_SIZE;
		}
	}
#if KERNELBASE == KSEG0
	cacheflush();
#endif
	invalidate();
#endif
	return free_area_init(start_mem, end_mem);
}
202
/*
 * Release all usable RAM to the page allocator and print the memory
 * banner.  Pages between start_mem and end_mem are first marked
 * non-reserved; pages still flagged reserved after the driver hooks
 * run are counted (as code or data) instead of being freed.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* Everything from start_mem up is usable */
	start_mem = PAGE_ALIGN(start_mem);

#if 0
	printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)].reserved = 0;
		start_mem += PAGE_SIZE;
	}
	/* Give drivers a chance to claim memory before it is freed */
#ifdef CONFIG_DESKSTATION_TYNE
	deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
	scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
	{
		if (mem_map[MAP_NR(tmp)].reserved)
		{
			/*
			 * NOTE(review): the "if (0)" arm means reservedpages
			 * can never be incremented -- every reserved page is
			 * counted as code or data in the banner below.
			 * Presumably a disabled classification; confirm the
			 * intended condition before re-enabling.
			 */
			if (0)
			{
				reservedpages++;
			} else if (tmp < (unsigned long) &etext)
			{
				codepages++;
			} else
			{
				datapages++;
			}
			continue;
		}
		/* Not reserved: hand the page to the allocator */
		mem_map[MAP_NR(tmp)].count = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		((int)high_memory - (int)KERNELBASE) >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));
	invalidate();
	return;
}
266
/*
 * Fill in the memory fields of *val for sys_sysinfo().
 *
 * Currently a stub: the whole body is compiled out (the disabled code
 * still indexes mem_map[] as a bitmask array, which no longer matches
 * the struct-based mem_map used elsewhere in this file), so *val is
 * left untouched.  NOTE(review): callers presumably pre-clear the
 * struct -- verify before relying on these fields.
 */
void si_meminfo(struct sysinfo *val)
{
#if 0
	int i;

	i = high_memory >> PAGE_SHIFT;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (mem_map[i] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		if (!mem_map[i])
			continue;
		val->sharedram += mem_map[i]-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
#endif
}
290
291
292
293
294
295
296
297
/*
 * Initial Block Address Translation (BAT) register images.  Each BAT
 * is an { upper, lower } register-pair image: the upper half carries
 * the effective block address and block length, the lower half the
 * physical block address plus access/attribute bits.  Exact field
 * order follows the BAT typedef (declared elsewhere) -- confirm
 * against the MMU header before reordering initializers.
 */
/* 256MB block: effective 0x80000000 mapped 1:1 */
BAT BAT0 =
{
	{
		0x80000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x80000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
/* 256MB block: effective 0xC0000000 mapped 1:1 */
BAT BAT1 =
{
	{
		0xC0000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0xC0000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
/* BAT2: currently an invalid/zero mapping; the #else branch kept a
   0x90000000 -> physical 0 alternative. */
BAT BAT2 =
{
#if 1
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
#else
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
#endif
};
/* BAT3: invalid/zero mapping */
BAT BAT3 =
{
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
};
/* Temporary mapping of effective 0x90000000 onto physical 0 */
BAT TMP_BAT2 =
{
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
};
400
/* Image of the SDR1 register (hash base | mask); computed in MMU_init(). */
unsigned long _SDR1;
/* Hardware hash table: base, byte size, and hash mask.
   NOTE(review): presumably filled in by find_end_of_memory() -- not
   visible in this file. */
PTE *Hash;
int Hash_size, Hash_mask;
/* Cache-policy knobs, consulted in MMU_hash_page(); cache_is_copyback
   is settable from the boot line via cache_mode(). */
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

/* Ring buffer of the most recent mappings, dumped by MMU_hash_page()
   when the hash table overflows. */
#define NUM_MAPPINGS 8
struct
{
	int va, pa, task;
} last_mappings[NUM_MAPPINGS];
int next_mapping = 0;
413
414
/* Minimal singly-linked list node; used for the MMU page pool below. */
struct item
{
	struct item *next;
};

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS 16
#define MAX_MMU_PAGES 8

/* Free list of page-table pages.  The backing array is one page larger
   than needed so MMU_init() can round the base up to a page boundary. */
static struct item _free_pages;
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];
429
430
431
432
433
434 MMU_free_item(struct item *hdr, struct item *elem)
435 {
436 if (hdr->next == (struct item *)NULL)
437 {
438 elem->next = (struct item *)NULL;
439 } else
440 {
441 elem->next = hdr->next;
442 }
443 hdr->next = elem;
444 }
445
446 struct item *
447 MMU_get_item(struct item *hdr)
448 {
449 struct item *item;
450 if ((item = hdr->next) != (struct item *)NULL)
451 {
452 item = hdr->next;
453 hdr->next = item->next;
454 }
455 return (item);
456 }
457
458
459
460
461
462
463
464
465 extern char _start[], _end[];
466
/*
 * One-time MMU bring-up: size memory, compute SDR1, build the free
 * pool of page-table pages, point init_task at swapper_pg_dir, set up
 * its segment registers, and map the kernel plus the hash table 1:1
 * (virtual = physical | KERNELBASE, via the 0x00FFFFFF mask).
 */
void MMU_init(void)
{
	int i, p;
	SEGREG *segs;
	_printk("MMU init - started\n");
	find_end_of_memory();
	_printk(" Start at 0x%08X, End at 0x%08X, Hash at 0x%08X\n", _start, _end, Hash);
	/* SDR1 = hash-table physical base OR'd with the size mask */
	_SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
	/* Round the static pool up to a page boundary, then free each page */
	p = (int)mmu_pages;
	p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
	_free_pages.next = (struct item *)NULL;
	for (i = 0; i < MAX_MMU_PAGES; i++)
	{
		MMU_free_item(&_free_pages, (struct item *)p);
		p += MMU_PAGE_SIZE;
	}

	/* init_task runs on the swapper page tables */
	init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

	/* Segment registers: kernel-protected, VSID = segment number */
	segs = (SEGREG *)init_task.tss.segs;
	for (i = 0; i < 16; i++)
	{
		segs[i].ks = 0;
		segs[i].kp = 1;
		segs[i].vsid = i;
	}

#if 0
	for (i = (int)_start; i <= (int)_end; i += MMU_PAGE_SIZE)
#else
	/* Map the kernel image up to the hash table */
	for (i = (int)_start; i <= (int)Hash; i += MMU_PAGE_SIZE)
#endif
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}
	/* Map the hash table itself.
	   NOTE(review): the "<=" above means the first hash-table page is
	   mapped by both loops -- looks harmless but confirm. */
	for (i = (int)Hash; i < (int)Hash+Hash_size; i += MMU_PAGE_SIZE)
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}
	_printk("MMU init - done!\n");
}
512
513 pte *
514 MMU_get_page(void)
515 {
516 pte *pg;
517 if ((pg = (pte *)MMU_get_item(&_free_pages)))
518 {
519 bzero((char *)pg, MMU_PAGE_SIZE);
520 }
521 _printk("MMU Allocate Page at %08X\n", pg);
522 return(pg);
523 }
524
525 MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
526 {
527 pte *pd, *pg;
528 if (va < (unsigned long)0x90000000)
529 _printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
530 if ((pte **)tss->pg_tables == (pte **)NULL)
531 {
532 (pte **)tss->pg_tables = (pte **)MMU_get_page();
533 if ((pte **)tss->pg_tables == (pte **)NULL)
534 {
535 _panic("Out of MMU pages (PD)\n");
536 }
537 }
538
539 pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
540 if (pd == (pte *)NULL)
541 {
542 pd = (pte *)MMU_get_page();
543 if (pd == (pte *)NULL)
544 {
545 _panic("Out of MMU pages (PG)\n");
546 }
547 ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
548 }
549
550 pg = &pd[(va>>PT_SHIFT)&PT_MASK];
551 *(long *)pg = 0;
552 pg->page_num = pa>>PG_SHIFT;
553 pg->flags = flags;
554 MMU_hash_page(tss, va, pg);
555 }
556
557
558
559
560 MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
561 {
562 int hash, page_index, segment, i, h, _h, api, vsid, perms;
563 PTE *_pte, *empty, *slot;
564 PTE *slot0, *slot1;
565 extern char _etext;
566
567
568
569
570 last_mappings[next_mapping].va = va;
571 last_mappings[next_mapping].pa = pg?*(int *)pg:0;
572 last_mappings[next_mapping].task = current;
573 if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
574
575
576 page_index = ((int)va & 0x0FFFF000) >> 12;
577 segment = (unsigned int)va >> 28;
578 api = page_index >> 10;
579 vsid = ((SEGREG *)tss->segs)[segment].vsid;
580 empty = slot = (PTE *)NULL;
581 for (_h = 0; _h < 2; _h++)
582 {
583 hash = page_index ^ vsid;
584 if (_h)
585 {
586 hash = ~hash;
587 }
588 hash &= 0x3FF | (Hash_mask << 10);
589 hash *= 8;
590 _pte = &Hash[hash];
591
592 if (_h)
593 {
594 slot1 = _pte;
595 } else
596 {
597 slot0 = _pte;
598 }
599 for (i = 0; i < 8; i++, _pte++)
600 {
601 if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
602 {
603 h = _h;
604 slot = _pte;
605 goto found_it;
606 }
607 if ((empty == (PTE *)NULL) && !_pte->v)
608 {
609 h = _h;
610 empty = _pte;
611 }
612 }
613 }
614 if (slot == (PTE *)NULL)
615 {
616 if (pg == (pte *)NULL)
617 {
618 return (0);
619 }
620 if (empty == (PTE *)NULL)
621 {
622 printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
623 printk("Slot0:\n");
624 _pte = slot0;
625 for (i = 0; i < 8; i++, _pte++)
626 {
627 printk(" V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
628 }
629 printk("Slot1:\n");
630 _pte = slot1;
631 for (i = 0; i < 8; i++, _pte++)
632 {
633 printk(" V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
634 }
635 cnpause();
636 printk("Last mappings:\n");
637 for (i = 0; i < NUM_MAPPINGS; i++)
638 {
639 printk(" VA: %08x, PA: %08X, TASK: %08X\n",
640 last_mappings[next_mapping].va,
641 last_mappings[next_mapping].pa,
642 last_mappings[next_mapping].task);
643 if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
644 }
645 cnpause();
646 _panic("Hash table full!\n");
647 }
648 slot = empty;
649 }
650 found_it:
651 #if 0
652 _printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
653 #endif
654 _tlbie(va);
655 if (pg)
656 {
657 slot->v = 1;
658 slot->vsid = vsid;
659 slot->h = h;
660 slot->api = api;
661 if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
662 {
663 slot->rpn = pg->page_num - (KERNELBASE>>12);
664 } else
665 {
666 slot->rpn = pg->page_num;
667 }
668 slot->r = 0;
669 slot->c = 0;
670 slot->i = 0;
671 slot->g = 0;
672 if (cache_is_copyback)
673 {
674 if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
675 {
676 slot->w = 0;
677 slot->m = 1;
678 } else
679 {
680 slot->w = 1;
681 slot->m = 0;
682 }
683 } else
684 {
685 slot->w = 1;
686 slot->m = 0;
687 }
688 if (pg->flags & _PAGE_USER)
689 {
690 if (pg->flags & _PAGE_RW)
691 {
692 perms = PP_RWRW;
693 } else
694 {
695 perms = PP_RWRX;
696 }
697 } else
698 {
699 perms = PP_RWRW;
700 perms = PP_RWXX;
701 }
702 #ifdef SHOW_FAULTS
703 if (va < KERNELBASE)
704 _printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
705 #endif
706 slot->pp = perms;
707 return (0);
708 } else
709 {
710 int flags = 0;
711 if (slot->r) flags |= _PAGE_ACCESSED;
712 if (slot->c) flags |= _PAGE_DIRTY;
713 slot->v = 0;
714 #ifdef SHOW_FAULTS
715 _printk("Pull VA: %08X, Flags: %x\n", va, flags);
716 #endif
717 return (flags);
718 }
719 }
720
721
722
723
724 void
725 invalidate(void)
726 {
727 int i, j, flags;
728 unsigned long address;
729 pgd_t *pgd;
730 pte_t *_pte;
731 #if 0
732 _tlbia();
733 #endif
734 pgd = pgd_offset(current->mm, 0);
735 if (!pgd) return;
736 address = 0;
737 for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
738 {
739 if (*(long *)pgd)
740 {
741
742 _pte = pte_offset(pmd_offset(pgd,0),0);
743 if (_pte)
744 {
745 for (j = 0; j < PTRS_PER_PTE; j++)
746 {
747 if (pte_present(*_pte))
748 {
749 flags = MMU_hash_page(¤t->tss, address, 0);
750 ((pte *)_pte)->flags |= flags;
751 }
752 _pte++;
753 address += PAGE_SIZE;
754 }
755 } else
756 {
757 address += PAGE_SIZE*PTRS_PER_PTE;
758 }
759 } else
760 {
761 address += PAGE_SIZE*PTRS_PER_PTE;
762 }
763 pgd++;
764 }
765 }
766
767 void
768 cache_mode(char *str, int *ints)
769 {
770 cache_is_copyback = ints[0];
771 }
772
773