This source file includes the following definitions:
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
- MMU_free_item
- MMU_get_item
- MMU_init
- MMU_get_page
- MMU_map_page
- MMU_hash_page
- invalidate
- cache_mode
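/*
 * Memory initialization for the PowerPC port: paging_init(), mem_init(),
 * and the MMU_* helpers that manage the hashed page table.
 */
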
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/pgtable.h>

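/* The kernel's master page directory */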
pgd_t swapper_pg_dir[1024];

pte *MMU_get_page(void);

#if 0
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];
#endif

#ifdef CONFIG_DESKSTATION_TYNE
extern void deskstation_tyne_dma_init(void);
#endif
#ifdef CONFIG_SCSI
extern void scsi_mem_init(unsigned long);
#endif
#ifdef CONFIG_SOUND
extern void sound_mem_init(void);
#endif
extern void die_if_kernel(char *, struct pt_regs *, long);
extern void show_net_buffers(void);
extern void find_end_of_memory(void);	/* assumed prototype; called from MMU_init() */

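/*
 * BAD_PAGETABLE and BAD_PAGE back references to nonexistent pages.
 * This port simply panics if they are ever used; the old MIPS
 * implementations are kept below under #if 0 for reference.
 */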
pte_t * __bad_pagetable(void)
{
	panic("__bad_pagetable");
#if 0
	extern char empty_bad_page_table[PAGE_SIZE];
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tsw\t%2,(%0)\n\t"
		"subu\t%1,%1,1\n\t"
		"bne\t$0,%1,1b\n\t"
		"addiu\t%0,%0,1\n\t"
		".set\treorder"
		:"=r" (dummy),
		 "=r" (dummy)
		:"r" (pte_val(BAD_PAGE)),
		 "0" ((long) empty_bad_page_table),
		 "1" (PTRS_PER_PAGE));

	return (pte_t *) empty_bad_page_table;
#endif
}

pte_t __bad_page(void)
{
	panic("__bad_page");
#if 0
	extern char empty_bad_page[PAGE_SIZE];
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tsw\t$0,(%0)\n\t"
		"subu\t%1,%1,1\n\t"
		"bne\t$0,%1,1b\n\t"
		"addiu\t%0,%0,1\n\t"
		".set\treorder"
		:"=r" (dummy),
		 "=r" (dummy)
		:"0" ((long) empty_bad_page),
		 "1" (PTRS_PER_PAGE));

	return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
#endif
}

unsigned long __zero_page(void)
{
#if 0
	panic("__zero_page");
#else
	extern char empty_zero_page[PAGE_SIZE];

	bzero(empty_zero_page, PAGE_SIZE);
	return (unsigned long) empty_zero_page;
#endif
}

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = high_memory >> PAGE_SHIFT;
	while (i-- > 0) {
		total++;
		if (mem_map[i].reserved)
			reserved++;
		else if (!mem_map[i].count)
			free++;
		else
			shared += mem_map[i].count-1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

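/*
 * paging_init() is expected to set up the kernel page tables; here the
 * old MIPS version is compiled out (#if 0), so it only hands the free
 * memory range to free_area_init().
 */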
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
#if 0
	pgd_t * pg_dir;
	pte_t * pg_table;
	unsigned long tmp;
	unsigned long address;

	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		if (pgd_none(pg_dir[0])) {
			pgd_set(pg_dir, (pte_t *) start_mem);
			start_mem += PAGE_SIZE;
		}

		pg_table = (pte_t *) pgd_page(pg_dir[0]);
		pgd_set(pg_dir, pg_table);
		pg_dir++;
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++, pg_table++) {
			if (address < end_mem)
				*pg_table = mk_pte(address, PAGE_SHARED);
			else
				pte_clear(pg_table);
			address += PAGE_SIZE;
		}
	}
#if KERNELBASE == KSEG0
	cacheflush();
#endif
	invalidate();
#endif
	return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	start_mem = PAGE_ALIGN(start_mem);

#if 0
	printk("Mem init - Start: %x, End: %x\n", start_mem, high_memory);
#endif
	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)].reserved = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_DESKSTATION_TYNE
	deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
	scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	for (tmp = KERNELBASE ; tmp < high_memory ; tmp += PAGE_SIZE)
	{
		if (mem_map[MAP_NR(tmp)].reserved)
		{
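			/*
			 * Pages still marked reserved here hold the kernel
			 * itself, so they are counted as kernel code or
			 * data rather than as reserved pages.
			 */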
			if (0)
			{
				reservedpages++;
			} else if (tmp < (unsigned long) &etext)
			{
				codepages++;
			} else
			{
				datapages++;
			}
			continue;
		}
		mem_map[MAP_NR(tmp)].count = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
	       tmp >> 10,
	       ((int)high_memory - (int)KERNELBASE) >> 10,
	       codepages << (PAGE_SHIFT-10),
	       reservedpages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));
	invalidate();
	return;
}

void si_meminfo(struct sysinfo *val)
{
#if 0
	int i;

	i = high_memory >> PAGE_SHIFT;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (mem_map[i] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		if (!mem_map[i])
			continue;
		val->sharedram += mem_map[i]-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
#endif
}
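/*
 * Initial BAT (block address translation) register images.  Each entry
 * pairs the upper BAT half (block effective address, block length,
 * validity bits) with the lower half (physical block number, storage
 * attributes, page protection).  BAT2/BAT3 start out invalid, and
 * TMP_BAT2 is a temporary 0x90000000 -> physical 0 mapping apparently
 * used during early setup.
 */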
BAT BAT0 =
{
	{
		0x80000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x80000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
BAT BAT1 =
{
	{
		0xC0000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0xC0000000>>17,
		1,
		1,
		0,
		1,
		BPP_RW
	}
};
BAT BAT2 =
{
#if 1
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
#else
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
#endif
};
BAT BAT3 =
{
	{
		0x00000000>>17,
		BL_256M,
		0,
		0,
	},
	{
		0x00000000>>17,
		1,
		1,
		0,
		0,
		BPP_RW
	}
};
BAT TMP_BAT2 =
{
	{
		0x90000000>>17,
		BL_256M,
		1,
		1,
	},
	{
		0x00000000>>17,
		1,
		0,
		0,
		0,
		BPP_RW
	}
};

unsigned long _SDR1;
PTE *Hash;
int Hash_size, Hash_mask;
int cache_is_copyback = 1;
int kernel_pages_are_copyback = 1;

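/* Ring buffer recording the most recent mappings, for debugging */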
#define NUM_MAPPINGS 8
struct
{
	int va, pa, task;
} last_mappings[NUM_MAPPINGS];
int next_mapping = 0;

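/* Minimal singly-linked free list for handing out early MMU pages */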
struct item
{
	struct item *next;
};

#ifndef NULL
#define NULL 0
#endif

#define MAX_CONTEXTS	16
#define MAX_MMU_PAGES	8

static struct item _free_pages;
static char mmu_pages[(MAX_MMU_PAGES+1)*MMU_PAGE_SIZE];

void
MMU_free_item(struct item *hdr, struct item *elem)
{
	/* Push 'elem' onto the front of the list headed by 'hdr' */
	elem->next = hdr->next;
	hdr->next = elem;
}

struct item *
MMU_get_item(struct item *hdr)
{
	/* Pop and return the first item, or NULL if the list is empty */
	struct item *item;

	if ((item = hdr->next) != (struct item *)NULL)
	{
		hdr->next = item->next;
	}
	return (item);
}

extern char _start[], _end[];

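/*
 * MMU_init() discovers the memory size, seeds the early page free
 * list, attaches swapper_pg_dir and fresh segment registers to the
 * init task, and maps the kernel image and hash table one page at
 * a time.
 */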
void MMU_init(void)
{
	int i, p;
	SEGREG *segs;

	find_end_of_memory();

	_SDR1 = ((unsigned long)Hash & 0x00FFFFFF) | Hash_mask;
	p = (int)mmu_pages;
	p = (p + (MMU_PAGE_SIZE-1)) & ~(MMU_PAGE_SIZE-1);
	_free_pages.next = (struct item *)NULL;
	for (i = 0; i < MAX_MMU_PAGES; i++)
	{
		MMU_free_item(&_free_pages, (struct item *)p);
		p += MMU_PAGE_SIZE;
	}
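	/* The init task runs on the kernel's master page directory */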
	init_task.tss.pg_tables = (unsigned long *)swapper_pg_dir;

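	/* Give the init task kernel-mode segment registers, VSID = segment number */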
	segs = (SEGREG *)init_task.tss.segs;
	for (i = 0; i < 16; i++)
	{
		segs[i].ks = 0;
		segs[i].kp = 1;
		segs[i].vsid = i;
	}

#if 0
	for (i = (int)_start; i <= (int)_end; i += MMU_PAGE_SIZE)
#else
	/* Map in the kernel image, plus everything allocated below the hash table */
	for (i = (int)_start; i <= (int)Hash; i += MMU_PAGE_SIZE)
#endif
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}

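	/* Map the hash table itself into the kernel's address space */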
	for (i = (int)Hash; i < (int)Hash+Hash_size; i += MMU_PAGE_SIZE)
	{
		MMU_map_page(&init_task.tss, i, i & 0x00FFFFFF, PAGE_KERNEL);
	}
}

pte *
MMU_get_page(void)
{
	pte *pg;

	if ((pg = (pte *)MMU_get_item(&_free_pages)))
	{
		bzero((char *)pg, MMU_PAGE_SIZE);
	}

	return (pg);
}

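/*
 * Enter a single va -> pa translation into a task's two-level page
 * tables, allocating directory/table pages as needed, then hash the
 * new pte into the hardware hash table.
 */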
void
MMU_map_page(struct thread_struct *tss, unsigned long va, unsigned long pa, int flags)
{
	pte *pd, *pg;

	if (va < (unsigned long)0x90000000)
		_printk("Thread: %x, Map VA: %08x -> PA: %08X, Flags: %x\n", tss, va, pa, flags);
	if (tss->pg_tables == NULL)
	{
		/* Allocate a page directory if the task doesn't have one yet */
		tss->pg_tables = (unsigned long *)MMU_get_page();
		if (tss->pg_tables == NULL)
		{
			_panic("Out of MMU pages (PD)\n");
		}
	}
	/* Look up the page table, allocating it on first use */
	pd = ((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK];
	if (pd == (pte *)NULL)
	{
		pd = (pte *)MMU_get_page();
		if (pd == (pte *)NULL)
		{
			_panic("Out of MMU pages (PG)\n");
		}
		((pte **)tss->pg_tables)[(va>>PD_SHIFT)&PD_MASK] = (pte *)((unsigned long)pd | _PAGE_TABLE);
	}
	pg = &pd[(va>>PT_SHIFT)&PT_MASK];
	*(long *)pg = 0;
	pg->page_num = pa>>PG_SHIFT;
	pg->flags = flags;
	MMU_hash_page(tss, va, pg);
}
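/*
 * Insert (pg != NULL) or evict (pg == NULL) a translation in the
 * hardware hash table, probing the primary and secondary hash groups
 * for a matching or free slot.  On eviction, returns the accessed and
 * dirty bits accumulated by hardware so they can be folded back into
 * the Linux pte.
 */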
int
MMU_hash_page(struct thread_struct *tss, unsigned long va, pte *pg)
{
	int hash, page_index, segment, i, h, _h, api, vsid, perms;
	PTE *_pte, *empty, *slot;
	PTE *slot0, *slot1;
	extern char _etext;

	/* Remember this mapping in the debug ring buffer */
	last_mappings[next_mapping].va = va;
	last_mappings[next_mapping].pa = pg?*(int *)pg:0;
	last_mappings[next_mapping].task = (int)current;
	if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;

	/* Compute the hash inputs: page index, API, and the segment's VSID */
	page_index = ((int)va & 0x0FFFF000) >> 12;
	segment = (unsigned int)va >> 28;
	api = page_index >> 10;
	vsid = ((SEGREG *)tss->segs)[segment].vsid;
	empty = slot = (PTE *)NULL;
	for (_h = 0; _h < 2; _h++)
	{
		/* Primary hash first, then the complemented secondary hash */
		hash = page_index ^ vsid;
		if (_h)
		{
			hash = ~hash;
		}
		hash &= 0x3FF | (Hash_mask << 10);
		hash *= 8;	/* Eight PTEs per group */
		_pte = &Hash[hash];

		if (_h)
		{
			slot1 = _pte;
		} else
		{
			slot0 = _pte;
		}
		/* Look for a matching entry, remembering the first free slot */
		for (i = 0; i < 8; i++, _pte++)
		{
			if (_pte->v && _pte->vsid == vsid && _pte->h == _h && _pte->api == api)
			{
				h = _h;
				slot = _pte;
				goto found_it;
			}
			if ((empty == (PTE *)NULL) && !_pte->v)
			{
				h = _h;
				empty = _pte;
			}
		}
	}
	if (slot == (PTE *)NULL)
	{
		if (pg == (pte *)NULL)
		{
			return (0);
		}
		if (empty == (PTE *)NULL)
		{
			/* Both hash groups are full - dump them and give up */
			printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
			printk("Slot0:\n");
			_pte = slot0;
			for (i = 0; i < 8; i++, _pte++)
			{
				printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
			}
			printk("Slot1:\n");
			_pte = slot1;
			for (i = 0; i < 8; i++, _pte++)
			{
				printk("  V: %d, VSID: %05x, H: %d, RPN: %04x, R: %d, C: %d, PP: %x\n", _pte->v, _pte->vsid, _pte->h, _pte->rpn, _pte->r, _pte->c, _pte->pp);
			}
			cnpause();
			printk("Last mappings:\n");
			for (i = 0; i < NUM_MAPPINGS; i++)
			{
				printk("  VA: %08x, PA: %08X, TASK: %08X\n",
				       last_mappings[next_mapping].va,
				       last_mappings[next_mapping].pa,
				       last_mappings[next_mapping].task);
				if (++next_mapping == NUM_MAPPINGS) next_mapping = 0;
			}
			cnpause();
			_panic("Hash table full!\n");
		}
		slot = empty;
	}
found_it:
#if 0
	_printk("Map VA: %08X, Slot: %08X[%08X/%08X], H: %d\n", va, slot, slot0, slot1, h);
#endif
	_tlbie(va);	/* Flush any stale translation for this address */
	if (pg)
	{
		/* Fill in the hash-table entry from the Linux pte */
		slot->v = 1;
		slot->vsid = vsid;
		slot->h = h;
		slot->api = api;
		if (((pg->page_num << 12) & 0xF0000000) == KERNELBASE)
		{
			slot->rpn = pg->page_num - (KERNELBASE>>12);
		} else
		{
			slot->rpn = pg->page_num;
		}
		slot->r = 0;
		slot->c = 0;
		slot->i = 0;
		slot->g = 0;
		if (cache_is_copyback)
		{
			if (kernel_pages_are_copyback || (pg->flags & _PAGE_USER) || (va < (unsigned long)&_etext))
			{
				/* Copy-back cacheable */
				slot->w = 0;
				slot->m = 1;
			} else
			{
				/* Write-through */
				slot->w = 1;
				slot->m = 0;
			}
		} else
		{
			slot->w = 1;
			slot->m = 0;
		}
		if (pg->flags & _PAGE_USER)
		{
			if (pg->flags & _PAGE_RW)
			{
				perms = PP_RWRW;
			} else
			{
				perms = PP_RWRX;
			}
		} else
		{
			perms = PP_RWXX;	/* Kernel pages: no user access */
		}
#ifdef SHOW_FAULTS
		if (va < KERNELBASE)
			_printk("VA: %08X, PA: %08X, Flags: %x, Perms: %d\n", va, pg->page_num<<12, pg->flags, perms);
#endif
		slot->pp = perms;
		return (0);
	} else
	{
		/* Invalidate the entry, handing back the R and C bits */
		int flags = 0;
		if (slot->r) flags |= _PAGE_ACCESSED;
		if (slot->c) flags |= _PAGE_DIRTY;
		slot->v = 0;
#ifdef SHOW_FAULTS
		_printk("Pull VA: %08X, Flags: %x\n", va, flags);
#endif
		return (flags);
	}
}
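/*
 * Walk the current task's user page tables and pull each present page
 * out of the hash table, folding the hardware R/C bits back into the
 * Linux pte flags.
 */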
void
invalidate(void)
{
	int i, j, flags;
	unsigned long address;
	pgd_t *pgd;
	pte_t *_pte;

#if 0
	_tlbia();
#endif
	pgd = pgd_offset(current->mm, 0);
	if (!pgd) return;
	address = 0;
	for (i = 0 ; (i < PTRS_PER_PGD) && (address < KERNELBASE); i++)
	{
		if (*(long *)pgd)
		{
			_pte = pte_offset(pmd_offset(pgd,0),0);
			if (_pte)
			{
				for (j = 0; j < PTRS_PER_PTE; j++)
				{
					if (pte_present(*_pte))
					{
						/* Unhash the page, keeping its R/C bits */
						flags = MMU_hash_page(&current->tss, address, 0);
						((pte *)_pte)->flags |= flags;
					}
					_pte++;
					address += PAGE_SIZE;
				}
			} else
			{
				address += PAGE_SIZE*PTRS_PER_PTE;
			}
		} else
		{
			address += PAGE_SIZE*PTRS_PER_PTE;
		}
		pgd++;
	}
}
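
/*
 * Boot-time hook: selects copy-back versus write-through data caching
 * for subsequent mappings (see MMU_hash_page()).
 */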
void
cache_mode(char *str, int *ints)
{
	cache_is_copyback = ints[0];
}