/*
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */
7 #include <linux/config.h>
8 #include <linux/signal.h>
9 #include <linux/sched.h>
10 #include <linux/head.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #include <linux/ptrace.h>
16 #include <linux/mman.h>
17 #include <linux/mm.h>
18
19 #include <asm/system.h>
20 #include <asm/segment.h>
21 #include <asm/vac-ops.h>
22
/* Helpers defined elsewhere in the kernel. */
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *, struct pt_regs *, long);
extern void show_net_buffers(void);

/* Reserves the PROM's segmaps; returns the updated segmap count. */
extern int map_the_prom(int);

/* Filled in by the boot/probe code: the segmap index used to mean
 * "invalid", and the number of MMU segmaps and contexts present. */
extern int invalid_segment, num_segmaps, num_contexts;
31
32 /* 33 * BAD_PAGE is the page that is used for page faults when linux 34 * is out-of-memory. Older versions of linux just did a 35 * do_exit(), but using this instead means there is less risk 36 * for a process dying in kernel mode, possibly leaving a inode 37 * unused etc.. 38 * 39 * BAD_PAGETABLE is the accompanying page-table: it is initialized 40 * to point to BAD_PAGE entries. 41 * 42 * ZERO_PAGE is a special page that is used for zero-initialized 43 * data and COW. 44 */ 45 unsignedlong__bad_pagetable(void)
/* */ 46 { 47 memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
48 returnEMPTY_PGT;
49 } 50
51 unsignedlong__bad_page(void)
/* */ 52 { 53 memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
54 returnEMPTY_PGE;
55 } 56
57 unsignedlong__zero_page(void)
/* */ 58 { 59 memset((void *) ZERO_PGE, 0, PAGE_SIZE);
60 returnZERO_PGE;
61 } 62
63 voidshow_mem(void)
/* */ 64 { 65 inti,free = 0,total = 0,reserved = 0;
66 intshared = 0;
67
68 printk("Mem-info:\n");
69 show_free_areas();
70 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
71 i = high_memory >> PAGE_SHIFT;
72 while (i-- > 0) { 73 total++;
74 if (mem_map[i] & MAP_PAGE_RESERVED)
75 reserved++;
76 elseif (!mem_map[i])
77 free++;
78 else 79 shared += mem_map[i]-1;
80 } 81 printk("%d pages of RAM\n",total);
82 printk("%d free pages\n",free);
83 printk("%d reserved pages\n",reserved);
84 printk("%d pages shared\n",shared);
85 show_buffers();
86 #ifdefCONFIG_NET 87 show_net_buffers();
88 #endif 89 } 90
/* Core mm: builds the free-area lists; returns the new start of free memory. */
extern unsigned long free_area_init(unsigned long, unsigned long);
92
/*
 * paging_init() sets up the page tables: in the alpha version this actually
 * unmaps the bootup page table (as we're now in KSEG, so we don't need it).
 *
 * The bootup sequence put the virtual page table into high memory: that
 * means that we can change the L1 page table by just using VL1p below.
 */
101 unsignedlongpaging_init(unsignedlongstart_mem, unsignedlongend_mem)
/* */ 102 { 103 intpg_segmap = 0;
104 unsignedlongi, a, b, mask=0;
105 registerintnum_segs, num_ctx;
106 registerchar * c;
107
108 num_segs = num_segmaps;
109 num_ctx = num_contexts;
110
111 num_segs -= 1;
112 invalid_segment = num_segs;
113
114 /* On the sparc we first need to allocate the segmaps for the 115 * PROM's virtual space, and make those segmaps unusable. We 116 * map the PROM in ALL contexts therefore the break key and the 117 * sync command work no matter what state you took the machine 118 * out of 119 */ 120
121 printk("mapping the prom...\n");
122 num_segs = map_the_prom(num_segs);
123
124 start_mem = PAGE_ALIGN(start_mem);
125
126 /* ok, allocate the kernel pages, map them in all contexts 127 * (with help from the prom), and lock them. Isn't the sparc 128 * fun kiddies? TODO 129 */ 130
131 b=PGDIR_ALIGN(start_mem)>>18;
132 c= (char *)0x0;
133
134 printk("mapping kernel in all contexts...\n");
135
136 for(a=0; a<b; a++)
137 { 138 for(i=1; i<num_contexts; i++)
139 { 140 /* map the kernel virt_addrs */ 141 (*(romvec->pv_setctxt))(i, (char *) c, a);
142 c += 4096;
143 } 144 } 145
146 /* Ok, since now mapped in all contexts, we can free up 147 * context zero to be used amongst user processes. 148 */ 149
150 /* free context 0 here TODO */ 151
152 /* invalidate all user pages and initialize the pte struct 153 * for userland. TODO 154 */ 155
156 /* Make the kernel text unwritable and cacheable, the prom 157 * loaded out text as writable, only sneaky sunos kernels need 158 * self-modifying code. 159 */ 160
161 a= (unsignedlong) etext;
162 mask=~(PTE_NC|PTE_W); /* make cacheable + not writable */ 163
164 printk("changing kernel text perms...\n");
165
166
167 /* must do for every segment since kernel uses all contexts 168 * and unlike some sun kernels I know of, we can't hard wire 169 * context 0 just for the kernel, that is unnecessary. 170 */ 171
172 for(i=0; i<8; i++)
173 { 174 b=PAGE_ALIGN((unsignedlong) trapbase);
175
176 switch_to_context(i);
177
178 for(;b<a; b+=4096)
179 { 180 put_pte(b, (get_pte(b) & mask));
181 } 182 } 183
184 #if 0 /* bogosity */ 185 invalidate(); /* flush the virtual address cache */ 186 #endif/* bletcherous */ 187
188 printk("\nCurrently in context - ");
189 for(i=0; i<num_contexts; i++)
190 { 191 switch_to_context(i);
192 printk("%d ", (int) i);
193 } 194
195 switch_to_context(0);
196
197 /* invalidate all user segmaps for virt addrs 0-KERNBASE */ 198
199 /* WRONG, now I just let the kernel sit in low addresses only 200 * from 0 -- end_kernel just like i386-linux. This will make 201 * mem-code a bit easier to cope with. 202 */ 203
204 printk("\ninvalidating user segmaps\n");
205 for(i = 0; i<8; i++)
206 { 207 switch_to_context(i);
208 a=((unsignedlong) &end);
209 for(a+=524288, pg_segmap=0; ++pg_segmap<=3584; a+=(1<<18))
210 put_segmap((unsignedlong *) a, (invalid_segment&0x7f));
211 } 212
213 printk("wheee! have I sold out yet?\n");
214
215 invalidate();
216 returnfree_area_init(start_mem, end_mem);
217 } 218
/*
 * mem_init() — placeholder on the sparc: all the real work is done
 * in paging_init(), so there is nothing to set up here yet.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	/* intentionally empty */
}
224 voidsi_meminfo(structsysinfo *val)
/* */ 225 { 226 inti;
227
228 i = high_memory >> PAGE_SHIFT;
229 val->totalram = 0;
230 val->sharedram = 0;
231 val->freeram = nr_free_pages << PAGE_SHIFT;
232 val->bufferram = buffermem;
233 while (i-- > 0) { 234 if (mem_map[i] & MAP_PAGE_RESERVED)
235 continue;
236 val->totalram++;
237 if (!mem_map[i])
238 continue;
239 val->sharedram += mem_map[i]-1;
240 } 241 val->totalram <<= PAGE_SHIFT;
242 val->sharedram <<= PAGE_SHIFT;
243 return;
244 }