This source file includes the following definitions:
- __bad_pagetable
- __bad_page
- show_mem
- sparc_context_init
- paging_init
- taint_real_pages
- mem_init
- si_meminfo

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>

extern void show_net_buffers(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
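
/*
 * Hand back a zeroed-out empty page table (EMPTY_PGT) and a zeroed,
 * dirty, shared empty page (EMPTY_PGE).  These serve as the safe
 * fall-back mappings used when a real page or page table cannot be
 * supplied.
 */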
pte_t *__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}
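
/*
 * Dump a summary of memory usage to the console: free areas, free swap,
 * and per-page accounting of reserved, free and shared pages.
 */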
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = MAP_NR(high_memory);
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (!mem_map[i].count)
			free++;
		else
			shared += mem_map[i].count - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}

extern pgprot_t protection_map[16];
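
/*
 * Carve the MMU context list pool out of start_mem, number each
 * context, and place every entry on the free context list.  Returns
 * the updated start_mem.
 */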
unsigned long sparc_context_init(unsigned long start_mem, int numctx)
{
	int ctx;

	ctx_list_pool = (struct ctx_list *) start_mem;
	start_mem += (numctx * sizeof(struct ctx_list));
	for(ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = 0;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for(ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
	return start_mem;
}

extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
extern unsigned long device_scan(unsigned long);
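
/*
 * Set up paging for this machine: dispatch to the sun4c or SRMMU code
 * depending on the CPU model, fill in the generic protection_map, and
 * finish with the PROM device scan.
 */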
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	switch(sparc_cpu_model) {
	case sun4c:
	case sun4e:
		start_mem = sun4c_paging_init(start_mem, end_mem);
		break;
	case sun4m:
	case sun4d:
		start_mem = srmmu_paging_init(start_mem, end_mem);
		break;
	default:
		prom_printf("paging_init: Cannot init paging on this Sparc\n");
		prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
		prom_printf("paging_init: Halting...\n");
		prom_halt();
	}
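
	/* Map the 16 vma protection combinations onto MMU page protections. */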
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
	return device_scan(start_mem);
}

struct cache_palias *sparc_aliases;

extern int min_free_pages;
extern int free_pages_low;
extern int free_pages_high;

int physmem_mapped_contig = 1;
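
/*
 * Clear PG_reserved on every page between start_mem and end_mem that
 * actually lies inside one of the physical memory banks in sp_banks[],
 * so that only real RAM gets handed to the page allocator.
 */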
static void taint_real_pages(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long addr, tmp2 = 0;

	if(physmem_mapped_contig) {
		for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
			for(tmp2 = 0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
				unsigned long phys_addr = (addr - PAGE_OFFSET);
				unsigned long base = sp_banks[tmp2].base_addr;
				unsigned long limit = base + sp_banks[tmp2].num_bytes;

				if((phys_addr >= base) && (phys_addr < limit) &&
				   ((phys_addr + PAGE_SIZE) < limit))
					mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
			}
		}
	} else {
		for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE)
			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
	}
}
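
/*
 * Final memory setup: zero ZERO_PAGE, mark the pages holding the kernel
 * image as reserved, un-reserve real RAM via taint_real_pages(), free
 * everything else to the page allocator, print the memory banner and
 * set the free-page watermarks.
 */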
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	unsigned long tmp2, addr;
	extern char etext;

	memset((void *) ZERO_PAGE, 0, PAGE_SIZE);

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	start_mem = PAGE_ALIGN(start_mem);

	addr = PAGE_OFFSET;
	while(addr < start_mem) {
		mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
		addr += PAGE_SIZE;
	}

	taint_real_pages(start_mem, end_mem);
	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if(PageReserved(mem_map + MAP_NR(addr))) {
			if (addr < (unsigned long) &etext)
				codepages++;
			else if(addr < start_mem)
				datapages++;
			continue;
		}
		mem_map[MAP_NR(addr)].count = 1;
		free_page(addr);
	}

	tmp2 = nr_free_pages << PAGE_SHIFT;

	printk("Memory: %luk available (%dk kernel code, %dk data)\n",
	       tmp2 >> 10,
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));

	min_free_pages = nr_free_pages >> 7;
	if(min_free_pages < 16)
		min_free_pages = 16;
	free_pages_low = min_free_pages + (min_free_pages >> 1);
	free_pages_high = min_free_pages + min_free_pages;
}
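
/*
 * Fill in the struct sysinfo used by sysinfo(2): total, shared, free
 * and buffer memory, all reported in bytes.
 */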
void si_meminfo(struct sysinfo *val)
{
	int i;

	i = MAP_NR(high_memory);
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (PageReserved(mem_map + i))
			continue;
		val->totalram++;
		if (!mem_map[i].count)
			continue;
		val->sharedram += mem_map[i].count - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}