This source file includes the following definitions:
- __bad_pagetable
- __bad_page
- show_mem
- paging_init
- mem_init
- si_meminfo

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/dma.h>

/* The 4MB-page (PSE) optimization is not used on SMP kernels. */
#ifdef __SMP__
#undef USE_PENTIUM_MM
#endif

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
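
/*
 * BAD_PAGE and BAD_PAGETABLE are the fallbacks used when the kernel runs
 * out of memory during a page fault: __bad_pagetable() fills the static
 * empty_bad_page_table with BAD_PAGE entries, and __bad_page() returns a
 * dirty, shared mapping of the cleared empty_bad_page.
 */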
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (pte_val(BAD_PAGE)),
                 "D" ((long) empty_bad_page_table),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return (pte_t *) empty_bad_page_table;
}

pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (0),
                 "D" ((long) empty_bad_page),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
}
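
/*
 * show_mem() prints a memory usage summary to the console: free swap,
 * then total, free, reserved and shared RAM pages.  A page counts as
 * shared once for every reference beyond the first (count - 1).
 */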
void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);
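
/*
 * paging_init() builds the kernel page tables.  All physical memory up to
 * end_mem is mapped twice: at virtual address 0 and in the kernel segment
 * starting at page directory entry 768 (virtual 0xC0000000).  When
 * USE_PENTIUM_MM is defined (it is disabled on SMP) and the CPU reports
 * PSE support (x86_capability bit 3), 4MB pages are enabled in CR4 and
 * used instead of individual page tables.
 */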
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

        /* Leave physical page 0 alone; clearing it is deliberately disabled. */
#if 0
        memset((void *) 0, 0, PAGE_SIZE);
#endif
#ifdef __SMP__
        if (!smp_scan_config(0x0,0x400))
        {
                /*
                 * No SMP configuration found in the bottom 1K of base memory;
                 * try the top of base RAM and then the BIOS ROM area.
                 */
                if (!smp_scan_config(639*0x400,0x400))
                        smp_scan_config(0xF0000,0x10000);
        }
#endif
#ifdef TEST_VERIFY_AREA
        wp_works_ok = 0;
#endif
        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
#ifdef USE_PENTIUM_MM
                /*
                 * If the CPU supports 4MB pages (PSE, x86_capability bit 3),
                 * set the PSE bit in CR4 and map this 4MB chunk with a single
                 * page directory entry.
                 */
                if (x86_capability & 8) {
#ifdef GAS_KNOWS_CR4
                        __asm__("movl %%cr4,%%eax\n\t"
                                "orl $16,%%eax\n\t"
                                "movl %%eax,%%cr4"
                                : : :"ax");
#else
                        /* Older assemblers do not know %cr4; emit the opcodes by hand. */
                        __asm__(".byte 0x0f,0x20,0xe0\n\t"
                                "orl $16,%%eax\n\t"
                                ".byte 0x0f,0x22,0xe0"
                                : : :"ax");
#endif
                        wp_works_ok = 1;
                        pgd_val(pg_dir[0]) = _PAGE_TABLE | _PAGE_4M | address;
                        pgd_val(pg_dir[768]) = _PAGE_TABLE | _PAGE_4M | address;
                        pg_dir++;
                        address += 4*1024*1024;
                        continue;
                }
#endif
                /* Reuse the page table already installed by head.S, if any. */
                pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[768]));
                if (!pg_table) {
                        pg_table = (pte_t *) start_mem;
                        start_mem += PAGE_SIZE;
                }

                /*
                 * Point both the identity mapping (low entry) and the kernel
                 * mapping 768 entries above it (0xC0000000 and up) at this
                 * page table.
                 */
                pgd_val(pg_dir[0]) = _PAGE_TABLE | (unsigned long) pg_table;
                pgd_val(pg_dir[768]) = _PAGE_TABLE | (unsigned long) pg_table;
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                set_pte(pg_table, mk_pte(address, PAGE_SHARED));
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
        flush_tlb();
        return free_area_init(start_mem, end_mem);
}
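
/*
 * mem_init() releases usable memory to the page allocator: it clears the
 * zero page, un-reserves low memory below 0x9f000 and everything above
 * start_mem, clears PG_DMA on pages above MAX_DMA_ADDRESS, counts the
 * remaining reserved, code and data pages, frees the rest (skipping the
 * initial ramdisk if one is present), prints the "Memory: ..." banner,
 * and finally runs the write-protect test if it is still pending.
 */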
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long start_low_mem = PAGE_SIZE;
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        start_low_mem = PAGE_ALIGN(start_low_mem);

#ifdef __SMP__
        /* Set aside some low memory for the SMP bootup (trampoline) code. */
        start_low_mem += PAGE_SIZE;
        start_low_mem = smp_alloc_memory(start_low_mem);
#endif
        start_mem = PAGE_ALIGN(start_mem);

        /* Un-reserve the usable low-memory pages, up to 0x9f000 (just under 640K)... */
        while (start_low_mem < 0x9f000) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_low_mem)].flags);
                start_low_mem += PAGE_SIZE;
        }
        /* ...and everything from the end of the kernel to the top of memory. */
        while (start_mem < high_memory) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
                start_mem += PAGE_SIZE;
        }
        for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (tmp >= MAX_DMA_ADDRESS)
                        clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
                if (PageReserved(mem_map+MAP_NR(tmp))) {
                        if (tmp >= 0xA0000 && tmp < 0x100000)
                                reservedpages++;
                        else if (tmp < (unsigned long) &_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                /* Do not free the pages holding the initial ramdisk image. */
                if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                high_memory >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));

        /*
         * If the write-protect test is still pending, try writing through a
         * read-only mapping of page 0 to see whether the WP bit is honoured
         * in supervisor mode.
         */
        if (wp_works_ok < 0) {
                pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
                flush_tlb();
                __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
                pg0[0] = 0;
                flush_tlb();
                if (wp_works_ok < 0)
                        wp_works_ok = 0;
        }
        return;
}
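
/*
 * si_meminfo() fills in the memory fields of struct sysinfo: total,
 * shared, free and buffer memory, all converted to bytes.
 */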
void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (PageReserved(mem_map+i))
                        continue;
                val->totalram++;
                if (!mem_map[i].count)
                        continue;
                val->sharedram += mem_map[i].count-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}