This source file includes following definitions.
- do_page_fault
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
1
2
3
4
5
6
7 #include <linux/config.h>
8 #include <linux/signal.h>
9 #include <linux/sched.h>
10 #include <linux/head.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/string.h>
14 #include <linux/types.h>
15 #include <linux/ptrace.h>
16 #include <linux/mman.h>
17
18 #include <asm/system.h>
19 #include <asm/segment.h>
20
21 extern unsigned long pg0[1024];
22
23 extern void scsi_mem_init(unsigned long);
24 extern void sound_mem_init(void);
25 extern void die_if_kernel(char *,struct pt_regs *,long);
26 extern void show_net_buffers(void);
27
28
29
30
31
32
33 #undef CONFIG_TEST_VERIFY_AREA
34
35
36
37
38
39
/*
 * This routine handles page faults.  It reads the faulting linear
 * address from %cr2, finds the vma covering it (growing a stack vma
 * downwards if necessary) and dispatches to do_wp_page() for
 * write-protect faults on present pages or do_no_page() for
 * not-present pages.
 *
 * error_code bits (i386): PAGE_PRESENT set = protection violation on
 * a present page (clear = page not present); PAGE_USER set = fault
 * taken while the CPU was in user mode.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;

	/* The CPU leaves the faulting linear address in %cr2. */
	__asm__("movl %%cr2,%0":"=r" (address));
	/* Find the first vma whose end lies above the fault address. */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			goto bad_area;
		if (vma->vm_end > address)
			break;
	}
	if (vma->vm_start <= address)
		goto good_area;
	/* Fault below the vma start: acceptable only for a grows-down
	 * (stack) segment, and only within the stack rlimit. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
		goto bad_area;
	/* Expand the stack vma down to cover the faulting page, keeping
	 * vm_offset consistent with the new vm_start, then fall through
	 * into the normal good_area handling below. */
	vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
	vma->vm_start = (address & PAGE_MASK);
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	/* In vm86 mode, faults in the 0xA0000.. screen area are noted
	 * in the task's screen bitmap (one bit per page, first 32). */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			current->tss.screen_bitmap |= 1 << bit;
	}
	if (!(vma->vm_page_prot & PAGE_USER))
		goto bad_area;
	if (error_code & PAGE_PRESENT) {
		/* Protection fault on a present page: the vma must be
		 * writable or copy-on-write to be fixable. */
		if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
			goto bad_area;
#ifdef CONFIG_TEST_VERIFY_AREA
		if (regs->cs == KERNEL_CS)
			printk("WP fault at %08x\n", regs->eip);
#endif
		do_wp_page(vma, address, error_code);
		return;
	}
	do_no_page(vma, address, error_code);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Decide whether it was a user-mode fault (signal the process) or a
 * kernel-mode fault (WP test or oops).
 */
bad_area:
	if (error_code & PAGE_USER) {
		/* Record the fault details in the TSS and deliver SIGSEGV. */
		current->tss.cr2 = address;
		current->tss.error_code = error_code;
		current->tss.trap_no = 14;
		send_sig(SIGSEGV, current, 1);
		return;
	}
	/*
	 * Boot-time WP test handshake: mem_init() sets wp_works_ok to -1,
	 * maps pg0[0] read-only and writes through it in kernel mode.
	 * Faulting here proves the CPU honours the WP bit in supervisor
	 * mode; restore the mapping and resume.
	 */
	if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
		wp_works_ok = 1;
		pg0[0] = PAGE_SHARED;
		invalidate();
		printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
		return;
	}
	/* Kernel-mode fault: print an oops with page-table details. */
	if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		/* Re-map page 0 — presumably so the oops path itself
		 * cannot re-fault on it; confirm against head.S setup. */
		pg0[0] = PAGE_SHARED;
	} else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	__asm__("movl %%cr3,%0" : "=r" (page));
	printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
		current->tss.cr3, page);
	/* Walk the two-level page table by hand: the pde index is the
	 * top 10 address bits, the pte index the next 10. */
	page = ((unsigned long *) page)[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) page)[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);
}
130
131
132
133
134
135
136
137
138
139
140
141
142
143
/*
 * Fill the empty_bad_page_table page with BAD_PAGE|PAGE_TABLE entries
 * and return its address.  Used as a fallback page table when a real
 * one cannot be provided.
 */
unsigned long __bad_pagetable(void)
{
	extern char empty_bad_page_table[PAGE_SIZE];

	/* "rep stosl": store %eax (%edi-indexed) PTRS_PER_PAGE times,
	 * i.e. write the same pte value into every slot of the page. */
	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (BAD_PAGE + PAGE_TABLE),
		"D" ((long) empty_bad_page_table),
		"c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page_table;
}
155
/*
 * Zero the empty_bad_page page and return its address.  Used as the
 * page that BAD_PAGE table entries point at.
 */
unsigned long __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];

	/* "rep stosl" with %eax == 0 clears the whole page. */
	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		"D" ((long) empty_bad_page),
		"c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page;
}
167
/*
 * Zero the empty_zero_page page and return its address.  This is the
 * shared all-zeroes page handed out for anonymous reads.
 */
unsigned long __zero_page(void)
{
	extern char empty_zero_page[PAGE_SIZE];

	/* "rep stosl" with %eax == 0 clears the whole page. */
	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		"D" ((long) empty_zero_page),
		"c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_zero_page;
}
179
/*
 * Print a summary of memory usage to the console: the free areas and
 * swap, then a scan of mem_map[] counting total, free, reserved and
 * shared pages, followed by buffer (and, with CONFIG_NET, network
 * buffer) statistics.
 */
void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	i = high_memory >> PAGE_SHIFT;	/* number of physical page frames */
	while (i-- > 0) {
		total++;
		if (mem_map[i] & MAP_PAGE_RESERVED)
			reserved++;
		else if (!mem_map[i])
			free++;
		else
			shared += mem_map[i]-1;	/* count > 1 means extra sharers */
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}
207
208 extern unsigned long free_area_init(unsigned long, unsigned long);
209
210
211
212
213
214
215
216
/*
 * paging_init() builds the kernel page tables covering all physical
 * memory up to end_mem.  Each 4MB chunk gets one page table, entered
 * both at swapper_pg_dir[768+n] (linear address 0xC0000000 + n*4MB,
 * since 768 == 0xC0000000 >> 22) and aliased at swapper_pg_dir[n]
 * (identity mapping at linear 0).  Page-table pages are carved off
 * start_mem.  Returns the updated start_mem via free_area_init().
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long * pg_dir;
	unsigned long * pg_table;
	unsigned long tmp;
	unsigned long address;

	/* NOTE(review): zeroing of physical page 0 is deliberately
	 * disabled here — presumably the page is reserved for firmware
	 * use and/or kept unmapped to catch NULL dereferences; confirm
	 * against the boot code that sets up the initial mappings. */
#if 0
	memset((void *) 0, 0, PAGE_SIZE);
#endif
	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		/* Reuse a page table already installed at slot 768+n,
		 * or allocate a fresh one from start_mem. */
		tmp = *(pg_dir + 768);
		if (!tmp) {
			tmp = start_mem | PAGE_TABLE;
			*(pg_dir + 768) = tmp;
			start_mem += PAGE_SIZE;
		}
		/* Alias the same table in the identity-mapped low slot. */
		*pg_dir = tmp;
		pg_dir++;
		pg_table = (unsigned long *) (tmp & PAGE_MASK);
		/* Fill the table: one PAGE_SHARED pte per page up to
		 * end_mem, the remaining slots cleared. */
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = address | PAGE_SHARED;
			else
				*pg_table = 0;
			address += PAGE_SIZE;
		}
	}
	invalidate();	/* flush the TLB after rewriting the tables */
	return free_area_init(start_mem, end_mem);
}
257
/*
 * mem_init() - hand all memory not used by the kernel to the page
 * allocator and account for the rest.  Pages from start_low_mem up to
 * 0x9f000 and from start_mem up to end_mem are marked available
 * (mem_map count 0) and freed; everything still marked is classified
 * as reserved (0xA0000-0xFFFFF), kernel code (below &etext) or data.
 * Finally runs the WP-bit probe, which do_page_fault() completes by
 * setting wp_works_ok to 1 on CPUs that honour WP in supervisor mode.
 */
void mem_init(unsigned long start_low_mem,
	      unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* Mark the usable pages in mem_map[] (count 0 == free). */
	start_low_mem = PAGE_ALIGN(start_low_mem);
	start_mem = PAGE_ALIGN(start_mem);

	/* Low memory stops at 0x9f000 rather than 0xA0000 — presumably
	 * the last 4K below 640K is claimed by firmware on some
	 * machines; confirm against the boot/setup code. */
	while (start_low_mem < 0x9f000) {
		mem_map[MAP_NR(start_low_mem)] = 0;
		start_low_mem += PAGE_SIZE;
	}

	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)] = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_SCSI
	scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	/* Free every page still marked 0; classify the others. */
	for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
		if (mem_map[MAP_NR(tmp)]) {
			if (tmp >= 0xA0000 && tmp < 0x100000)
				reservedpages++;	/* video/BIOS hole */
			else if (tmp < (unsigned long) &etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)] = 1;	/* refcount 1 so free_page() releases it */
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		high_memory >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));
	/*
	 * WP-bit probe: map pg0[0] read-only and write through it in
	 * kernel mode.  If the CPU honours WP in supervisor mode the
	 * write faults and do_page_fault() sets wp_works_ok to 1;
	 * otherwise it stays -1 and is forced to 0 below.
	 */
	wp_works_ok = -1;
	pg0[0] = PAGE_READONLY;
	invalidate();
	__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
	pg0[0] = 0;
	invalidate();
	if (wp_works_ok < 0)
		wp_works_ok = 0;
#ifdef CONFIG_TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	return;
}
328
329 void si_meminfo(struct sysinfo *val)
330 {
331 int i;
332
333 i = high_memory >> PAGE_SHIFT;
334 val->totalram = 0;
335 val->sharedram = 0;
336 val->freeram = nr_free_pages << PAGE_SHIFT;
337 val->bufferram = buffermem;
338 while (i-- > 0) {
339 if (mem_map[i] & MAP_PAGE_RESERVED)
340 continue;
341 val->totalram++;
342 if (!mem_map[i])
343 continue;
344 val->sharedram += mem_map[i]-1;
345 }
346 val->totalram <<= PAGE_SHIFT;
347 val->sharedram <<= PAGE_SHIFT;
348 return;
349 }