This source file includes the following definitions:
- oom
- free_one_table
- clear_page_tables
- free_page_tables
- copy_page_tables
- unmap_page_range
- zeromap_page_range
- remap_page_range
- put_page
- put_dirty_page
- do_wp_page
- write_verify
- get_empty_page
- try_to_share
- share_page
- get_empty_pgtable
- do_no_page
- do_page_fault
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 #include <asm/system.h>
32
33 #include <linux/signal.h>
34 #include <linux/sched.h>
35 #include <linux/head.h>
36 #include <linux/kernel.h>
37 #include <linux/errno.h>
38 #include <linux/string.h>
39 #include <linux/types.h>
40
/* Top of usable physical memory, set by mem_init(); page-table entries are
 * compared against this everywhere to detect corrupt/bogus physical
 * addresses. */
41 unsigned long high_memory = 0;
 42
 43 extern void sound_mem_init(void);
 44
/* Free pages are kept on a singly linked list: the first word of each free
 * page holds the physical address of the next free page (built in
 * mem_init()). */
 45 int nr_free_pages = 0;
 46 unsigned long free_page_list = 0;
 47
 48
 49
 50
 51
 52
/* Secondary list: same linked-page layout, managed elsewhere in the
 * allocator (not populated in this file). */
 53 int nr_secondary_pages = 0;
 54 unsigned long secondary_page_list = 0;
 55
/* Copy one 4096-byte page: 1024 longword moves via rep;movsl.
 * 'from' and 'to' must be page-aligned virtual addresses. */
 56 #define copy_page(from,to) \
 57 __asm__("cld ; rep ; movsl"::"S" (from),"D" (to),"c" (1024):"cx","di","si")
 58
/* Per-physical-page reference counts, indexed by MAP_NR(addr); the
 * MAP_PAGE_RESERVED bit marks pages the allocator must never free. */
 59 unsigned short * mem_map = NULL;
 60
/* True when 'addr' lies inside the task's text segment (used to choose
 * read-only protection on demand-load). */
 61 #define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
62
63
64
65
66
67 void oom(struct task_struct * task)
68 {
69 printk("\nout of memory\n");
70 task->sigaction[SIGKILL-1].sa_handler = NULL;
71 task->blocked &= ~(1<<(SIGKILL-1));
72 send_sig(SIGKILL,task,1);
73 }
74
/*
 * Free one page table (one page-directory entry) together with every
 * page it maps.  The directory slot is cleared first; present pages go
 * back via free_page(), swapped-out entries via swap_free().  Tables
 * marked MAP_PAGE_RESERVED (kernel mappings) are detached but not freed.
 */
75 static void free_one_table(unsigned long * page_dir)
76 {
77 int j;
78 unsigned long pg_table = *page_dir;
79 unsigned long * page_table;
80
81 if (!pg_table)
82 return;
/* Detach the table from the directory before touching it. */
83 *page_dir = 0;
84 if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
85 printk("Bad page table: [%08x]=%08x\n",page_dir,pg_table);
86 return;
87 }
/* Reserved tables (kernel area) must never be freed. */
88 if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
89 return;
/* Mask off the low status bits to get the table's physical address. */
90 page_table = (unsigned long *) (pg_table & 0xfffff000);
91 for (j = 0 ; j < 1024 ; j++,page_table++) {
92 unsigned long pg = *page_table;
93
94 if (!pg)
95 continue;
96 *page_table = 0;
/* Present pages are freed; non-present non-zero entries are
 * swap descriptors and are released to the swap code. */
97 if (pg & PAGE_PRESENT)
98 free_page(0xfffff000 & pg);
99 else
100 swap_free(pg);
101 }
/* Finally free the page-table page itself. */
102 free_page(0xfffff000 & pg_table);
103 }
104
105
106
107
108
109
110
111
/*
 * Drop all user-space page tables of a task (used by exec()).  Only the
 * first 768 directory entries are cleared, i.e. the 3GB of virtual
 * space below the kernel mapping; the directory itself is kept.
 */
112 void clear_page_tables(struct task_struct * tsk)
113 {
114 int i;
115 unsigned long * page_dir;
116
117 if (!tsk)
118 return;
119 if (tsk == task[0])
120 panic("task[0] (swapper) doesn't support exec()\n");
121 page_dir = (unsigned long *) tsk->tss.cr3;
/* Refuse to clear the shared kernel directory. */
122 if (!page_dir || page_dir == swapper_pg_dir) {
123 printk("Trying to clear kernel page-directory: not good\n");
124 return;
125 }
/* 768 entries * 4MB = user area only; kernel entries stay intact. */
126 for (i = 0 ; i < 768 ; i++,page_dir++)
127 free_one_table(page_dir);
/* Flush the TLB after rewriting the page tables. */
128 invalidate();
129 return;
130 }
131
132
133
134
/*
 * Free a task's entire page directory and all its page tables (used at
 * task exit).  The task is switched to the kernel's swapper_pg_dir
 * first so the directory can be torn down safely even for 'current'.
 */
135 void free_page_tables(struct task_struct * tsk)
136 {
137 int i;
138 unsigned long pg_dir;
139 unsigned long * page_dir;
140
141 if (!tsk)
142 return;
143 if (tsk == task[0]) {
144 printk("task[0] (swapper) killed: unable to recover\n");
145 panic("Trying to free up swapper memory space");
146 }
147 pg_dir = tsk->tss.cr3;
148 if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
149 printk("Trying to free kernel page-directory: not good\n");
150 return;
151 }
/* Repoint the task at the kernel directory before freeing its own. */
152 tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
/* If we're freeing our own tables, reload %cr3 immediately. */
153 if (tsk == current)
154 __asm__ __volatile__("movl %0,%%cr3"::"a" (tsk->tss.cr3));
155 page_dir = (unsigned long *) pg_dir;
/* All 1024 entries this time: reserved kernel tables are skipped
 * inside free_one_table(). */
156 for (i = 0 ; i < 1024 ; i++,page_dir++)
157 free_one_table(page_dir);
158 free_page(pg_dir);
159 invalidate();
160 }
161
162
163
164
165
166
/*
 * Duplicate current's page tables into 'tsk' (fork()).  Reserved kernel
 * tables are shared by reference.  User pages are not copied: writable
 * copy-on-write pages are made read-only in BOTH address spaces and
 * their reference count is bumped, so the first write faults into
 * do_wp_page().  Returns 0 or -ENOMEM.
 */
167 int copy_page_tables(struct task_struct * tsk)
168 {
169 int i;
170 unsigned long old_pg_dir, *old_page_dir;
171 unsigned long new_pg_dir, *new_page_dir;
172
173 old_pg_dir = current->tss.cr3;
174 new_pg_dir = get_free_page(GFP_KERNEL);
175 if (!new_pg_dir)
176 return -ENOMEM;
177 tsk->tss.cr3 = new_pg_dir;
178 old_page_dir = (unsigned long *) old_pg_dir;
179 new_page_dir = (unsigned long *) new_pg_dir;
180 for (i = 0 ; i < 1024 ; i++,old_page_dir++,new_page_dir++) {
181 int j;
182 unsigned long old_pg_table, *old_page_table;
183 unsigned long new_pg_table, *new_page_table;
184
185 old_pg_table = *old_page_dir;
186 if (!old_pg_table)
187 continue;
188 if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
189 printk("copy_page_tables: bad page table: "
190 "probable memory corruption");
191 *old_page_dir = 0;
192 continue;
193 }
/* Kernel (reserved) tables are shared, not duplicated. */
194 if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
195 *new_page_dir = old_pg_table;
196 continue;
197 }
198 new_pg_table = get_free_page(GFP_KERNEL);
/* On failure, undo everything copied so far. */
199 if (!new_pg_table) {
200 free_page_tables(tsk);
201 return -ENOMEM;
202 }
203 *new_page_dir = new_pg_table | PAGE_TABLE;
204 old_page_table = (unsigned long *) (0xfffff000 & old_pg_table);
205 new_page_table = (unsigned long *) (0xfffff000 & new_pg_table);
206 for (j = 0 ; j < 1024 ; j++,old_page_table++,new_page_table++) {
207 unsigned long pg;
208 pg = *old_page_table;
209 if (!pg)
210 continue;
/* Swapped-out page: duplicate the swap entry instead. */
211 if (!(pg & PAGE_PRESENT)) {
212 *new_page_table = swap_duplicate(pg);
213 continue;
214 }
/* Writable COW page: drop the write bit so either side's
 * first write triggers the copy-on-write fault. */
215 if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
216 pg &= ~PAGE_RW;
217 *new_page_table = pg;
218 if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
219 continue;
/* Write the (possibly now read-only) entry back to the
 * parent too, and count the extra reference. */
220 *old_page_table = pg;
221 mem_map[MAP_NR(pg)]++;
222 }
223 }
224 invalidate();
225 return 0;
226 }
227
228
229
230
231
/*
 * Unmap 'size' bytes of current's address space starting at page-aligned
 * 'from'.  Present pages are freed (and rss decremented); non-present
 * entries are released to swap.  A page table that becomes fully covered
 * (pcnt == 1024) is freed as well.  Returns 0 or -EINVAL.
 */
232 int unmap_page_range(unsigned long from, unsigned long size)
233 {
234 unsigned long page, page_dir;
235 unsigned long *page_table, *dir;
236 unsigned long poff, pcnt, pc;
237
238 if (from & 0xfff) {
239 printk("unmap_page_range called with wrong alignment\n");
240 return -EINVAL;
241 }
/* Round size up to whole pages and convert to a page count. */
242 size = (size + 0xfff) >> PAGE_SHIFT;
/* Directory slot for 'from': bits 31..22, times 4 bytes per entry. */
243 dir = (unsigned long *) (current->tss.cr3 + ((from >> 20) & 0xffc));
/* Offset of the first page within its page table. */
244 poff = (from >> PAGE_SHIFT) & 0x3ff;
245 if ((pcnt = 1024 - poff) > size)
246 pcnt = size;
247
248 for ( ; size > 0; ++dir, size -= pcnt,
249 pcnt = (size > 1024 ? 1024 : size)) {
250 if (!(page_dir = *dir)) {
251 poff = 0;
252 continue;
253 }
254 if (!(page_dir & PAGE_PRESENT)) {
255 printk("unmap_page_range: bad page directory.");
256 continue;
257 }
258 page_table = (unsigned long *)(0xfffff000 & page_dir);
259 if (poff) {
260 page_table += poff;
261 poff = 0;
262 }
263 for (pc = pcnt; pc--; page_table++) {
264 if ((page = *page_table) != 0) {
265 *page_table = 0;
/* Bit 0 is PAGE_PRESENT: free real pages, otherwise
 * the entry is a swap descriptor. */
266 if (1 & page) {
267 --current->rss;
268 free_page(0xfffff000 & page);
269 } else
270 swap_free(page);
271 }
272 }
/* An entire table was cleared: free the table page too. */
273 if (pcnt == 1024) {
274 free_page(0xfffff000 & page_dir);
275 *dir = 0;
276 }
277 }
278 invalidate();
279 return 0;
280 }
281
/*
 * Map 'size' bytes at page-aligned 'from' in current's address space to
 * the shared zero page (mask | ZERO_PAGE), or clear the range when mask
 * is 0.  Existing mappings in the range are freed first.  Page tables
 * are allocated on demand.  Returns 0, -EINVAL or -ENOMEM.
 */
282 int zeromap_page_range(unsigned long from, unsigned long size, int mask)
283 {
284 unsigned long *page_table, *dir;
285 unsigned long poff, pcnt;
286 unsigned long page;
287
288 if (mask) {
/* Only protection bits plus PAGE_PRESENT are legal in mask. */
289 if ((mask & 0xfffff001) != PAGE_PRESENT) {
290 printk("zeromap_page_range: mask = %08x\n",mask);
291 return -EINVAL;
292 }
293 mask |= ZERO_PAGE;
294 }
295 if (from & 0xfff) {
296 printk("zeromap_page_range: from = %08x\n",from);
297 return -EINVAL;
298 }
299 dir = (unsigned long *) (current->tss.cr3 + ((from >> 20) & 0xffc));
300 size = (size + 0xfff) >> PAGE_SHIFT;
301 poff = (from >> PAGE_SHIFT) & 0x3ff;
302 if ((pcnt = 1024 - poff) > size)
303 pcnt = size;
304
305 while (size > 0) {
306 if (!(PAGE_PRESENT & *dir)) {
307 if (!(page_table = (unsigned long *)get_free_page(GFP_KERNEL))) {
308 invalidate();
309 return -ENOMEM;
310 }
/* Re-check: the allocation may have slept and someone may
 * have installed a table meanwhile. */
311 if (PAGE_PRESENT & *dir) {
312 free_page((unsigned long) page_table);
313 page_table = (unsigned long *)(0xfffff000 & *dir++);
314 } else
315 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
316 } else
317 page_table = (unsigned long *)(0xfffff000 & *dir++);
318 page_table += poff;
319 poff = 0;
320 for (size -= pcnt; pcnt-- ;) {
/* Drop whatever was mapped here before. */
321 if ((page = *page_table) != 0) {
322 *page_table = 0;
323 if (page & PAGE_PRESENT) {
324 --current->rss;
325 free_page(0xfffff000 & page);
326 } else
327 swap_free(page);
328 }
329 if (mask)
330 ++current->rss;
331 *page_table++ = mask;
332 }
333 pcnt = (size > 1024 ? 1024 : size);
334 }
335 invalidate();
336 return 0;
337 }
338
339
340
341
342
343
/*
 * Map the physical range starting at 'to' into current's address space
 * at page-aligned 'from', with protection 'mask' (mask 0 unmaps).  Old
 * mappings are freed; reference counts of mapped pages are bumped
 * unless the page is reserved.  Returns 0, -EINVAL, or -1 on table
 * allocation failure.
 */
344 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
345 {
346 unsigned long *page_table, *dir;
347 unsigned long poff, pcnt;
348 unsigned long page;
349
350 if (mask) {
351 if ((mask & 0xfffff001) != PAGE_PRESENT) {
352 printk("remap_page_range: mask = %08x\n",mask);
353 return -EINVAL;
354 }
355 }
356 if ((from & 0xfff) || (to & 0xfff)) {
357 printk("remap_page_range: from = %08x, to=%08x\n",from,to);
358 return -EINVAL;
359 }
360 dir = (unsigned long *) (current->tss.cr3 + ((from >> 20) & 0xffc));
361 size = (size + 0xfff) >> PAGE_SHIFT;
362 poff = (from >> PAGE_SHIFT) & 0x3ff;
363 if ((pcnt = 1024 - poff) > size)
364 pcnt = size;
365
366 while (size > 0) {
/* Allocate a page table on demand for empty directory slots. */
367 if (!(PAGE_PRESENT & *dir)) {
368 if (!(page_table = (unsigned long *)get_free_page(GFP_KERNEL))) {
369 invalidate();
370 return -1;
371 }
372 *dir++ = ((unsigned long) page_table) | PAGE_TABLE;
373 }
374 else
375 page_table = (unsigned long *)(0xfffff000 & *dir++);
376 if (poff) {
377 page_table += poff;
378 poff = 0;
379 }
380
381 for (size -= pcnt; pcnt-- ;) {
/* Release whatever was previously mapped at this slot. */
382 if ((page = *page_table) != 0) {
383 *page_table = 0;
384 if (PAGE_PRESENT & page) {
385 --current->rss;
386 free_page(0xfffff000 & page);
387 } else
388 swap_free(page);
389 }
390
391
392
393
394
395
396
397
/* Out-of-range or unmanaged targets get an empty entry;
 * otherwise install the mapping and count the reference. */
398 if (!mask || to >= high_memory || !mem_map[MAP_NR(to)])
399 *page_table++ = 0;
400 else {
401 ++current->rss;
402 *page_table++ = (to | mask);
403 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED))
404 mem_map[MAP_NR(to)]++;
405 }
406 to += PAGE_SIZE;
407 }
408 pcnt = (size > 1024 ? 1024 : size);
409 }
410 invalidate();
411 return 0;
412 }
413
414
415
416
417
418
419
420
/*
 * Install physical page 'page' at virtual 'address' in tsk's tables
 * with protection 'prot'.  The page table must already exist; on a bad
 * directory entry the task is OOM-killed and BAD_PAGETABLE installed.
 * Returns the page address on success, 0 on failure.
 */
421 static unsigned long put_page(struct task_struct * tsk,unsigned long page,
422 unsigned long address,int prot)
423 {
424 unsigned long tmp, *page_table;
425
426
427
428 if ((prot & 0xfffff001) != PAGE_PRESENT)
429 printk("put_page: prot = %08x\n",prot);
430 if (page >= high_memory) {
431 printk("put_page: trying to put page %p at %p\n",page,address);
432 return 0;
433 }
/* Sanity: the page must be reserved or have exactly one reference. */
434 tmp = mem_map[MAP_NR(page)];
435 if (!(tmp & MAP_PAGE_RESERVED) && (tmp != 1)) {
436 printk("put_page: mem_map disagrees with %p at %p\n",page,address);
437 return 0;
438 }
439 page_table = (unsigned long *) (tsk->tss.cr3 + ((address>>20) & 0xffc));
440 if ((*page_table) & PAGE_PRESENT)
441 page_table = (unsigned long *) (0xfffff000 & *page_table);
442 else {
443 printk("put_page: bad page directory entry\n");
444 oom(tsk);
445 *page_table = BAD_PAGETABLE | PAGE_TABLE;
446 return 0;
447 }
448 page_table += (address >> PAGE_SHIFT) & 0x3ff;
/* Should not happen: overwrite but warn and flush the TLB. */
449 if (*page_table) {
450 printk("put_page: page already exists\n");
451 *page_table = 0;
452 invalidate();
453 }
454 *page_table = page | prot;
455
456 return page;
457 }
458
459
460
461
462
463
464
/*
 * Like put_page(), but marks the page dirty/private and will allocate a
 * missing page table instead of failing (used when setting up argument
 * pages at exec time).  Returns the page address, or 0 if a needed page
 * table could not be allocated.
 */
465 unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
466 {
467 unsigned long tmp, *page_table;
468
469 if (page >= high_memory)
470 printk("put_dirty_page: trying to put page %p at %p\n",page,address);
471 if (mem_map[MAP_NR(page)] != 1)
472 printk("mem_map disagrees with %p at %p\n",page,address);
473 page_table = (unsigned long *) (tsk->tss.cr3 + ((address>>20) & 0xffc));
474 if ((*page_table)&PAGE_PRESENT)
475 page_table = (unsigned long *) (0xfffff000 & *page_table);
476 else {
/* No table yet: allocate and install one. */
477 if (!(tmp=get_free_page(GFP_KERNEL)))
478 return 0;
479 *page_table = tmp | PAGE_TABLE;
480 page_table = (unsigned long *) tmp;
481 }
482 page_table += (address >> PAGE_SHIFT) & 0x3ff;
483 if (*page_table) {
484 printk("put_dirty_page: page already exists\n");
485 *page_table = 0;
486 invalidate();
487 }
488 *page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);
489
490 return page;
491 }
492
493
494
495
496
497
/*
 * Write-protection (copy-on-write) fault handler.  A replacement page
 * is allocated up front (the allocation may sleep, so it happens before
 * the tables are examined).  If the faulting page's reference count is
 * 1 the page is simply made writable; otherwise it is copied and the
 * old reference dropped.  Writes to non-COW read-only pages raise
 * SIGSEGV.
 */
498 void do_wp_page(unsigned long error_code, unsigned long address,
499 struct task_struct * tsk, unsigned long user_esp)
500 {
501 unsigned long pde, pte, old_page, prot;
502 unsigned long new_page;
503
/* Allocate first: get_free_page may block, and the page-table state
 * must be (re-)read after any potential sleep. */
504 new_page = __get_free_page(GFP_KERNEL);
505 pde = tsk->tss.cr3 + ((address>>20) & 0xffc);
506 pte = *(unsigned long *) pde;
507 if (!(pte & PAGE_PRESENT)) {
508 if (new_page)
509 free_page(new_page);
510 return;
511 }
512 if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory) {
513 printk("do_wp_page: bogus page-table at address %08x (%08x)\n",address,pte);
514 *(unsigned long *) pde = BAD_PAGETABLE | PAGE_TABLE;
515 send_sig(SIGKILL, tsk, 1);
516 if (new_page)
517 free_page(new_page);
518 return;
519 }
520 pte &= 0xfffff000;
521 pte += (address>>10) & 0xffc;
522 old_page = *(unsigned long *) pte;
523 if (!(old_page & PAGE_PRESENT)) {
524 if (new_page)
525 free_page(new_page);
526 return;
527 }
528 if (old_page >= high_memory) {
529 printk("do_wp_page: bogus page at address %08x (%08x)\n",address,old_page);
530 *(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
531 send_sig(SIGKILL, tsk, 1);
532 if (new_page)
533 free_page(new_page);
534 return;
535 }
/* Already writable: nothing to do (race with another fault). */
536 if (old_page & PAGE_RW) {
537 if (new_page)
538 free_page(new_page);
539 return;
540 }
/* A write fault on a read-only, non-COW page from user mode is a
 * genuine protection violation. */
541 if (!(old_page & PAGE_COW)) {
542 if (user_esp && tsk == current)
543 send_sig(SIGSEGV, tsk, 1);
544 }
545 tsk->min_flt++;
546 prot = (old_page & 0x00000fff) | PAGE_RW;
547 old_page &= 0xfffff000;
/* Sole owner: just turn on the write bit (bit 1 = PAGE_RW). */
548 if (mem_map[MAP_NR(old_page)]==1) {
549 *(unsigned long *) pte |= 2;
550 invalidate();
551 if (new_page)
552 free_page(new_page);
553 return;
554 }
/* Shared: copy into the new page, or map BAD_PAGE if we are out
 * of memory. */
555 if (new_page)
556 copy_page(old_page,new_page);
557 else {
558 new_page = BAD_PAGE;
559 oom(tsk);
560 }
561 *(unsigned long *) pte = new_page | prot;
562 free_page(old_page);
563 invalidate();
564 }
565
566 void write_verify(unsigned long address)
567 {
568 if (address < TASK_SIZE)
569 do_wp_page(1,address,current,0);
570 }
571
572 static void get_empty_page(struct task_struct * tsk, unsigned long address)
573 {
574 unsigned long tmp;
575
576 tmp = get_free_page(GFP_KERNEL);
577 if (!tmp) {
578 oom(tsk);
579 tmp = BAD_PAGE;
580 }
581 if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
582 free_page(tmp);
583 }
584
585
586
587
588
589
590
591
592
/*
 * Try to satisfy tsk's fault at 'address' using the page already mapped
 * at the same address in task p.  Clean, present, non-reserved pages
 * only.  On a write fault the page is copied into 'newpage'; otherwise
 * it is shared read-only and 'newpage' is freed.  Returns 1 on success,
 * 0 if sharing was not possible.
 */
593 static int try_to_share(unsigned long address, struct task_struct * tsk,
594 struct task_struct * p, unsigned long error_code, unsigned long newpage)
595 {
596 unsigned long from;
597 unsigned long to;
598 unsigned long from_page;
599 unsigned long to_page;
600
601 from_page = p->tss.cr3 + ((address>>20) & 0xffc);
602 to_page = tsk->tss.cr3 + ((address>>20) & 0xffc);
603
/* Walk the donor's tables down to the PTE. */
604 from = *(unsigned long *) from_page;
605 if (!(from & PAGE_PRESENT))
606 return 0;
607 from &= 0xfffff000;
608 from_page = from + ((address>>10) & 0xffc);
609 from = *(unsigned long *) from_page;
610
/* Only present AND clean pages may be shared. */
611 if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
612 return 0;
613 if (from >= high_memory)
614 return 0;
615 if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
616 return 0;
617
/* The recipient's table must exist and its slot must be empty. */
618 to = *(unsigned long *) to_page;
619 if (!(to & PAGE_PRESENT))
620 return 0;
621 to &= 0xfffff000;
622 to_page = to + ((address>>10) & 0xffc);
623 if (*(unsigned long *) to_page)
624 return 0;
625
/* Write fault: private copy.  Read fault: share, read-only. */
626 if (error_code & PAGE_RW) {
627 copy_page((from & 0xfffff000),newpage);
628 to = newpage | PAGE_PRIVATE;
629 } else {
630 mem_map[MAP_NR(from)]++;
631 from &= ~PAGE_RW;
632 to = from;
633 free_page(newpage);
634 }
635 *(unsigned long *) from_page = from;
636 *(unsigned long *) to_page = to;
637 invalidate();
638 return 1;
639 }
640
641
642
643
644
645
646
647
648
/*
 * Scan all tasks for one that maps the same executable or shared
 * library inode and try to share its page at 'address' via
 * try_to_share().  Returns 1 on success, 0 otherwise.  Only worth
 * trying if the inode has more than one user.
 */
649 static int share_page(struct task_struct * tsk, struct inode * inode,
650 unsigned long address, unsigned long error_code, unsigned long newpage)
651 {
652 struct task_struct ** p;
653 int i;
654
655 if (!inode || inode->i_count < 2)
656 return 0;
657 for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
658 if (!*p)
659 continue;
660 if (tsk == *p)
661 continue;
/* Accept the task if it runs this inode or has it mapped as
 * one of its shared libraries. */
662 if (inode != (*p)->executable) {
663 for (i=0; i < (*p)->numlibraries; i++)
664 if (inode == (*p)->libraries[i].library)
665 break;
666 if (i >= (*p)->numlibraries)
667 continue;
668 }
669 if (try_to_share(address,tsk,*p,error_code,newpage))
670 return 1;
671 }
672 return 0;
673 }
674
675
676
677
/*
 * Return the page-directory entry covering 'address' in tsk, allocating
 * a page table if none exists.  The allocate-then-recheck loop guards
 * against get_free_page() sleeping and another path installing a table
 * in the meantime.  Returns the directory entry, or 0 on OOM (after
 * installing BAD_PAGETABLE).
 */
678 static unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
679 {
680 unsigned long page = 0;
681 unsigned long *p;
682 repeat:
683 p = (unsigned long *) (tsk->tss.cr3 + ((address >> 20) & 0xffc));
/* Someone else installed a table while we slept: drop our spare
 * page (free_page is assumed to tolerate page == 0 here — NOTE
 * (review): verify against the allocator). */
684 if (PAGE_PRESENT & *p) {
685 free_page(page);
686 return *p;
687 }
688 if (*p) {
689 printk("get_empty_pgtable: bad page-directory entry \n");
690 *p = 0;
691 }
692 if (page) {
693 *p = page | PAGE_TABLE;
694 return *p;
695 }
/* First pass: allocate, then jump back to re-validate the slot. */
696 if ((page = get_free_page(GFP_KERNEL)) != 0)
697 goto repeat;
698 oom(current);
699 *p = BAD_PAGETABLE | PAGE_TABLE;
700 return 0;
701 }
702
/*
 * Missing-page fault handler (demand paging).  Handles, in order:
 * swapped-out pages (swap_in), anonymous pages (zero-filled, with stack
 * growth check), and file-backed pages from the executable or a shared
 * library (read via bread_page, shared with other tasks when possible).
 */
703 void do_no_page(unsigned long error_code, unsigned long address,
704 struct task_struct *tsk, unsigned long user_esp)
705 {
706 int nr[8], prot;
707 unsigned long tmp;
708 unsigned long page;
709 unsigned int block,i;
710 struct inode * inode;
711
712 page = get_empty_pgtable(tsk,address);
713 if (!page)
714 return;
/* Locate the PTE for the faulting address within that table. */
715 page &= 0xfffff000;
716 page += (address >> 10) & 0xffc;
717 tmp = *(unsigned long *) page;
/* Raced with another fault that already mapped it. */
718 if (tmp & PAGE_PRESENT)
719 return;
720 ++tsk->rss;
/* Non-zero but not present: a swap entry — bring the page back. */
721 if (tmp) {
722 ++tsk->maj_flt;
723 swap_in((unsigned long *) page);
724 return;
725 }
726 address &= 0xfffff000;
727 inode = NULL;
728 block = 0;
/* Find the backing object: main executable, or a shared library
 * whose range covers 'address' (block 0 means its bss). */
729 if (address < tsk->end_data) {
730 inode = tsk->executable;
731 block = 1 + address / BLOCK_SIZE;
732 } else {
733 i = tsk->numlibraries;
734 while (i-- > 0) {
735 if (address < tsk->libraries[i].start)
736 continue;
737 block = address - tsk->libraries[i].start;
738 if (block >= tsk->libraries[i].length + tsk->libraries[i].bss)
739 continue;
740 inode = tsk->libraries[i].library;
741 if (block < tsk->libraries[i].length)
742 block = 1 + block / BLOCK_SIZE;
743 else
744 block = 0;
745 break;
746 }
747 }
/* Anonymous page: zero-fill, then sanity-check it was a legal
 * heap access or a stack growth within 8KB of the user stack. */
748 if (!inode) {
749 ++tsk->min_flt;
750 get_empty_page(tsk,address);
751 if (tsk != current)
752 return;
753 if (address < tsk->brk)
754 return;
755 if (address+8192 >= (user_esp & 0xfffff000))
756 return;
757 send_sig(SIGSEGV,tsk,1);
758 return;
759 }
760 page = get_free_page(GFP_KERNEL);
/* First try to share an already-loaded copy of this page. */
761 if (share_page(tsk,inode,address,error_code,page)) {
762 ++tsk->min_flt;
763 return;
764 }
765 ++tsk->maj_flt;
766 if (!page) {
767 oom(current);
768 put_page(tsk,BAD_PAGE,address,PAGE_PRIVATE);
769 return;
770 }
771 prot = PAGE_PRIVATE;
772 if (CODE_SPACE(address, tsk))
773 prot = PAGE_READONLY;
/* Read the page (four 1K blocks) from disk. */
774 if (block) {
775 for (i=0 ; i<4 ; block++,i++)
776 nr[i] = bmap(inode,block);
777 page = bread_page(page,inode->i_dev,nr,1024,prot);
778 }
/* The read may have slept: retry sharing for read faults. */
779 if (!(error_code & PAGE_RW) && share_page(tsk,inode,address, error_code,page))
780 return;
/* Zero the tail of the page beyond end_data so stale file data
 * never leaks into bss. */
781 i = address + PAGE_SIZE - tsk->end_data;
782 if (i > PAGE_SIZE-1)
783 i = 0;
784 tmp = page + PAGE_SIZE;
785 while (i--) {
786 tmp--;
787 *(char *)tmp = 0;
788 }
789 if (put_page(tsk,page,address,prot))
790 return;
791 free_page(page);
792 oom(current);
793 }
794
795
796
797
798
799
/*
 * Top-level page-fault entry.  Reads the faulting address from %cr2 and
 * dispatches: error_code bit 0 set = protection fault (do_wp_page),
 * clear = missing page (do_no_page); bit 2 set = fault from user mode.
 * Afterwards enforces the stack rlimit for user-mode faults.  Kernel
 * faults above TASK_SIZE oops.
 */
800 void do_page_fault(unsigned long *esp, unsigned long error_code)
801 {
802 unsigned long address;
803 unsigned long user_esp = 0;
804 unsigned long stack_limit;
805 unsigned int bit;
806 extern void die_if_kernel(char *,long,long);
807
808
/* The CPU latches the faulting linear address in %cr2. */
809 __asm__("movl %%cr2,%0":"=r" (address));
810 if (address < TASK_SIZE) {
811 if (error_code & 4) {
/* vm86 mode: record touched screen-memory pages instead of
 * tracking a user stack pointer. */
812 if (esp[2] & VM_MASK) {
813 bit = (address - 0xA0000) >> PAGE_SHIFT;
814 if (bit < 32)
815 current->screen_bitmap |= 1 << bit;
816 } else
817 user_esp = esp[3];
818 }
819 if (error_code & 1)
820 do_wp_page(error_code, address, current, user_esp);
821 else
822 do_no_page(error_code, address, current, user_esp);
823 if (!user_esp)
824 return;
/* Post-fault stack rlimit check for user-mode faults. */
825 stack_limit = current->rlim[RLIMIT_STACK].rlim_cur;
826 if (stack_limit >= RLIM_INFINITY)
827 return;
828 if (stack_limit >= current->start_stack)
829 return;
830 stack_limit = current->start_stack - stack_limit;
831 if (user_esp < stack_limit)
832 send_sig(SIGSEGV, current, 1);
833 return;
834 }
835 printk("Unable to handle kernel paging request at address %08x\n",address);
836 die_if_kernel("Oops",(long)esp,error_code);
837 do_exit(SIGKILL);
838 }
839
840
841
842
843
844
845
846
847
848
849
850
851
852
/*
 * Return a page table whose 1024 entries all point at BAD_PAGE.
 * The table is (re)filled on every call via rep;stosl.
 */
853 unsigned long __bad_pagetable(void)
854 {
855 extern char empty_bad_page_table[PAGE_SIZE];
856
857 __asm__ __volatile__("cld ; rep ; stosl"
858 ::"a" (BAD_PAGE + PAGE_TABLE),
859 "D" ((long) empty_bad_page_table),
860 "c" (1024)
861 :"di","cx");
862 return (unsigned long) empty_bad_page_table;
863 }
864
/*
 * Return the shared "bad" page, zeroed on every call, used as a
 * last-resort mapping when the system is out of memory.
 */
865 unsigned long __bad_page(void)
866 {
867 extern char empty_bad_page[PAGE_SIZE];
868
869 __asm__ __volatile__("cld ; rep ; stosl"
870 ::"a" (0),
871 "D" ((long) empty_bad_page),
872 "c" (1024)
873 :"di","cx");
874 return (unsigned long) empty_bad_page;
875 }
876
/*
 * Return the shared zero page, re-zeroed on every call; mapped
 * read-only for zero-filled regions (see zeromap_page_range).
 */
877 unsigned long __zero_page(void)
878 {
879 extern char empty_zero_page[PAGE_SIZE];
880
881 __asm__ __volatile__("cld ; rep ; stosl"
882 ::"a" (0),
883 "D" ((long) empty_zero_page),
884 "c" (1024)
885 :"di","cx");
886 return (unsigned long) empty_zero_page;
887 }
888
889 void show_mem(void)
890 {
891 int i,free = 0,total = 0,reserved = 0;
892 int shared = 0;
893
894 printk("Mem-info:\n");
895 printk("Free pages: %6d\n",nr_free_pages);
896 printk("Secondary pages: %6d\n",nr_secondary_pages);
897 printk("Buffer heads: %6d\n",nr_buffer_heads);
898 printk("Buffer blocks: %6d\n",nr_buffers);
899 i = high_memory >> PAGE_SHIFT;
900 while (i-- > 0) {
901 total++;
902 if (mem_map[i] & MAP_PAGE_RESERVED)
903 reserved++;
904 else if (!mem_map[i])
905 free++;
906 else
907 shared += mem_map[i]-1;
908 }
909 printk("%d pages of RAM\n",total);
910 printk("%d free pages\n",free);
911 printk("%d reserved pages\n",reserved);
912 printk("%d pages shared\n",shared);
913 }
914
915
916
917
918
919
920
921
/*
 * Boot-time paging setup: map all physical memory up to end_mem into
 * the kernel portion of swapper_pg_dir (entries from index 768 up),
 * allocating page tables from start_mem as needed.  Page 0 is zeroed
 * and left unmapped to catch NULL dereferences.  Returns the updated
 * start_mem.
 */
922 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
923 {
924 unsigned long * pg_dir;
925 unsigned long * pg_table;
926 unsigned long tmp;
927 unsigned long address;
928
929
930
931
932
933
/* Clear physical page 0; the loop below leaves address 0 unmapped. */
934 memset((void *) 0, 0, 4096);
935 start_mem += 4095;
936 start_mem &= 0xfffff000;
937 address = 0;
/* Kernel mappings begin at directory index 768 (3GB virtual). */
938 pg_dir = swapper_pg_dir + 768;
939 while (address < end_mem) {
940 tmp = *pg_dir;
/* Carve a fresh page table out of start_mem when needed. */
941 if (!tmp) {
942 tmp = start_mem;
943 *pg_dir = tmp | PAGE_TABLE;
944 start_mem += 4096;
945 }
946 pg_dir++;
947 pg_table = (unsigned long *) (tmp & 0xfffff000);
948 for (tmp = 0 ; tmp < 1024 ; tmp++,pg_table++) {
/* Identity-map each page except page 0 and beyond end_mem. */
949 if (address && address < end_mem)
950 *pg_table = address | PAGE_SHARED;
951 else
952 *pg_table = 0;
953 address += 4096;
954 }
955 }
956 invalidate();
957 return start_mem;
958 }
959
/*
 * Initialize the page allocator.  Carves mem_map[] out of start_mem,
 * marks everything reserved, then clears the reservation for the free
 * low-memory region (below 0xA0000) and the region above the kernel.
 * Finally threads every unreserved page onto free_page_list (the first
 * word of each free page points to the next) and prints the totals.
 */
960 void mem_init(unsigned long start_low_mem,
961 unsigned long start_mem, unsigned long end_mem)
962 {
963 int codepages = 0;
964 int reservedpages = 0;
965 int datapages = 0;
966 unsigned long tmp;
967 unsigned short * p;
968 extern int etext;
969
970 cli();
971 end_mem &= 0xfffff000;
972 high_memory = end_mem;
/* Place mem_map[] (one short per page) at 16-byte-aligned start_mem. */
973 start_mem += 0x0000000f;
974 start_mem &= 0xfffffff0;
975 tmp = MAP_NR(end_mem);
976 mem_map = (unsigned short *) start_mem;
977 p = mem_map + tmp;
978 start_mem = (unsigned long) p;
/* Start with every page reserved; free regions are cleared below. */
979 while (p > mem_map)
980 *--p = MAP_PAGE_RESERVED;
981 start_low_mem += 0x00000fff;
982 start_low_mem &= 0xfffff000;
983 start_mem += 0x00000fff;
984 start_mem &= 0xfffff000;
/* Low memory below video/ROM area (0xA0000) is usable. */
985 while (start_low_mem < 0xA0000) {
986 mem_map[MAP_NR(start_low_mem)] = 0;
987 start_low_mem += 4096;
988 }
/* Everything above the kernel image and mem_map is usable. */
989 while (start_mem < end_mem) {
990 mem_map[MAP_NR(start_mem)] = 0;
991 start_mem += 4096;
992 }
993 sound_mem_init();
994 free_page_list = 0;
995 nr_free_pages = 0;
/* Build the free list; classify reserved pages for the report. */
996 for (tmp = 0 ; tmp < end_mem ; tmp += 4096) {
997 if (mem_map[MAP_NR(tmp)]) {
998 if (tmp >= 0xA0000 && tmp < 0x100000)
999 reservedpages++;
1000 else if (tmp < (unsigned long) &etext)
1001 codepages++;
1002 else
1003 datapages++;
1004 continue;
1005 }
/* Link this free page onto the head of the list. */
1006 *(unsigned long *) tmp = free_page_list;
1007 free_page_list = tmp;
1008 nr_free_pages++;
1009 }
1010 tmp = nr_free_pages << PAGE_SHIFT;
1011 printk("Memory: %dk/%dk available (%dk kernel code, %dk reserved, %dk data)\n",
1012 tmp >> 10,
1013 end_mem >> 10,
1014 codepages << 2,
1015 reservedpages << 2,
1016 datapages << 2);
1017 return;
1018 }
1019
1020 void si_meminfo(struct sysinfo *val)
1021 {
1022 int i;
1023
1024 i = high_memory >> PAGE_SHIFT;
1025 val->totalram = 0;
1026 val->freeram = 0;
1027 val->sharedram = 0;
1028 val->bufferram = buffermem;
1029 while (i-- > 0) {
1030 if (mem_map[i] & MAP_PAGE_RESERVED)
1031 continue;
1032 val->totalram++;
1033 if (!mem_map[i]) {
1034 val->freeram++;
1035 continue;
1036 }
1037 val->sharedram += mem_map[i]-1;
1038 }
1039 val->totalram <<= PAGE_SHIFT;
1040 val->freeram <<= PAGE_SHIFT;
1041 val->sharedram <<= PAGE_SHIFT;
1042 return;
1043 }