This source file includes the following definitions:
- oom
- free_one_table
- clear_page_tables
- free_page_tables
- clone_page_tables
- copy_page_tables
- unmap_page_range
- zeromap_page_range
- remap_page_range
- put_page
- put_dirty_page
- __do_wp_page
- do_wp_page
- __verify_write
- get_empty_page
- try_to_share
- share_page
- get_empty_pgtable
- do_no_page
- do_page_fault
- __bad_pagetable
- __bad_page
- __zero_page
- show_mem
- paging_init
- mem_init
- si_meminfo
- file_mmap_nopage
- file_mmap_free
- file_mmap_share
#include <asm/system.h>
#include <linux/config.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>

unsigned long high_memory = 0;

extern unsigned long pg0[1024];

extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);

int nr_swap_pages = 0;
int nr_free_pages = 0;
struct mem_list free_area_list[NR_MEM_LISTS];
unsigned char * free_area_map[NR_MEM_LISTS];

#define copy_page(from,to) \
__asm__("cld ; rep ; movsl": :"S" (from),"D" (to),"c" (1024):"cx","di","si")

unsigned short * mem_map = NULL;

#define CODE_SPACE(addr,p) ((addr) < (p)->end_code)

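/*
 * oom() marks a task as hopelessly out of memory: it prints a message,
 * clears any handler for SIGKILL, unblocks the signal and then sends
 * the task an untrappable SIGKILL.
 */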
void oom(struct task_struct * task)
{
	printk("\nout of memory\n");
	task->sigaction[SIGKILL-1].sa_handler = NULL;
	task->blocked &= ~(1<<(SIGKILL-1));
	send_sig(SIGKILL,task,1);
}

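/*
 * free_one_table() frees one page table together with every page (or
 * swap entry) it maps, after sanity-checking the directory entry.
 * Reserved page tables (e.g. the kernel mappings) are left alone.
 */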
static void free_one_table(unsigned long * page_dir)
{
	int j;
	unsigned long pg_table = *page_dir;
	unsigned long * page_table;

	if (!pg_table)
		return;
	*page_dir = 0;
	if (pg_table >= high_memory || !(pg_table & PAGE_PRESENT)) {
		printk("Bad page table: [%p]=%08lx\n",page_dir,pg_table);
		return;
	}
	if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
		return;
	page_table = (unsigned long *) (pg_table & PAGE_MASK);
	for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++) {
		unsigned long pg = *page_table;

		if (!pg)
			continue;
		*page_table = 0;
		if (pg & PAGE_PRESENT)
			free_page(PAGE_MASK & pg);
		else
			swap_free(pg);
	}
	free_page(PAGE_MASK & pg_table);
}

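/*
 * clear_page_tables() empties the user part of a process's page tables,
 * as needed by execve(): the kernel mappings (directory slots 768 and
 * up) are preserved, and a page directory that is still shared with
 * another task is replaced by a private copy instead of being cleared.
 */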
void clear_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long pg_dir;
	unsigned long * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0])
		panic("task[0] (swapper) doesn't support exec()\n");
	pg_dir = tsk->tss.cr3;
	page_dir = (unsigned long *) pg_dir;
	if (!page_dir || page_dir == swapper_pg_dir) {
		printk("Trying to clear kernel page-directory: not good\n");
		return;
	}
	if (mem_map[MAP_NR(pg_dir)] > 1) {
		unsigned long * new_pg;

		if (!(new_pg = (unsigned long*) get_free_page(GFP_KERNEL))) {
			oom(tsk);
			return;
		}
		for (i = 768 ; i < 1024 ; i++)
			new_pg[i] = page_dir[i];
		free_page(pg_dir);
		tsk->tss.cr3 = (unsigned long) new_pg;
		return;
	}
	for (i = 0 ; i < 768 ; i++,page_dir++)
		free_one_table(page_dir);
	invalidate();
	return;
}

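/*
 * free_page_tables() frees a process's entire page directory, first
 * switching the task over to swapper_pg_dir (and reloading %cr3 if it
 * is the current task). A directory that is merely shared only has its
 * reference count dropped.
 */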
void free_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long pg_dir;
	unsigned long * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0]) {
		printk("task[0] (swapper) killed: unable to recover\n");
		panic("Trying to free up swapper memory space");
	}
	pg_dir = tsk->tss.cr3;
	if (!pg_dir || pg_dir == (unsigned long) swapper_pg_dir) {
		printk("Trying to free kernel page-directory: not good\n");
		return;
	}
	tsk->tss.cr3 = (unsigned long) swapper_pg_dir;
	if (tsk == current)
		__asm__ __volatile__("movl %0,%%cr3": :"a" (tsk->tss.cr3));
	if (mem_map[MAP_NR(pg_dir)] > 1) {
		free_page(pg_dir);
		return;
	}
	page_dir = (unsigned long *) pg_dir;
	for (i = 0 ; i < PTRS_PER_PAGE ; i++,page_dir++)
		free_one_table(page_dir);
	free_page(pg_dir);
	invalidate();
}

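/*
 * clone_page_tables() lets the new task share the current page
 * directory outright by bumping its reference count, so both tasks end
 * up in one address space (presumably the clone-style fork path, as
 * opposed to copy_page_tables() below).
 */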
int clone_page_tables(struct task_struct * tsk)
{
	unsigned long pg_dir;

	pg_dir = current->tss.cr3;
	mem_map[MAP_NR(pg_dir)]++;
	tsk->tss.cr3 = pg_dir;
	return 0;
}

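/*
 * copy_page_tables() builds a full copy of the current page tables for
 * a forked task. Present pages are shared copy-on-write: writable COW
 * pages are made read-only in both copies and their reference counts
 * incremented, while swapped-out entries go through swap_duplicate().
 */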
int copy_page_tables(struct task_struct * tsk)
{
	int i;
	unsigned long old_pg_dir, *old_page_dir;
	unsigned long new_pg_dir, *new_page_dir;

	if (!(new_pg_dir = get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	old_pg_dir = current->tss.cr3;
	tsk->tss.cr3 = new_pg_dir;
	old_page_dir = (unsigned long *) old_pg_dir;
	new_page_dir = (unsigned long *) new_pg_dir;
	for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
		int j;
		unsigned long old_pg_table, *old_page_table;
		unsigned long new_pg_table, *new_page_table;

		old_pg_table = *old_page_dir;
		if (!old_pg_table)
			continue;
		if (old_pg_table >= high_memory || !(old_pg_table & PAGE_PRESENT)) {
			printk("copy_page_tables: bad page table: "
				"probable memory corruption");
			*old_page_dir = 0;
			continue;
		}
		if (mem_map[MAP_NR(old_pg_table)] & MAP_PAGE_RESERVED) {
			*new_page_dir = old_pg_table;
			continue;
		}
		if (!(new_pg_table = get_free_page(GFP_KERNEL))) {
			free_page_tables(tsk);
			return -ENOMEM;
		}
		old_page_table = (unsigned long *) (PAGE_MASK & old_pg_table);
		new_page_table = (unsigned long *) (PAGE_MASK & new_pg_table);
		for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
			unsigned long pg;
			pg = *old_page_table;
			if (!pg)
				continue;
			if (!(pg & PAGE_PRESENT)) {
				*new_page_table = swap_duplicate(pg);
				continue;
			}
			if ((pg & (PAGE_RW | PAGE_COW)) == (PAGE_RW | PAGE_COW))
				pg &= ~PAGE_RW;
			*new_page_table = pg;
			if (mem_map[MAP_NR(pg)] & MAP_PAGE_RESERVED)
				continue;
			*old_page_table = pg;
			mem_map[MAP_NR(pg)]++;
		}
		*new_page_dir = new_pg_table | PAGE_TABLE;
	}
	invalidate();
	return 0;
}

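/*
 * unmap_page_range() removes all mappings in the given (page-aligned)
 * range, freeing the underlying pages or swap entries, and frees each
 * page table whose entire range is being unmapped.
 */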
int unmap_page_range(unsigned long from, unsigned long size)
{
	unsigned long page, page_dir;
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt, pc;

	if (from & ~PAGE_MASK) {
		printk("unmap_page_range called with wrong alignment\n");
		return -EINVAL;
	}
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt,
	     pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		if (!(page_dir = *dir)) {
			poff = 0;
			continue;
		}
		if (!(page_dir & PAGE_PRESENT)) {
			printk("unmap_page_range: bad page directory.");
			continue;
		}
		page_table = (unsigned long *)(PAGE_MASK & page_dir);
		if (poff) {
			page_table += poff;
			poff = 0;
		}
		for (pc = pcnt; pc--; page_table++) {
			if ((page = *page_table) != 0) {
				*page_table = 0;
				if (1 & page) {
					if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
						if (current->rss > 0)
							--current->rss;
					free_page(PAGE_MASK & page);
				} else
					swap_free(page);
			}
		}
		if (pcnt == PTRS_PER_PAGE) {
			*dir = 0;
			free_page(PAGE_MASK & page_dir);
		}
	}
	invalidate();
	return 0;
}

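/*
 * zeromap_page_range() points every page in the range at the global
 * zero page with the given protection bits (or clears the entries when
 * mask is 0), allocating page tables as needed and freeing whatever was
 * mapped there before.
 */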
int zeromap_page_range(unsigned long from, unsigned long size, int mask)
{
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt;
	unsigned long page;

	if (mask) {
		if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
			printk("zeromap_page_range: mask = %08x\n",mask);
			return -EINVAL;
		}
		mask |= ZERO_PAGE;
	}
	if (from & ~PAGE_MASK) {
		printk("zeromap_page_range: from = %08lx\n",from);
		return -EINVAL;
	}
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	while (size > 0) {
		if (!(PAGE_PRESENT & *dir)) {
			if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
				invalidate();
				return -ENOMEM;
			}
			if (PAGE_PRESENT & *dir) {
				free_page((unsigned long) page_table);
				page_table = (unsigned long *)(PAGE_MASK & *dir++);
			} else
				*dir++ = ((unsigned long) page_table) | PAGE_TABLE;
		} else
			page_table = (unsigned long *)(PAGE_MASK & *dir++);
		page_table += poff;
		poff = 0;
		for (size -= pcnt; pcnt-- ;) {
			if ((page = *page_table) != 0) {
				*page_table = 0;
				if (page & PAGE_PRESENT) {
					if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
						if (current->rss > 0)
							--current->rss;
					free_page(PAGE_MASK & page);
				} else
					swap_free(page);
			}
			*page_table++ = mask;
		}
		pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
	}
	invalidate();
	return 0;
}

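/*
 * remap_page_range() points the virtual range 'from' at the physical
 * pages starting at 'to' (typically device or out-of-range memory). A
 * zero mask unmaps the range; mappings of managed RAM also bump the
 * page reference count and the task's rss.
 */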
int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask)
{
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt;
	unsigned long page;

	if (mask) {
		if ((mask & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT) {
			printk("remap_page_range: mask = %08x\n",mask);
			return -EINVAL;
		}
	}
	if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
		printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
		return -EINVAL;
	}
	dir = PAGE_DIR_OFFSET(current->tss.cr3,from);
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	while (size > 0) {
		if (!(PAGE_PRESENT & *dir)) {
			if (!(page_table = (unsigned long*) get_free_page(GFP_KERNEL))) {
				invalidate();
				return -1;
			}
			*dir++ = ((unsigned long) page_table) | PAGE_TABLE;
		} else
			page_table = (unsigned long *)(PAGE_MASK & *dir++);
		if (poff) {
			page_table += poff;
			poff = 0;
		}

		for (size -= pcnt; pcnt-- ;) {
			if ((page = *page_table) != 0) {
				*page_table = 0;
				if (PAGE_PRESENT & page) {
					if (!(mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED))
						if (current->rss > 0)
							--current->rss;
					free_page(PAGE_MASK & page);
				} else
					swap_free(page);
			}

			if (!mask)
				*page_table++ = 0;
			else if (to >= high_memory)
				*page_table++ = (to | mask);
			else if (!mem_map[MAP_NR(to)])
				*page_table++ = 0;
			else {
				*page_table++ = (to | mask);
				if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
					++current->rss;
					mem_map[MAP_NR(to)]++;
				}
			}
			to += PAGE_SIZE;
		}
		pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
	}
	invalidate();
	return 0;
}

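/*
 * put_page() enters 'page' into the page table of 'tsk' at 'address'
 * with the given protection bits. A missing page table is treated as an
 * error (the directory entry is pointed at BAD_PAGETABLE); an existing
 * mapping is complained about, cleared and then replaced.
 */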
unsigned long put_page(struct task_struct * tsk,unsigned long page,
	unsigned long address,int prot)
{
	unsigned long *page_table;

	if ((prot & (PAGE_MASK|PAGE_PRESENT)) != PAGE_PRESENT)
		printk("put_page: prot = %08x\n",prot);
	if (page >= high_memory) {
		printk("put_page: trying to put page %08lx at %08lx\n",page,address);
		return 0;
	}
	page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	if ((*page_table) & PAGE_PRESENT)
		page_table = (unsigned long *) (PAGE_MASK & *page_table);
	else {
		printk("put_page: bad page directory entry\n");
		oom(tsk);
		*page_table = BAD_PAGETABLE | PAGE_TABLE;
		return 0;
	}
	page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if (*page_table) {
		printk("put_page: page already exists\n");
		*page_table = 0;
		invalidate();
	}
	*page_table = page | prot;

	return page;
}

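/*
 * put_dirty_page() is like put_page(), but maps the page dirty and
 * private, and is willing to allocate the page table itself; it appears
 * to be meant for execve() setting up the initial stack and argument
 * pages, where no prior mapping can exist.
 */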
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
	unsigned long tmp, *page_table;

	if (page >= high_memory)
		printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
	if (mem_map[MAP_NR(page)] != 1)
		printk("mem_map disagrees with %08lx at %08lx\n",page,address);
	page_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	if (PAGE_PRESENT & *page_table)
		page_table = (unsigned long *) (PAGE_MASK & *page_table);
	else {
		if (!(tmp = get_free_page(GFP_KERNEL)))
			return 0;
		if (PAGE_PRESENT & *page_table) {
			free_page(tmp);
			page_table = (unsigned long *) (PAGE_MASK & *page_table);
		} else {
			*page_table = tmp | PAGE_TABLE;
			page_table = (unsigned long *) tmp;
		}
	}
	page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if (*page_table) {
		printk("put_dirty_page: page already exists\n");
		*page_table = 0;
		invalidate();
	}
	*page_table = page | (PAGE_DIRTY | PAGE_PRIVATE);

	return page;
}

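/*
 * __do_wp_page() does the actual copy-on-write work: if the old page is
 * still shared, its contents are copied into a freshly allocated page
 * which is mapped read-write in its place; if this was the last user,
 * the existing page is simply made writable again.
 */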
static void __do_wp_page(unsigned long error_code, unsigned long address,
	struct task_struct * tsk, unsigned long user_esp)
{
	unsigned long *pde, pte, old_page, prot;
	unsigned long new_page;

	new_page = __get_free_page(GFP_KERNEL);
	pde = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	pte = *pde;
	if (!(pte & PAGE_PRESENT))
		goto end_wp_page;
	if ((pte & PAGE_TABLE) != PAGE_TABLE || pte >= high_memory)
		goto bad_wp_pagetable;
	pte &= PAGE_MASK;
	pte += PAGE_PTR(address);
	old_page = *(unsigned long *) pte;
	if (!(old_page & PAGE_PRESENT))
		goto end_wp_page;
	if (old_page >= high_memory)
		goto bad_wp_page;
	if (old_page & PAGE_RW)
		goto end_wp_page;
	tsk->min_flt++;
	prot = (old_page & ~PAGE_MASK) | PAGE_RW;
	old_page &= PAGE_MASK;
	if (mem_map[MAP_NR(old_page)] != 1) {
		if (new_page) {
			if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
				++tsk->rss;
			copy_page(old_page,new_page);
			*(unsigned long *) pte = new_page | prot;
			free_page(old_page);
			invalidate();
			return;
		}
		free_page(old_page);
		oom(tsk);
		*(unsigned long *) pte = BAD_PAGE | prot;
		invalidate();
		return;
	}
	*(unsigned long *) pte |= PAGE_RW;
	invalidate();
	if (new_page)
		free_page(new_page);
	return;
bad_wp_page:
	printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
	*(unsigned long *) pte = BAD_PAGE | PAGE_SHARED;
	send_sig(SIGKILL, tsk, 1);
	goto end_wp_page;
bad_wp_pagetable:
	printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n",address,pte);
	*pde = BAD_PAGETABLE | PAGE_TABLE;
	send_sig(SIGKILL, tsk, 1);
end_wp_page:
	if (new_page)
		free_page(new_page);
	return;
}

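/*
 * do_wp_page() is called on a write-protect fault: a user-mode write to
 * a page not marked copy-on-write gets SIGSEGV; a COW page that is no
 * longer shared is just made writable and dirty, otherwise
 * __do_wp_page() performs the copy.
 */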
void do_wp_page(unsigned long error_code, unsigned long address,
	struct task_struct * tsk, unsigned long user_esp)
{
	unsigned long page;
	unsigned long * pg_table;

	pg_table = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	page = *pg_table;
	if (!page)
		return;
	if ((page & PAGE_PRESENT) && page < high_memory) {
		pg_table = (unsigned long *) ((page & PAGE_MASK) + PAGE_PTR(address));
		page = *pg_table;
		if (!(page & PAGE_PRESENT))
			return;
		if (page & PAGE_RW)
			return;
		if (!(page & PAGE_COW)) {
			if (user_esp && tsk == current) {
				current->tss.cr2 = address;
				current->tss.error_code = error_code;
				current->tss.trap_no = 14;
				send_sig(SIGSEGV, tsk, 1);
				return;
			}
		}
		if (mem_map[MAP_NR(page)] == 1) {
			*pg_table |= PAGE_RW | PAGE_DIRTY;
			invalidate();
			return;
		}
		__do_wp_page(error_code, address, tsk, user_esp);
		return;
	}
	printk("bad page directory entry %08lx\n",page);
	*pg_table = 0;
}

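/*
 * __verify_write() simulates a write fault on every page in the range.
 * It is needed because the 80386 ignores the WP bit in supervisor mode,
 * so a kernel copy to user space would silently write through read-only
 * (copy-on-write) pages unless they are broken up in advance here.
 */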
int __verify_write(unsigned long start, unsigned long size)
{
	size--;
	size += start & ~PAGE_MASK;
	size >>= PAGE_SHIFT;
	start &= PAGE_MASK;
	do {
		do_wp_page(1,start,current,0);
		start += PAGE_SIZE;
	} while (size--);
	return 0;
}

static inline void get_empty_page(struct task_struct * tsk, unsigned long address)
{
	unsigned long tmp;

	if (!(tmp = get_free_page(GFP_KERNEL))) {
		oom(tsk);
		tmp = BAD_PAGE;
	}
	if (!put_page(tsk,tmp,address,PAGE_PRIVATE))
		free_page(tmp);
}

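/*
 * try_to_share() checks the page at 'address' in task 'p' to see
 * whether it exists and is clean. On a read fault the page is then
 * shared read-only with 'tsk'; on a write fault its contents are copied
 * into 'newpage' instead, which is mapped private.
 */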
static int try_to_share(unsigned long address, struct task_struct * tsk,
	struct task_struct * p, unsigned long error_code, unsigned long newpage)
{
	unsigned long from;
	unsigned long to;
	unsigned long from_page;
	unsigned long to_page;

	from_page = (unsigned long)PAGE_DIR_OFFSET(p->tss.cr3,address);
	to_page = (unsigned long)PAGE_DIR_OFFSET(tsk->tss.cr3,address);

	from = *(unsigned long *) from_page;
	if (!(from & PAGE_PRESENT))
		return 0;
	from &= PAGE_MASK;
	from_page = from + PAGE_PTR(address);
	from = *(unsigned long *) from_page;

	if ((from & (PAGE_PRESENT | PAGE_DIRTY)) != PAGE_PRESENT)
		return 0;
	if (from >= high_memory)
		return 0;
	if (mem_map[MAP_NR(from)] & MAP_PAGE_RESERVED)
		return 0;

	to = *(unsigned long *) to_page;
	if (!(to & PAGE_PRESENT))
		return 0;
	to &= PAGE_MASK;
	to_page = to + PAGE_PTR(address);
	if (*(unsigned long *) to_page)
		return 0;

	if (error_code & PAGE_RW) {
		if (!newpage)
			return 0;
		copy_page((from & PAGE_MASK),newpage);
		to = newpage | PAGE_PRIVATE;
	} else {
		mem_map[MAP_NR(from)]++;
		from &= ~PAGE_RW;
		to = from;
		if (newpage)
			free_page(newpage);
	}
	*(unsigned long *) from_page = from;
	*(unsigned long *) to_page = to;
	invalidate();
	return 1;
}

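/*
 * share_page() tries to find a task the faulting page can be shared
 * with: every task running the same executable, or mapping the same
 * inode through a compatible vm_area (as judged by the area's share
 * op), is offered to try_to_share().
 */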
int share_page(struct vm_area_struct * area, struct task_struct * tsk,
	struct inode * inode,
	unsigned long address, unsigned long error_code, unsigned long newpage)
{
	struct task_struct ** p;

	if (!inode || inode->i_count < 2 || !area->vm_ops)
		return 0;
	for (p = &LAST_TASK ; p > &FIRST_TASK ; --p) {
		if (!*p)
			continue;
		if (tsk == *p)
			continue;
		if (inode != (*p)->executable) {
			if (!area)
				continue;
			if (area) {
				struct vm_area_struct * mpnt;
				for (mpnt = (*p)->mmap; mpnt; mpnt = mpnt->vm_next) {
					if (mpnt->vm_ops == area->vm_ops &&
					    mpnt->vm_inode->i_ino == area->vm_inode->i_ino &&
					    mpnt->vm_inode->i_dev == area->vm_inode->i_dev) {
						if (mpnt->vm_ops->share(mpnt, area, address))
							break;
					}
				}
				if (!mpnt)
					continue;
			}
		}
		if (try_to_share(address,tsk,*p,error_code,newpage))
			return 1;
	}
	return 0;
}

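/*
 * get_empty_pgtable() returns the page-directory entry covering
 * 'address', allocating a fresh page table if none is present. The
 * entry is re-checked after get_free_page(), which may have slept while
 * another fault filled the entry in.
 */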
static inline unsigned long get_empty_pgtable(struct task_struct * tsk,unsigned long address)
{
	unsigned long page;
	unsigned long *p;

	p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	if (PAGE_PRESENT & *p)
		return *p;
	if (*p) {
		printk("get_empty_pgtable: bad page-directory entry \n");
		*p = 0;
	}
	page = get_free_page(GFP_KERNEL);
	p = PAGE_DIR_OFFSET(tsk->tss.cr3,address);
	if (PAGE_PRESENT & *p) {
		free_page(page);
		return *p;
	}
	if (*p) {
		printk("get_empty_pgtable: bad page-directory entry \n");
		*p = 0;
	}
	if (page) {
		*p = page | PAGE_TABLE;
		return *p;
	}
	oom(current);
	*p = BAD_PAGETABLE | PAGE_TABLE;
	return 0;
}

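/*
 * do_no_page() handles a fault on a page that is not present: a nonzero
 * page-table entry means the page lives in swap and is read back with
 * swap_in(); otherwise the vm_area covering the address supplies the
 * page via its nopage op, while anonymous areas, the brk region and
 * automatic stack growth get a fresh empty page.
 */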
void do_no_page(unsigned long error_code, unsigned long address,
	struct task_struct *tsk, unsigned long user_esp)
{
	unsigned long tmp;
	unsigned long page;
	struct vm_area_struct * mpnt;

	page = get_empty_pgtable(tsk,address);
	if (!page)
		return;
	page &= PAGE_MASK;
	page += PAGE_PTR(address);
	tmp = *(unsigned long *) page;
	if (tmp & PAGE_PRESENT)
		return;
	++tsk->rss;
	if (tmp) {
		++tsk->maj_flt;
		swap_in((unsigned long *) page);
		return;
	}
	address &= 0xfffff000;
	tmp = 0;
	for (mpnt = tsk->mmap; mpnt != NULL; mpnt = mpnt->vm_next) {
		if (address < mpnt->vm_start)
			break;
		if (address >= mpnt->vm_end) {
			tmp = mpnt->vm_end;
			continue;
		}
		if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) {
			++tsk->min_flt;
			get_empty_page(tsk,address);
			return;
		}
		mpnt->vm_ops->nopage(error_code, mpnt, address);
		return;
	}
	if (tsk != current)
		goto ok_no_page;
	if (address >= tsk->end_data && address < tsk->brk)
		goto ok_no_page;
	if (mpnt && mpnt == tsk->stk_vma &&
	    address - tmp > mpnt->vm_start - address &&
	    tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) {
		mpnt->vm_start = address;
		goto ok_no_page;
	}
	tsk->tss.cr2 = address;
	current->tss.error_code = error_code;
	current->tss.trap_no = 14;
	send_sig(SIGSEGV,tsk,1);
	if (error_code & 4)
		return;
ok_no_page:
	++tsk->min_flt;
	get_empty_page(tsk,address);
}

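/*
 * do_page_fault() is the i386 page-fault handler; the faulting address
 * comes from %cr2. User-space faults are dispatched to do_wp_page()
 * (write-protect) or do_no_page() (not present). The expected fault on
 * kernel address 0 is how mem_init() discovers that the CPU honours the
 * WP bit in supervisor mode; any other kernel-space fault is dumped and
 * treated as an oops.
 */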
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address;
	unsigned long user_esp = 0;
	unsigned int bit;

	__asm__("movl %%cr2,%0":"=r" (address));
	if (address < TASK_SIZE) {
		if (error_code & 4) {
			if (regs->eflags & VM_MASK) {
				bit = (address - 0xA0000) >> PAGE_SHIFT;
				if (bit < 32)
					current->screen_bitmap |= 1 << bit;
			} else
				user_esp = regs->esp;
		}
		if (error_code & PAGE_PRESENT)
			do_wp_page(error_code, address, current, user_esp);
		else
			do_no_page(error_code, address, current, user_esp);
		return;
	}
	address -= TASK_SIZE;
	if (wp_works_ok < 0 && address == 0 && (error_code & PAGE_PRESENT)) {
		wp_works_ok = 1;
		pg0[0] = PAGE_SHARED;
		printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
		return;
	}
	if (address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		pg0[0] = PAGE_SHARED;
	} else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at kernel address %08lx\n",address);
	address += TASK_SIZE;
	__asm__("movl %%cr3,%0" : "=r" (user_esp));
	printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
		current->tss.cr3, user_esp);
	user_esp = ((unsigned long *) user_esp)[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", user_esp);
	if (user_esp & PAGE_PRESENT) {
		user_esp &= PAGE_MASK;
		address &= 0x003ff000;
		user_esp = ((unsigned long *) user_esp)[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", user_esp);
	}
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);
}

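/*
 * BAD_PAGE and BAD_PAGETABLE point at these statically allocated pages,
 * used as a safe fallback when a process runs out of memory:
 * __bad_pagetable() fills a page table with entries pointing at the bad
 * page, while __bad_page() and __zero_page() return zero-filled pages.
 */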
unsigned long __bad_pagetable(void)
{
	extern char empty_bad_page_table[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (BAD_PAGE + PAGE_TABLE),
		 "D" ((long) empty_bad_page_table),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page_table;
}

unsigned long __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_bad_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page;
}

unsigned long __zero_page(void)
{
	extern char empty_zero_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_zero_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_zero_page;
}

void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	i = high_memory >> PAGE_SHIFT;
	while (i-- > 0) {
		total++;
		if (mem_map[i] & MAP_PAGE_RESERVED)
			reserved++;
		else if (!mem_map[i])
			free++;
		else
			shared += mem_map[i]-1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	show_buffers();
}

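/*
 * paging_init() sets up the kernel page tables so that physical memory
 * is mapped both at virtual address 0 and at the kernel offset
 * (directory slot 768 onwards, i.e. 3GB), allocating the page tables
 * from start_mem. It returns the new start of free memory.
 */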
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long * pg_dir;
	unsigned long * pg_table;
	unsigned long tmp;
	unsigned long address;

#if 0
	memset((void *) 0, 0, PAGE_SIZE);
#endif
	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	while (address < end_mem) {
		tmp = *(pg_dir + 768);
		if (!tmp) {
			tmp = start_mem | PAGE_TABLE;
			*(pg_dir + 768) = tmp;
			start_mem += PAGE_SIZE;
		}
		*pg_dir = tmp;
		pg_dir++;
		pg_table = (unsigned long *) (tmp & PAGE_MASK);
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = address | PAGE_SHARED;
			else
				*pg_table = 0;
			address += PAGE_SIZE;
		}
	}
	invalidate();
	return start_mem;
}

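/*
 * mem_init() sets up the mem_map array and the free-area bitmaps, marks
 * the usable low-memory pages and everything above the kernel as free,
 * feeds those pages to the page allocator, prints the memory statistics
 * and finally probes whether the processor honours the WP bit in
 * supervisor mode (the deliberate fault caught in do_page_fault()).
 */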
void mem_init(unsigned long start_low_mem,
	unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp, mask;
	unsigned short * p;
	extern int etext;

	cli();
	end_mem &= PAGE_MASK;
	high_memory = end_mem;
	start_mem += 0x0000000f;
	start_mem &= ~0x0000000f;
	tmp = MAP_NR(end_mem);
	mem_map = (unsigned short *) start_mem;
	p = mem_map + tmp;
	start_mem = (unsigned long) p;
	while (p > mem_map)
		*--p = MAP_PAGE_RESERVED;

	for (mask = PAGE_MASK, tmp = 0 ; tmp < NR_MEM_LISTS ; tmp++, mask <<= 1) {
		unsigned long bitmap_size;
		free_area_list[tmp].prev = free_area_list[tmp].next = &free_area_list[tmp];
		end_mem = (end_mem + ~mask) & mask;
		bitmap_size = end_mem >> (PAGE_SHIFT + tmp);
		bitmap_size = (bitmap_size + 7) >> 3;
		free_area_map[tmp] = (unsigned char *) start_mem;
		memset((void *) start_mem, 0, bitmap_size);
		start_mem += bitmap_size;
	}

	start_low_mem = PAGE_ALIGN(start_low_mem);
	start_mem = PAGE_ALIGN(start_mem);

	while (start_low_mem < 0x9f000) {
		mem_map[MAP_NR(start_low_mem)] = 0;
		start_low_mem += PAGE_SIZE;
	}

	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)] = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
		if (mem_map[MAP_NR(tmp)]) {
			if (tmp >= 0xA0000 && tmp < 0x100000)
				reservedpages++;
			else if (tmp < (unsigned long) &etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)] = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		high_memory >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));

	wp_works_ok = -1;
	pg0[0] = PAGE_READONLY;
	invalidate();
	__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
	pg0[0] = 0;
	invalidate();
	if (wp_works_ok < 0)
		wp_works_ok = 0;
	return;
}

void si_meminfo(struct sysinfo *val)
{
	int i;

	i = high_memory >> PAGE_SHIFT;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (mem_map[i] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		if (!mem_map[i])
			continue;
		val->sharedram += mem_map[i]-1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
}

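/*
 * file_mmap_nopage() fills in a missing page of a file mapping: it
 * first tries to share the page with another task mapping the same
 * inode, and otherwise looks up the blocks with bmap() and reads them
 * in through bread_page(). A read-only fault gets one more chance to
 * share after the read completes.
 */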
void file_mmap_nopage(int error_code, struct vm_area_struct * area, unsigned long address)
{
	struct inode * inode = area->vm_inode;
	unsigned int block;
	unsigned long page;
	int nr[8];
	int i, j;
	int prot = area->vm_page_prot;

	address &= PAGE_MASK;
	block = address - area->vm_start + area->vm_offset;
	block >>= inode->i_sb->s_blocksize_bits;

	page = get_free_page(GFP_KERNEL);
	if (share_page(area, area->vm_task, inode, address, error_code, page)) {
		++area->vm_task->min_flt;
		return;
	}

	++area->vm_task->maj_flt;
	if (!page) {
		oom(current);
		put_page(area->vm_task, BAD_PAGE, address, PAGE_PRIVATE);
		return;
	}
	for (i=0, j=0; i< PAGE_SIZE ; j++, block++, i += inode->i_sb->s_blocksize)
		nr[j] = bmap(inode,block);
	if (error_code & PAGE_RW)
		prot |= PAGE_RW | PAGE_DIRTY;
	page = bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, prot);

	if (!(prot & PAGE_RW)) {
		if (share_page(area, area->vm_task, inode, address, error_code, page))
			return;
	}
	if (put_page(area->vm_task,page,address,prot))
		return;
	free_page(page);
	oom(current);
}

void file_mmap_free(struct vm_area_struct * area)
{
	if (area->vm_inode)
		iput(area->vm_inode);
#if 0
	if (area->vm_inode)
		printk("Free inode %x:%d (%d)\n",area->vm_inode->i_dev,
			area->vm_inode->i_ino, area->vm_inode->i_count);
#endif
}

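/*
 * file_mmap_share() says two file mappings may share pages only if they
 * map the same inode at identical start, end, offset and protection.
 */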
int file_mmap_share(struct vm_area_struct * area1,
	struct vm_area_struct * area2,
	unsigned long address)
{
	if (area1->vm_inode != area2->vm_inode)
		return 0;
	if (area1->vm_start != area2->vm_start)
		return 0;
	if (area1->vm_end != area2->vm_end)
		return 0;
	if (area1->vm_offset != area2->vm_offset)
		return 0;
	if (area1->vm_page_prot != area2->vm_page_prot)
		return 0;
	return 1;
}

struct vm_operations_struct file_mmap = {
	NULL,
	file_mmap_free,
	file_mmap_nopage,
	NULL,
	file_mmap_share,
	NULL,
};