This source file includes the following definitions.
- oom
- free_one_pte
- free_one_table
- clear_page_tables
- free_page_tables
- clone_page_tables
- copy_page_tables
- unmap_page_range
- zeromap_page_range
- remap_page_range
- put_page
- put_dirty_page
- do_wp_page
- verify_area
- get_empty_page
- try_to_share
- share_page
- get_empty_pgtable
- do_swap_page
- do_no_page
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36 #include <linux/config.h>
37 #include <linux/signal.h>
38 #include <linux/sched.h>
39 #include <linux/head.h>
40 #include <linux/kernel.h>
41 #include <linux/errno.h>
42 #include <linux/string.h>
43 #include <linux/types.h>
44 #include <linux/ptrace.h>
45 #include <linux/mman.h>
46 #include <linux/mm.h>
47
48 #include <asm/system.h>
49 #include <asm/segment.h>
50
/* Highest physical address usable as normal RAM; set up at boot. */
unsigned long high_memory = 0;

/* Free-memory bookkeeping for the buddy-style allocator. */
int nr_swap_pages = 0;
int nr_free_pages = 0;
struct mem_list free_area_list[NR_MEM_LISTS];
unsigned char * free_area_map[NR_MEM_LISTS];

/* Copy one full page of data from 'from' to 'to' (both page-aligned). */
#define copy_page(from,to) memcpy((void *) to, (void *) from, PAGE_SIZE)

/* Per-physical-page use counts / flags; indexed via MAP_NR(). */
mem_map_t * mem_map = NULL;

/* True if 'addr' lies within process p's code segment. */
#define CODE_SPACE(addr,p) ((addr) < (p)->end_code)
67
68
69
70
71
72 void oom(struct task_struct * task)
73 {
74 printk("\nOut of memory for %s.\n", current->comm);
75 task->sigaction[SIGKILL-1].sa_handler = NULL;
76 task->blocked &= ~(1<<(SIGKILL-1));
77 send_sig(SIGKILL,task,1);
78 }
79
80 static inline void free_one_pte(pte_t * page_table)
81 {
82 pte_t page = *page_table;
83
84 if (pte_none(page))
85 return;
86 pte_clear(page_table);
87 if (!pte_present(page)) {
88 swap_free(pte_val(page));
89 return;
90 }
91 free_page(pte_page(page));
92 return;
93 }
94
95 static void free_one_table(pgd_t * page_dir)
96 {
97 int j;
98 pgd_t pg_table = *page_dir;
99 pte_t * page_table;
100 unsigned long page;
101
102 if (pgd_none(pg_table))
103 return;
104 pgd_clear(page_dir);
105 if (pgd_bad(pg_table)) {
106 printk("Bad page table: [%p]=%08lx\n",page_dir,pgd_val(pg_table));
107 return;
108 }
109 page = pgd_page(pg_table);
110 if (mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED)
111 return;
112 page_table = (pte_t *) page;
113 for (j = 0 ; j < PTRS_PER_PAGE ; j++,page_table++)
114 free_one_pte(page_table);
115 free_page(page);
116 }
117
118
119
120
121
122
123
124
/*
 * clear_page_tables() - empty the user half of a task's page
 * directory, as done by exec(): user mappings (directory entries
 * 0..767) are torn down while the kernel mappings (768..1023, the
 * top gigabyte on i386) are kept.
 *
 * If the directory page is shared (use count > 1, e.g. after
 * clone_page_tables()), the task instead gets a fresh private
 * directory containing only the kernel entries; the shared one just
 * loses a reference.
 */
void clear_page_tables(struct task_struct * tsk)
{
	int i;
	pgd_t * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0])
		panic("task[0] (swapper) doesn't support exec()\n");
	page_dir = PAGE_DIR_OFFSET(tsk, 0);
	if (!page_dir || page_dir == swapper_pg_dir) {
		printk("Trying to clear kernel page-directory: not good\n");
		return;
	}
	if (mem_map[MAP_NR((unsigned long) page_dir)] > 1) {
		pgd_t * new_pg;

		/* Shared directory: build a private copy holding only
		 * the kernel mappings, then drop our reference. */
		if (!(new_pg = (pgd_t *) get_free_page(GFP_KERNEL))) {
			oom(tsk);
			return;
		}
		for (i = 768 ; i < 1024 ; i++)
			new_pg[i] = page_dir[i];
		free_page((unsigned long) page_dir);
		SET_PAGE_DIR(tsk, new_pg);
		return;
	}
	/* Exclusively owned: free every user-space page table in place. */
	for (i = 0 ; i < 768 ; i++,page_dir++)
		free_one_table(page_dir);
	invalidate();
	return;
}
157
158
159
160
/*
 * free_page_tables() - release a task's page directory, as done at
 * process exit.  The task is pointed at swapper_pg_dir first so it
 * never runs on freed tables.  A shared directory (use count > 1)
 * just drops one reference; an exclusively owned one has all of its
 * page tables freed as well.
 */
void free_page_tables(struct task_struct * tsk)
{
	int i;
	pgd_t * page_dir;

	if (!tsk)
		return;
	if (tsk == task[0]) {
		printk("task[0] (swapper) killed: unable to recover\n");
		panic("Trying to free up swapper memory space");
	}
	page_dir = PAGE_DIR_OFFSET(tsk, 0);
	if (!page_dir || page_dir == swapper_pg_dir) {
		printk("Trying to free kernel page-directory: not good\n");
		return;
	}
	SET_PAGE_DIR(tsk, swapper_pg_dir);
	/* Shared via clone_page_tables(): the last user frees the tables. */
	if (mem_map[MAP_NR((unsigned long) page_dir)] > 1) {
		free_page((unsigned long) page_dir);
		return;
	}
	for (i = 0 ; i < PTRS_PER_PAGE ; i++)
		free_one_table(page_dir + i);
	free_page((unsigned long) page_dir);
	invalidate();
}
187
188
189
190
191
192
193
194 int clone_page_tables(struct task_struct * tsk)
195 {
196 pgd_t * pg_dir;
197
198 pg_dir = PAGE_DIR_OFFSET(current, 0);
199 mem_map[MAP_NR((unsigned long) pg_dir)]++;
200 SET_PAGE_DIR(tsk, pg_dir);
201 return 0;
202 }
203
204
205
206
207
208
/*
 * copy_page_tables() - fork-style duplication of the current task's
 * address space into tsk.  A new page directory is allocated and each
 * user page table is copied entry by entry.  Data pages themselves
 * are NOT copied: present pages are shared with their use count
 * bumped, and copy-on-write pages are write-protected in BOTH parent
 * and child so the first write faults into do_wp_page().  Swapped-out
 * entries get their swap slot reference duplicated.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the child's
 * partially built tables are freed via free_page_tables()).
 */
int copy_page_tables(struct task_struct * tsk)
{
	int i;
	pgd_t *old_page_dir;
	pgd_t *new_page_dir;

	new_page_dir = (pgd_t *) get_free_page(GFP_KERNEL);
	if (!new_page_dir)
		return -ENOMEM;
	old_page_dir = PAGE_DIR_OFFSET(current, 0);
	SET_PAGE_DIR(tsk, new_page_dir);
	for (i = 0 ; i < PTRS_PER_PAGE ; i++,old_page_dir++,new_page_dir++) {
		int j;
		pgd_t old_pg_table;
		pte_t *old_page_table, *new_page_table;

		old_pg_table = *old_page_dir;
		if (pgd_none(old_pg_table))
			continue;
		if (pgd_bad(old_pg_table)) {
			printk("copy_page_tables: bad page table: "
				"probable memory corruption\n");
			pgd_clear(old_page_dir);
			continue;
		}
		/* Reserved tables (kernel mappings) are shared outright. */
		if (mem_map[MAP_NR(pgd_page(old_pg_table))] & MAP_PAGE_RESERVED) {
			*new_page_dir = old_pg_table;
			continue;
		}
		if (!(new_page_table = (pte_t *) get_free_page(GFP_KERNEL))) {
			free_page_tables(tsk);
			return -ENOMEM;
		}
		old_page_table = (pte_t *) pgd_page(old_pg_table);
		pgd_set(new_page_dir, new_page_table);
		for (j = 0 ; j < PTRS_PER_PAGE ; j++,old_page_table++,new_page_table++) {
			pte_t pte = *old_page_table;
			if (pte_none(pte))
				continue;
			if (!pte_present(pte)) {
				/* Swapped out: both tasks now hold the slot. */
				swap_duplicate(pte_val(pte));
				*new_page_table = pte;
				continue;
			}
			/* Pages beyond RAM (device mappings) and reserved
			 * pages are shared without refcounting. */
			if (pte_page(pte) > high_memory || (mem_map[MAP_NR(pte_page(pte))] & MAP_PAGE_RESERVED)) {
				*new_page_table = pte;
				continue;
			}
			/* Copy-on-write: drop write permission in BOTH
			 * copies; the first write will fault and copy. */
			if (pte_cow(pte))
				pte = pte_wrprotect(pte);
			/* If the page sat in the swap cache, it is now
			 * shared and must be considered dirty. */
			if (delete_from_swap_cache(pte_page(pte)))
				pte = pte_mkdirty(pte);
			*new_page_table = pte;
			*old_page_table = pte;
			mem_map[MAP_NR(pte_page(pte))]++;
		}
	}
	invalidate();
	return 0;
}
269
270
271
272
273
/*
 * unmap_page_range() - remove all mappings in [from, from+size) of
 * the current task.  'from' must be page-aligned.  Present pages are
 * freed (decrementing rss unless the page is reserved); swapped-out
 * entries release their swap slot.  Any page table that is covered in
 * full (a whole PTRS_PER_PAGE run) is freed as well.
 *
 * Returns 0 on success, -EINVAL on misaligned 'from'.
 */
int unmap_page_range(unsigned long from, unsigned long size)
{
	pgd_t page_dir, * dir;
	pte_t page, * page_table;
	unsigned long poff, pcnt, pc;

	if (from & ~PAGE_MASK) {
		printk("unmap_page_range called with wrong alignment\n");
		return -EINVAL;
	}
	/* Convert byte size to a page count, rounding up. */
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current,from);
	/* poff: starting pte index within the first page table;
	 * pcnt: number of ptes handled in the current table. */
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt,
	     pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		page_dir = *dir;
		if (pgd_none(page_dir)) {
			poff = 0;
			continue;
		}
		if (pgd_bad(page_dir)) {
			printk("unmap_page_range: bad page directory.");
			continue;
		}
		page_table = (pte_t *) pgd_page(page_dir);
		if (poff) {
			page_table += poff;
			poff = 0;	/* subsequent tables start at slot 0 */
		}
		for (pc = pcnt; pc--; page_table++) {
			page = *page_table;
			if (!pte_none(page)) {
				pte_clear(page_table);
				if (pte_present(page)) {
					if (!(mem_map[MAP_NR(pte_page(page))] & MAP_PAGE_RESERVED))
						if (current->mm->rss > 0)
							--current->mm->rss;
					free_page(pte_page(page));
				} else
					swap_free(pte_val(page));
			}
		}
		/* Whole table emptied: free the page-table page itself. */
		if (pcnt == PTRS_PER_PAGE) {
			pgd_clear(dir);
			free_page(pgd_page(page_dir));
		}
	}
	invalidate();
	return 0;
}
327
/*
 * zeromap_page_range() - map [from, from+size) of the current task to
 * the shared ZERO_PAGE, write-protected, with the given protection.
 * Existing mappings in the range are torn down first (freeing pages /
 * swap slots and adjusting rss).  'from' must be page-aligned.
 * Page tables are allocated on demand.
 *
 * Returns 0 on success, -EINVAL on misalignment, -ENOMEM if a page
 * table cannot be allocated.
 */
int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot)
{
	pgd_t * dir;
	pte_t * page_table;
	unsigned long poff, pcnt;
	pte_t zero_pte;

	if (from & ~PAGE_MASK) {
		printk("zeromap_page_range: from = %08lx\n",from);
		return -EINVAL;
	}
	/* All slots share one read-only pte pointing at ZERO_PAGE. */
	zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE, prot));
	dir = PAGE_DIR_OFFSET(current,from);
	size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
	poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	while (size > 0) {
		if (!pgd_present(*dir)) {
			if (!(page_table = (pte_t *) get_free_page(GFP_KERNEL))) {
				invalidate();
				return -ENOMEM;
			}
			/* get_free_page() may sleep: re-check in case a
			 * racing fault installed a table meanwhile. */
			if (pgd_present(*dir)) {
				free_page((unsigned long) page_table);
				page_table = (pte_t *) pgd_page(*dir);
			} else
				pgd_set(dir, page_table);
		} else
			page_table = (pte_t *) pgd_page(*dir);
		dir++;
		page_table += poff;
		poff = 0;
		for (size -= pcnt; pcnt-- ;) {
			pte_t page = *page_table;
			/* Drop whatever was mapped here before. */
			if (!pte_none(page)) {
				pte_clear(page_table);
				if (pte_present(page)) {
					if (!(mem_map[MAP_NR(pte_page(page))] & MAP_PAGE_RESERVED))
						if (current->mm->rss > 0)
							--current->mm->rss;
					free_page(pte_page(page));
				} else
					swap_free(pte_val(page));
			}
			*page_table++ = zero_pte;
		}
		pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
	}
	invalidate();
	return 0;
}
381
382
383
384
385
386
387 int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
388 {
389 pgd_t * dir;
390 pte_t * page_table;
391 unsigned long poff, pcnt;
392
393 if ((from & ~PAGE_MASK) || (to & ~PAGE_MASK)) {
394 printk("remap_page_range: from = %08lx, to=%08lx\n",from,to);
395 return -EINVAL;
396 }
397 dir = PAGE_DIR_OFFSET(current,from);
398 size = (size + ~PAGE_MASK) >> PAGE_SHIFT;
399 poff = (from >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
400 if ((pcnt = PTRS_PER_PAGE - poff) > size)
401 pcnt = size;
402
403 while (size > 0) {
404 if (!pgd_present(*dir)) {
405 if (!(page_table = (pte_t *) get_free_page(GFP_KERNEL))) {
406 invalidate();
407 return -1;
408 }
409 if (pgd_present(*dir)) {
410 free_page((unsigned long) page_table);
411 page_table = (pte_t *) pgd_page(*dir);
412 } else
413 pgd_set(dir, page_table);
414 } else
415 page_table = (pte_t *) pgd_page(*dir);
416 dir++;
417 page_table += poff;
418 poff = 0;
419
420 for (size -= pcnt; pcnt-- ;) {
421 pte_t page = *page_table;
422 if (!pte_none(page)) {
423 pte_clear(page_table);
424 if (pte_present(page)) {
425 if (!(mem_map[MAP_NR(pte_page(page))] & MAP_PAGE_RESERVED))
426 if (current->mm->rss > 0)
427 --current->mm->rss;
428 free_page(pte_page(page));
429 } else
430 swap_free(pte_val(page));
431 }
432 if (to >= high_memory)
433 *page_table = mk_pte(to, prot);
434 else if (mem_map[MAP_NR(to)]) {
435 *page_table = mk_pte(to, prot);
436 if (!(mem_map[MAP_NR(to)] & MAP_PAGE_RESERVED)) {
437 ++current->mm->rss;
438 mem_map[MAP_NR(to)]++;
439 }
440 }
441 page_table++;
442 to += PAGE_SIZE;
443 }
444 pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size);
445 }
446 invalidate();
447 return 0;
448 }
449
450
451
452
453 static void put_page(pte_t * page_table, pte_t pte)
454 {
455 if (!pte_none(*page_table)) {
456 printk("put_page: page already exists\n");
457 free_page(pte_page(pte));
458 return;
459 }
460
461 *page_table = pte;
462 }
463
464
465
466
467
/*
 * put_dirty_page() - map 'page' at 'address' in tsk's address space as
 * a dirty, writable PAGE_COPY page (used when setting up argument and
 * environment pages during exec).  The intermediate page table is
 * allocated on demand, with the directory entry re-checked afterwards
 * because get_free_page() may sleep.  An already-occupied slot is
 * complained about and cleared first.
 *
 * Returns 'page' on success, 0 if no page table could be allocated.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
	pgd_t * page_dir;
	pte_t * page_table;

	/* Sanity checks: the page must be real RAM with use count 1. */
	if (page >= high_memory)
		printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
	if (mem_map[MAP_NR(page)] != 1)
		printk("mem_map disagrees with %08lx at %08lx\n",page,address);
	page_dir = PAGE_DIR_OFFSET(tsk,address);
	if (pgd_present(*page_dir)) {
		page_table = (pte_t *) pgd_page(*page_dir);
	} else {
		if (!(page_table = (pte_t *) get_free_page(GFP_KERNEL)))
			return 0;
		/* get_free_page() may have slept: someone else may have
		 * installed a table meanwhile. */
		if (pgd_present(*page_dir)) {
			free_page((unsigned long) page_table);
			page_table = (pte_t *) pgd_page(*page_dir);
		} else {
			pgd_set(page_dir, page_table);
		}
	}
	page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	if (!pte_none(*page_table)) {
		printk("put_dirty_page: page already exists\n");
		pte_clear(page_table);
		invalidate();
	}
	*page_table = pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY)));

	return page;
}
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
/*
 * do_wp_page() - handle a write fault on a present but write-protected
 * page (the copy-on-write path).  If the page has other users, it is
 * copied into a fresh page which is mapped writable+dirty; if we are
 * the sole user, the existing pte is simply made writable and dirty.
 *
 * A new page is speculatively allocated up front: __get_free_page()
 * may sleep, so allocating first means the page tables read below are
 * current.  An unneeded allocation is released at end_wp_page.
 *
 * NOTE(review): the write_access parameter is unused here; callers
 * such as verify_area() pass 1 -- confirm it is intentionally ignored.
 */
void do_wp_page(struct vm_area_struct * vma, unsigned long address,
	int write_access)
{
	pgd_t *page_dir;
	pte_t *page_table, pte;
	unsigned long old_page, new_page;

	new_page = __get_free_page(GFP_KERNEL);
	page_dir = PAGE_DIR_OFFSET(vma->vm_task,address);
	if (pgd_none(*page_dir))
		goto end_wp_page;
	if (pgd_bad(*page_dir))
		goto bad_wp_pagetable;
	page_table = (pte_t *) pgd_page(*page_dir);
	page_table += (address >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	pte = *page_table;
	/* The fault may have been resolved while we slept in the
	 * allocator; nothing to do if the pte is gone or now writable. */
	if (!pte_present(pte))
		goto end_wp_page;
	if (pte_write(pte))
		goto end_wp_page;
	old_page = pte_page(pte);
	if (old_page >= high_memory)
		goto bad_wp_page;
	vma->vm_task->mm->min_flt++;
	if (mem_map[MAP_NR(old_page)] != 1) {
		/* Page is shared: copy it and map the private copy. */
		if (new_page) {
			/* NOTE(review): rss is bumped only when the OLD
			 * page was reserved (not previously counted) --
			 * confirm this accounting is intended. */
			if (mem_map[MAP_NR(old_page)] & MAP_PAGE_RESERVED)
				++vma->vm_task->mm->rss;
			copy_page(old_page,new_page);
			*page_table = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
			free_page(old_page);
			invalidate();
			return;
		}
		/* No memory for the copy: kill the task and map BAD_PAGE. */
		free_page(old_page);
		oom(vma->vm_task);
		*page_table = BAD_PAGE;
		invalidate();
		return;
	}
	/* Sole user: just re-enable write access in place. */
	*page_table = pte_mkdirty(pte_mkwrite(pte));
	invalidate();
	if (new_page)
		free_page(new_page);
	return;
bad_wp_page:
	printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
	*page_table = BAD_PAGE;
	send_sig(SIGKILL, vma->vm_task, 1);
	goto end_wp_page;
bad_wp_pagetable:
	printk("do_wp_page: bogus page-table at address %08lx (%08lx)\n", address, pgd_val(*page_dir));
	pgd_set(page_dir, BAD_PAGETABLE);
	send_sig(SIGKILL, vma->vm_task, 1);
end_wp_page:
	if (new_page)
		free_page(new_page);
	return;
}
580
581
582
583
584 int verify_area(int type, const void * addr, unsigned long size)
585 {
586 struct vm_area_struct * vma;
587 unsigned long start = (unsigned long) addr;
588
589
590
591
592
593 if (get_fs() == get_ds())
594 return 0;
595
596 vma = find_vma(current, start);
597 if (!vma)
598 goto bad_area;
599 if (vma->vm_start <= start)
600 goto good_area;
601 if (!(vma->vm_flags & VM_GROWSDOWN))
602 goto bad_area;
603 if (vma->vm_end - start > current->rlim[RLIMIT_STACK].rlim_cur)
604 goto bad_area;
605
606 good_area:
607 if (type == VERIFY_WRITE)
608 goto check_write;
609 for (;;) {
610 struct vm_area_struct * next;
611 if (!(vma->vm_flags & VM_READ))
612 goto bad_area;
613 if (vma->vm_end - start >= size)
614 return 0;
615 next = vma->vm_next;
616 if (!next || vma->vm_end != next->vm_start)
617 goto bad_area;
618 vma = next;
619 }
620
621 check_write:
622 if (!(vma->vm_flags & VM_WRITE))
623 goto bad_area;
624 if (!wp_works_ok)
625 goto check_wp_fault_by_hand;
626 for (;;) {
627 if (vma->vm_end - start >= size)
628 break;
629 if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start)
630 goto bad_area;
631 vma = vma->vm_next;
632 if (!(vma->vm_flags & VM_WRITE))
633 goto bad_area;
634 }
635 return 0;
636
637 check_wp_fault_by_hand:
638 size--;
639 size += start & ~PAGE_MASK;
640 size >>= PAGE_SHIFT;
641 start &= PAGE_MASK;
642
643 for (;;) {
644 do_wp_page(vma, start, 1);
645 if (!size)
646 break;
647 size--;
648 start += PAGE_SIZE;
649 if (start < vma->vm_end)
650 continue;
651 vma = vma->vm_next;
652 if (!vma || vma->vm_start != start)
653 goto bad_area;
654 if (!(vma->vm_flags & VM_WRITE))
655 goto bad_area;;
656 }
657 return 0;
658
659 bad_area:
660 return -EFAULT;
661 }
662
663 static inline void get_empty_page(struct vm_area_struct * vma, pte_t * page_table)
664 {
665 unsigned long tmp;
666
667 if (!(tmp = get_free_page(GFP_KERNEL))) {
668 oom(vma->vm_task);
669 put_page(page_table, BAD_PAGE);
670 return;
671 }
672 put_page(page_table, pte_mkwrite(mk_pte(tmp, vma->vm_page_prot)));
673 }
674
675
676
677
678
679
680
681
682
/*
 * try_to_share() - attempt to satisfy a fault at to_address in to_area
 * by reusing the page that from_area already has mapped at the
 * corresponding from_address (both map the same inode at compatible
 * offsets).  If 'newpage' is non-zero the source page is copied into
 * it (private-write case); otherwise the physical page itself is
 * shared, use count bumped, and write-protected in the source when
 * the source mapping is private.
 *
 * Returns 1 if the destination pte was set up, 0 if sharing is not
 * possible (source not present, dirty private page, reserved page,
 * destination table missing or slot occupied, ...).
 */
static int try_to_share(unsigned long to_address, struct vm_area_struct * to_area,
	unsigned long from_address, struct vm_area_struct * from_area,
	unsigned long newpage)
{
	pgd_t * from_dir, * to_dir;
	pte_t * from_table, * to_table;
	pte_t from, to;

	from_dir = PAGE_DIR_OFFSET(from_area->vm_task,from_address);
	if (!pgd_present(*from_dir))
		return 0;
	from_table = (pte_t *) (pgd_page(*from_dir) + PAGE_PTR(from_address));
	from = *from_table;
	/* The source page must actually be in core. */
	if (!pte_present(from))
		return 0;
	/* A dirty page can only be shared from a VM_SHARED mapping. */
	if (pte_dirty(from)) {
		if (!(from_area->vm_flags & VM_SHARED))
			return 0;
		/* NOTE(review): this fires when the pte IS writable, yet
		 * the message says "nonwritable" -- the text and/or the
		 * condition look inverted; confirm the intended check. */
		if (pte_write(from)) {
			printk("nonwritable, but dirty, shared page\n");
			return 0;
		}
	}
	/* Never share pages beyond RAM or reserved (kernel) pages. */
	if (pte_page(from) >= high_memory)
		return 0;
	if (mem_map[MAP_NR(pte_page(from))] & MAP_PAGE_RESERVED)
		return 0;

	to_dir = PAGE_DIR_OFFSET(to_area->vm_task,to_address);
	if (!pgd_present(*to_dir))
		return 0;
	to_table = (pte_t *) (pgd_page(*to_dir) + PAGE_PTR(to_address));
	to = *to_table;
	/* Destination slot must still be empty. */
	if (!pte_none(to))
		return 0;

	if (newpage) {
		/* Private-write case: copy rather than share. */
		if (in_swap_cache(pte_page(from))) {
			if (!(from_area->vm_flags & VM_SHARED))
				return 0;
			/* NOTE(review): same suspicious message/condition
			 * pairing as above (here on !pte_write) -- confirm. */
			if (!pte_write(from)) {
				printk("nonwritable, but dirty, shared page\n");
				return 0;
			}
		}
		copy_page(pte_page(from), newpage);
		*to_table = mk_pte(newpage, to_area->vm_page_prot);
		return 1;
	}

	/* Share the physical page.  A swap-cached page becomes dirty
	 * (the cache copy is stale once shared) and leaves the cache. */
	if (in_swap_cache(pte_page(from))) {
		if (!(from_area->vm_flags & VM_SHARED))
			return 0;
		*from_table = pte_mkdirty(from);
		delete_from_swap_cache(pte_page(from));
	}
	mem_map[MAP_NR(pte_page(from))]++;
	*to_table = mk_pte(pte_page(from), to_area->vm_page_prot);
	/* Write-protect the source if it is a writable private mapping,
	 * so its next write triggers copy-on-write. */
	if (!pte_write(from))
		return 1;
	if (from_area->vm_flags & VM_SHARED)
		return 1;
	*from_table = pte_wrprotect(from);
	invalidate();
	return 1;
}
765
766
767
768
769
770
771
772
/*
 * share_page() - try to satisfy a fault by sharing a page some other
 * mapping of the same inode already has in core.  For a write fault
 * on a private mapping, the caller-supplied 'newpage' receives a copy
 * instead of sharing; otherwise an unused 'newpage' is freed once a
 * share succeeds.
 *
 * Returns 1 if the pte was set up (via try_to_share), 0 otherwise.
 */
static int share_page(struct vm_area_struct * area, unsigned long address,
	int write_access, unsigned long newpage)
{
	struct inode * inode;
	unsigned long offset;
	unsigned long from_address;
	unsigned long give_page;
	struct vm_area_struct * mpnt;

	/* Only inode-backed mappings with other users can share. */
	if (!area || !(inode = area->vm_inode) || inode->i_count < 2)
		return 0;

	give_page = 0;
	if (write_access && !(area->vm_flags & VM_SHARED)) {
		/* Private write: we need a page of our own to copy into. */
		if (!newpage)
			return 0;
		give_page = newpage;
	}
	/* File offset of the faulting page. */
	offset = address - area->vm_start + area->vm_offset;

	/* Walk the inode's circular ring of mappings (excluding us). */
	for (mpnt = area->vm_next_share; mpnt != area; mpnt = mpnt->vm_next_share) {
		if (mpnt->vm_inode != inode) {
			printk("Aiee! Corrupt vm_area_struct i_mmap ring\n");
			break;
		}
		/* Sub-page parts of the offsets must agree, or the pages
		 * can never line up. */
		if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK)
			continue;
		from_address = offset + mpnt->vm_start - mpnt->vm_offset;
		if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end)
			continue;
		if (!try_to_share(address, area, from_address, mpnt, give_page))
			continue;
		/* Success; free newpage if it was not consumed as give_page. */
		if (give_page || !newpage)
			return 1;
		free_page(newpage);
		return 1;
	}
	return 0;
}
818
819
820
821
/*
 * get_empty_pgtable() - return a pointer to the pte slot for 'address'
 * in tsk's page tables, allocating the intermediate page table on
 * demand.  Because get_free_page() may sleep, the directory entry is
 * re-checked after allocation and a concurrently installed table
 * wins.  On allocation failure the current task is OOM-killed,
 * BAD_PAGETABLE is installed and NULL is returned.
 */
static inline pte_t * get_empty_pgtable(struct task_struct * tsk,unsigned long address)
{
	pgd_t *p;
	unsigned long page;

	p = PAGE_DIR_OFFSET(tsk,address);
	if (pgd_present(*p))
		return (pte_t *) (PAGE_PTR(address) + pgd_page(*p));
	if (!pgd_none(*p)) {
		printk("get_empty_pgtable: bad page-directory entry \n");
		pgd_clear(p);
	}
	page = get_free_page(GFP_KERNEL);
	/* get_free_page() may have slept: re-check for a racing setup. */
	if (pgd_present(*p)) {
		free_page(page);
		return (pte_t *) (PAGE_PTR(address) + pgd_page(*p));
	}
	if (!pgd_none(*p)) {
		printk("get_empty_pgtable: bad page-directory entry \n");
		pgd_clear(p);
	}
	if (page) {
		pgd_set(p, (pte_t *) page);
		return (pte_t *) (PAGE_PTR(address) + page);
	}
	oom(current);
	pgd_set(p, BAD_PAGETABLE);
	return NULL;
}
851
/*
 * do_swap_page() - fault in a swapped-out page.  Vmas without their
 * own swapin operation use the generic swap_in().  Otherwise the
 * vma's swapin() supplies a pte for the page; if the page-table entry
 * changed while swapin() slept, the freshly read page is dropped.  A
 * multiply-referenced page in a private mapping is installed
 * write-protected so a later write goes through copy-on-write.
 */
static inline void do_swap_page(struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, pte_t entry, int write_access)
{
	pte_t page;

	if (!vma->vm_ops || !vma->vm_ops->swapin) {
		swap_in(vma, page_table, pte_val(entry), write_access);
		return;
	}
	page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
	/* swapin() may sleep: bail if someone changed the pte meanwhile. */
	if (pte_val(*page_table) != pte_val(entry)) {
		free_page(pte_page(page));
		return;
	}
	if (mem_map[MAP_NR(pte_page(page))] > 1 && !(vma->vm_flags & VM_SHARED))
		page = pte_wrprotect(page);
	++vma->vm_task->mm->rss;
	++vma->vm_task->mm->maj_flt;
	*page_table = page;
	return;
}
873
874
875
876
877
878
879
/*
 * do_no_page() - handle a fault on a page that is not present.  A
 * non-empty pte means the page is swapped out and is routed to
 * do_swap_page().  Anonymous vmas (no nopage operation) simply get a
 * zeroed page.  File-backed vmas first try to reuse a page already
 * resident in another mapping of the same inode (share_page);
 * failing that, the vma's nopage() reads the page in, sharing is
 * retried (another task may have read it in while we slept), and the
 * page is finally mapped -- writable+dirty for a write access,
 * write-protected when it remains shared in a private mapping.
 */
void do_no_page(struct vm_area_struct * vma, unsigned long address,
	int write_access)
{
	pte_t * page_table;
	pte_t entry;
	unsigned long page;

	page_table = get_empty_pgtable(vma->vm_task,address);
	if (!page_table)
		return;
	entry = *page_table;
	/* Already resolved by a racing fault. */
	if (pte_present(entry))
		return;
	if (!pte_none(entry)) {
		do_swap_page(vma, address, page_table, entry, write_access);
		return;
	}
	address &= PAGE_MASK;
	if (!vma->vm_ops || !vma->vm_ops->nopage) {
		/* Anonymous memory: hand out a fresh zeroed page. */
		++vma->vm_task->mm->rss;
		++vma->vm_task->mm->min_flt;
		get_empty_page(vma, page_table);
		return;
	}
	page = get_free_page(GFP_KERNEL);
	/* Cheap path: another mapping of the inode may already hold it. */
	if (share_page(vma, address, write_access, page)) {
		++vma->vm_task->mm->min_flt;
		++vma->vm_task->mm->rss;
		return;
	}
	if (!page) {
		oom(current);
		put_page(page_table, BAD_PAGE);
		return;
	}
	++vma->vm_task->mm->maj_flt;
	++vma->vm_task->mm->rss;
	/* Read the page in via the vma; for a private write the page is
	 * ours outright (no_share), so nopage() may return a copy. */
	page = vma->vm_ops->nopage(vma, address, page,
		write_access && !(vma->vm_flags & VM_SHARED));
	/* nopage() may sleep: someone may have shared it in meanwhile. */
	if (share_page(vma, address, write_access, 0)) {
		free_page(page);
		return;
	}
	entry = mk_pte(page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (mem_map[MAP_NR(page)] > 1 && !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	put_page(page_table, entry);
}
945 }