root/arch/i386/mm/fault.c

DEFINITIONS

This source file includes the following definitions:
  1. do_page_fault

/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

extern void die_if_kernel(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *      bit 2 == 0 means kernel, 1 means user-mode
 */
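/*
 * For example, error_code == 6 (binary 110) is a user-mode write to a
 * page that is not present, while error_code == 7 is a user-mode write
 * to a present but write-protected page.
 */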
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct vm_area_struct * vma;
        unsigned long address;
        unsigned long page;

        /* get the address */
        __asm__("movl %%cr2,%0":"=r" (address));
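        /*
         * find_vma() returns the first vma whose vm_end lies above the
         * faulting address (or NULL if there is none), so the address is
         * inside the vma only when vm_start <= address; otherwise the
         * access can still be valid if the vma is a stack that is allowed
         * to grow downwards (VM_GROWSDOWN).
         */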
        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 4) {
                /*
                 * accessing the stack below %esp is always a bug.
                 * The "+ 32" is there due to some instructions (like
                 * pusha) doing pre-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 32 < regs->esp)
                        goto bad_area;
        }
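        /*
         * expand_stack() grows the vma downwards so that it covers the
         * faulting address; it fails (and the fault becomes a bad_area,
         * i.e. SIGSEGV) if growing it would exceed the stack resource
         * limit.
         */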
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        /*
         * was it a write?
         */
        if (error_code & 2) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* read with protection fault? */
                if (error_code & 1)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
         */
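        /*
         * (The legacy VGA window at 0xA0000-0xBFFFF is 128kB, i.e. 32
         * pages of 4kB, which is why only bits 0-31 of the screen bitmap
         * can be set here.)
         */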
        if (regs->eflags & VM_MASK) {
                unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        current->tss.screen_bitmap |= 1 << bit;
        }
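        /*
         * A protection fault on a present page is resolved by do_wp_page()
         * (write-protect/copy-on-write handling); a fault on a missing
         * page is resolved by do_no_page() (demand loading or swap-in).
         */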
        if (error_code & 1) {
#ifdef TEST_VERIFY_AREA
                if (regs->cs == KERNEL_CS)
                        printk("WP fault at %08lx\n", regs->eip);
#endif
                do_wp_page(current, vma, address, error_code & 2);
                return;
        }
        do_no_page(current, vma, address, error_code & 2);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        if (error_code & 4) {
                current->tss.cr2 = address;
                current->tss.error_code = error_code;
                current->tss.trap_no = 14;
                send_sig(SIGSEGV, current, 1);
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 *
 * First we check if it was the bootup rw-test, though..
 */
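/*
 * (At boot the kernel maps a page read-only at TASK_SIZE via pg0 and
 * deliberately writes to it: the original 386 ignores the WP bit in
 * supervisor mode, so getting a fault here is what proves that this
 * CPU honours it.)
 */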
        if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & 1)) {
                wp_works_ok = 1;
                pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
                flush_tlb();
                printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
                return;
        }
        if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
                pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
        } else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n",address);
        __asm__("movl %%cr3,%0" : "=r" (page));
        printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
                current->tss.cr3, page);
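        /*
         * Dump the page tables for the faulting address by hand: bits
         * 31-22 of the address index the page directory, bits 21-12 index
         * the page table, and bit 0 of each entry is the "present" bit.
         */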
        page = ((unsigned long *) page)[address >> 22];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) page)[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die_if_kernel("Oops", regs, error_code);
        do_exit(SIGKILL);
}
