root/arch/alpha/mm/fault.c


DEFINITIONS

This source file includes the following definitions.
  1. do_page_fault

/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void tbi(unsigned long type, unsigned long arg);
#define tbisi(x) tbi(1,(x))
#define tbisd(x) tbi(2,(x))
#define tbis(x)  tbi(3,(x))

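/*
 * Editorial note: tbi() is the PALcode translation-buffer invalidate
 * call; the first argument selects which entries to drop, the second
 * is the virtual address.  As the macro names suggest, type 1 drops a
 * single instruction-stream entry (tbisi), type 2 a single data-stream
 * entry (tbisd), and type 3 both streams (tbis).  do_page_fault()
 * below uses tbis(address) to discard any cached translation for the
 * faulting address before handle_mm_fault() fixes up the page tables.
 */
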
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 */
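/*
 * Editorial note, as a worked example of the encoding above: a store
 * (cause = 1) into a mapping that lacks VM_WRITE fails the permission
 * check in the function below and takes the bad_area path; an
 * instruction fetch (cause = -1) requires VM_EXEC; and a load
 * (cause = 0) is refused only when the mapping permits neither reads
 * nor writes.
 */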
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause,
        unsigned long a3, unsigned long a4, unsigned long a5,
        struct pt_regs regs)
{
        struct vm_area_struct * vma;

        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        tbis(address);
        handle_mm_fault(vma, address, cause > 0);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        if (user_mode(&regs)) {
                printk("memory violation at pc=%08lx (%08lx)\n", regs.pc, address);
                die_if_kernel("oops", &regs, cause);
                send_sig(SIGSEGV, current, 1);
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",address);
        die_if_kernel("Oops", &regs, cause);
        do_exit(SIGKILL);
}
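
/*
 * Editorial note: an illustrative user-space sketch (not part of this
 * file) of the bad_area path above.  The store below touches an
 * address with no vma, so do_page_fault() prints the "memory
 * violation" message and delivers SIGSEGV via send_sig(); the handler
 * then exits instead of re-executing the faulting store.  Only
 * standard signal(2)/write(2)/_exit(2) are assumed.
 *
 *      #include <signal.h>
 *      #include <unistd.h>
 *
 *      static void on_segv(int sig)
 *      {
 *              write(1, "caught SIGSEGV\n", 15);
 *              _exit(0);
 *      }
 *
 *      int main(void)
 *      {
 *              signal(SIGSEGV, on_segv);
 *              *(volatile long *) 8 = 1;
 *              return 1;
 *      }
 */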
