root/arch/alpha/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. do_page_fault

/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void tbi(unsigned long type, unsigned long arg);
#define tbisi(x) tbi(1,(x))
#define tbisd(x) tbi(2,(x))
#define tbis(x)  tbi(3,(x))

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 */
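/*
 * Illustrative examples (not part of the original source): a store
 * through an unmapped user pointer arrives here with mmcsr == 0
 * (translation not valid) and cause == 1 (store), while an
 * instruction-fetch fault arrives with cause == -1 and is checked
 * against VM_EXEC below.
 */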
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause,
        unsigned long a3, unsigned long a4, unsigned long a5,
        struct pt_regs regs)
{
        struct vm_area_struct * vma;

        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
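        /*
         * The faulting address lies below this vma: for a stack
         * (VM_GROWSDOWN) mapping we grow the vma downward to cover it,
         * unless that would exceed the process's stack rlimit.
         */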
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
                goto bad_area;
        vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
        vma->vm_start = (address & PAGE_MASK);
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
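        /*
         * The access is permitted: invalidate the stale TLB entry for
         * this address (tbis) and let handle_mm_fault() bring the page
         * in; the third argument is non-zero for a write fault.
         */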
        tbis(address);
        handle_mm_fault(vma, address, cause > 0);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        if (user_mode(&regs)) {
                printk("memory violation at pc=%08lx (%08lx)\n", regs.pc, address);
                die_if_kernel("oops", &regs, cause);
                send_sig(SIGSEGV, current, 1);
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",address);
        die_if_kernel("Oops", &regs, cause);
        do_exit(SIGKILL);
}
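
For a user-mode access, the bad_area path above delivers SIGSEGV with
send_sig(). A minimal user-space sketch (illustrative only, not part of
this file; assumes a standard libc) that triggers that path and observes
the signal:

#include <signal.h>
#include <unistd.h>

/* Write through an unmapped pointer: find_vma() finds no matching vma,
 * so do_page_fault() takes the bad_area path and sends SIGSEGV. */
static void segv_handler(int sig)
{
        (void) sig;
        write(2, "caught SIGSEGV\n", 15);
        _exit(1);
}

int main(void)
{
        signal(SIGSEGV, segv_handler);
        *(volatile int *) 0 = 42;       /* unmapped address -> page fault */
        return 0;
}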
