root/arch/alpha/mm/fault.c


DEFINITIONS

This source file includes the following definitions.
  1. do_page_fault

/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void tbi(unsigned long type, unsigned long arg);
#define tbisi(x) tbi(1,(x))
#define tbisd(x) tbi(2,(x))
#define tbis(x)  tbi(3,(x))

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 */
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause,
        unsigned long a3, unsigned long a4, unsigned long a5,
        struct pt_regs regs)
{
        struct vm_area_struct * vma;

        vma = find_vma(current, address);
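        /*
         * find_vma() returns the first vma whose end lies above the
         * address, so the address may still fall below vm_start; that
         * case is handled by the stack-growth checks below.
         */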
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
                goto bad_area;
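        /*
         * The address sits below a grows-down (stack) vma and within the
         * stack rlimit: expand the vma downward to the page containing
         * the faulting address, keeping vm_offset consistent.
         */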
        vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
        vma->vm_start = (address & PAGE_MASK);
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
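        /*
         * Check the access against the vma protections: an instruction
         * fetch needs VM_EXEC, a load needs read (or write) permission,
         * and a store needs VM_WRITE.
         */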
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
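        /*
         * Invalidate any stale translation buffer entry for this address
         * (tbis hits both the I- and D-stream entries), then let
         * handle_mm_fault() bring the page in; the third argument flags
         * a write access.
         */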
        tbis(address);
        handle_mm_fault(vma, address, cause > 0);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        if (user_mode(&regs)) {
                printk("memory violation at pc=%08lx (%08lx)\n", regs.pc, address);
                die_if_kernel("oops", &regs, cause);
                send_sig(SIGSEGV, current, 1);
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",address);
        die_if_kernel("Oops", &regs, cause);
        do_exit(SIGKILL);
}
