root/arch/alpha/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. get_new_asn_and_reload
  2. do_page_fault

/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

/*
 * Last ASN (address space number) handed out, tagged with a version
 * so that contexts left over from an earlier TLB generation can be
 * recognized as stale.
 */
unsigned long asn_cache = ASN_FIRST_VERSION;

#ifndef BROKEN_ASN
/*
 * Select a new ASN and reload the context. This is
 * not inlined as this expands to a pretty large
 * function.
 */
void get_new_asn_and_reload(struct task_struct *tsk, struct mm_struct *mm)
{
        get_new_mmu_context(tsk, mm, asn_cache);        /* allocate a fresh ASN for this mm */
        reload_context(tsk);                            /* load the new context into the MMU */
}
#endif
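
As background for the ASN handling above: the idea (shown here as a minimal
sketch, not the real <asm/mmu_context.h> code) is that ASNs are handed out
from a counter whose high bits carry a "version"; when the hardware ASNs are
exhausted, the TLB is flushed and the version is bumped, which instantly
invalidates every previously issued context. All constants and names below
(EX_MAX_ASN, ex_asn_cache, ...) are illustrative assumptions only.

/*
 * Sketch of versioned-ASN allocation. Assumed parameters: EX_MAX_ASN
 * hardware ASNs per TLB generation, version tag kept above EX_ASN_SHIFT.
 */
#define EX_MAX_ASN              63
#define EX_ASN_SHIFT            16
#define EX_FIRST_VERSION        (1UL << EX_ASN_SHIFT)

static unsigned long ex_asn_cache = EX_FIRST_VERSION;

static unsigned long ex_get_new_asn(void)
{
        unsigned long asn = ex_asn_cache + 1;

        if ((asn & (EX_FIRST_VERSION - 1)) > EX_MAX_ASN) {
                /* Out of ASNs: flush the TLB and start a new version,
                 * so every context tagged with an older version is stale.
                 * The first ASN of the new version is 1 (0 stays unused). */
                /* ex_flush_tlb_all(); */
                asn = (asn & ~(EX_FIRST_VERSION - 1)) + EX_FIRST_VERSION + 1;
        }
        ex_asn_cache = asn;
        return asn;
}

/* A saved context is still usable only if its version matches the
 * allocator's current version. */
static int ex_context_is_stale(unsigned long ctx)
{
        return ((ctx ^ ex_asn_cache) & ~(EX_FIRST_VERSION - 1)) != 0;
}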

extern void die_if_kernel(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 */
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause,
        unsigned long a3, unsigned long a4, unsigned long a5,
        struct pt_regs regs)
{
        struct vm_area_struct * vma;

        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        /* The address is below this vma: only a grows-down (stack)
         * mapping may legitimately be extended to cover it. */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        if (cause < 0) {
                /* Instruction fetch */
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Load: allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                /* Store */
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        tbis(address);                          /* invalidate the stale TLB entry for this address */
        handle_mm_fault(vma, address, cause > 0);       /* cause > 0 means write access */
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        if (user_mode(&regs)) {
                printk("%s: memory violation at pc=%016lx rp=%016lx (bad address = %016lx)\n",
                        current->comm, regs.pc, regs.r26, address);
                die_if_kernel("oops", &regs, cause);
                force_sig(SIGSEGV, current);
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        printk(KERN_ALERT
               "Unable to handle kernel paging request at virtual address %016lx\n", address);
        die_if_kernel("Oops", &regs, cause);
        do_exit(SIGKILL);
}
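
For completeness, here is a hypothetical user-space test of the bad_area
path above: touching an address that no VMA covers makes do_page_fault()
log the "memory violation" line and deliver SIGSEGV via force_sig(). The
program below is an illustrative sketch, not part of the kernel source.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void segv_handler(int sig)
{
        /* Entered as a consequence of force_sig(SIGSEGV, current) in
         * do_page_fault(); exit before the faulting access is retried. */
        printf("caught SIGSEGV (%d)\n", sig);
        exit(1);
}

int main(void)
{
        signal(SIGSEGV, segv_handler);
        *(volatile long *)8 = 42;       /* no VMA covers this address */
        return 0;
}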
