root/arch/mips/mm/fault.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. do_page_fault
  2. __bad_pagetable
  3. __bad_page
  4. __zero_page
  5. show_mem
  6. paging_init
  7. mem_init
  8. si_meminfo

   1 /*
   2  *  arch/mips/mm/fault.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  *  Ported to MIPS by Ralf Baechle
   6  */
   7 
   8 #include <linux/config.h>
   9 #include <linux/signal.h>
  10 #include <linux/sched.h>
  11 #include <linux/head.h>
  12 #include <linux/kernel.h>
  13 #include <linux/errno.h>
  14 #include <linux/string.h>
  15 #include <linux/types.h>
  16 #include <linux/ptrace.h>
  17 #include <linux/mman.h>
  18 
  19 #include <asm/system.h>
  20 #include <asm/segment.h>
  21 #include <asm/mipsconfig.h>
  22 
  23 extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */
  24 
  25 extern void scsi_mem_init(unsigned long);
  26 extern void sound_mem_init(void);
  27 extern void die_if_kernel(char *,struct pt_regs *,long);
  28 extern void show_net_buffers(void);
  29 
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines (do_wp_page for write-protect faults, do_no_page for
 * not-present faults).
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;

	/*
	 * Get the faulting virtual address: CP0 register 8 is BadVAddr,
	 * latched by the CPU on the address exception.
	 */
	__asm__("dmfc0\t%0,$8"
		: "=r" (address));

	/*
	 * Walk the vma list (sorted by address) for the first region that
	 * ends above the fault address; running off the end means no
	 * region can contain it.
	 */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			goto bad_area;
		if (vma->vm_end > address)
			break;
	}
	if (vma->vm_start <= address)
		goto good_area;
	/*
	 * Address falls below the vma: only acceptable as growth of a
	 * grows-down (stack) segment, within the stack rlimit.
	 */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
		goto bad_area;
	/* Expand the stack vma downwards to cover the faulting page. */
	vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
	vma->vm_start = (address & PAGE_MASK);
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
#if 0
	/* i386 leftover (VM86 screen bitmap) -- disabled for MIPS. */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			current->tss.screen_bitmap |= 1 << bit;
	}
#endif
	if (!(vma->vm_page_prot & PAGE_USER))
		goto bad_area;
	/*
	 * error_code is tested against page-table bit definitions here:
	 * PAGE_PRESENT set means the page was mapped, so this must be a
	 * write-protection (possibly copy-on-write) fault.
	 */
	if (error_code & PAGE_PRESENT) {
		if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
			goto bad_area;
		do_wp_page(vma, address, error_code);
		return;
	}
/* debug trace -- presumably temporary; note %x on a pointer argument */
printk("do_page_fault: do_no_page(%x, %x, %d)", vma, address, error_code);
	do_no_page(vma, address, error_code);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
printk("Bad Area...\n");
	if (error_code & PAGE_USER) {
		/* User-mode fault: record the details and deliver SIGSEGV. */
		current->tss.cp0_badvaddr = address;
		current->tss.error_code = error_code;
		current->tss.trap_no = 14;
		send_sig(SIGSEGV, current, 1);
		return;
	}
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
	if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		/* Re-map page 0 -- presumably to avoid recursive faults while oopsing. */
		pg0[0] = PAGE_SHARED;
	} else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	/* Dump the two-level page-table entries covering the fault address. */
	page = current->tss.pg_dir;
	printk(KERN_ALERT "current->tss.pg_dir = %08lx\n", page);
	page = ((unsigned long *) page)[address >> 22];	/* pgd index = top 10 bits */
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;	/* keep the middle 10 (pte index) bits */
		page = ((unsigned long *) page)[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);
}
 120 
 121 /*
 122  * BAD_PAGE is the page that is used for page faults when linux
 123  * is out-of-memory. Older versions of linux just did a
 124  * do_exit(), but using this instead means there is less risk
  125  * for a process dying in kernel mode, possibly leaving an inode
 126  * unused etc..
 127  *
 128  * BAD_PAGETABLE is the accompanying page-table: it is initialized
 129  * to point to BAD_PAGE entries.
 130  *
 131  * ZERO_PAGE is a special page that is used for zero-initialized
 132  * data and COW.
 133  */
 134 unsigned long __bad_pagetable(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 135 {
 136         extern char empty_bad_page_table[PAGE_SIZE];
 137         unsigned long dummy;
 138 
 139         __asm__ __volatile__(
 140                 ".set\tnoreorder\n\t"
 141                 "1:\tsw\t%2,(%0)\n\t"
 142                 "subu\t%1,%1,1\n\t"
 143                 "bne\t$0,%1,1b\n\t"
 144                 "addiu\t%0,%0,1\n\t"
 145                 ".set\treorder"
 146                 :"=r" (dummy),
 147                  "=r" (dummy)
 148                 :"r" (BAD_PAGE + PAGE_TABLE),
 149                  "0" ((long) empty_bad_page_table),
 150                  "1" (PTRS_PER_PAGE));
 151 
 152         return (unsigned long) empty_bad_page_table;
 153 }
 154 
 155 unsigned long __bad_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 156 {
 157         extern char empty_bad_page[PAGE_SIZE];
 158         unsigned long dummy;
 159 
 160         __asm__ __volatile__(
 161                 ".set\tnoreorder\n\t"
 162                 "1:\tsw\t$0,(%0)\n\t"
 163                 "subu\t%1,%1,1\n\t"
 164                 "bne\t$0,%1,1b\n\t"
 165                 "addiu\t%0,%0,1\n\t"
 166                 ".set\treorder"
 167                 :"=r" (dummy),
 168                  "=r" (dummy)
 169                 :"0" ((long) empty_bad_page),
 170                  "1" (PTRS_PER_PAGE));
 171 
 172         return (unsigned long) empty_bad_page;
 173 }
 174 
 175 unsigned long __zero_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 176 {
 177         extern char empty_zero_page[PAGE_SIZE];
 178         unsigned long dummy;
 179 
 180         __asm__ __volatile__(
 181                 ".set\tnoreorder\n\t"
 182                 "1:\tsw\t$0,(%0)\n\t"
 183                 "subu\t%1,%1,1\n\t"
 184                 "bne\t$0,%1,1b\n\t"
 185                 "addiu\t%0,%0,1\n\t"
 186                 ".set\treorder"
 187                 :"=r" (dummy),
 188                  "=r" (dummy)
 189                 :"0" ((long) empty_zero_page),
 190                  "1" (PTRS_PER_PAGE));
 191 
 192         return (unsigned long) empty_zero_page;
 193 }
 194 
 195 void show_mem(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 196 {
 197         int i,free = 0,total = 0,reserved = 0;
 198         int shared = 0;
 199 
 200         printk("Mem-info:\n");
 201         show_free_areas();
 202         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 203         i = high_memory >> PAGE_SHIFT;
 204         while (i-- > 0) {
 205                 total++;
 206                 if (mem_map[i] & MAP_PAGE_RESERVED)
 207                         reserved++;
 208                 else if (!mem_map[i])
 209                         free++;
 210                 else
 211                         shared += mem_map[i]-1;
 212         }
 213         printk("%d pages of RAM\n",total);
 214         printk("%d free pages\n",free);
 215         printk("%d reserved pages\n",reserved);
 216         printk("%d pages shared\n",shared);
 217         show_buffers();
 218 #ifdef CONFIG_NET
 219         show_net_buffers();
 220 #endif
 221 }
 222 
 223 extern unsigned long free_area_init(unsigned long, unsigned long);
 224 
/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 * NOTE(review): the body below does not appear to unmap page 0 -- it
 * maps address 0 upwards with PAGE_SHARED; confirm against head.S.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long * pg_dir;
	unsigned long * pg_table;
	unsigned long tmp;
	unsigned long address;

	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	/* Map all of [0, end_mem) one pgd entry (one page table) at a time. */
	while (address < end_mem) {
		/* Reuse an existing page table if this pgd slot is filled... */
		tmp = *pg_dir;
		tmp &= PAGE_MASK;
		if (!tmp) {
			/* ...otherwise carve a fresh one out of start_mem. */
			tmp = start_mem;
			start_mem += PAGE_SIZE;
		}
		/*
		 * also map it in at 0x00000000 for init
		 */
		*pg_dir = tmp | PAGE_TABLE;
		pg_dir++;
		/* Fill the page table with an identity mapping, zeroing
		 * any entries past end_mem. */
		pg_table = (unsigned long *) (tmp & PAGE_MASK);
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = address | PAGE_SHARED;
			else
				*pg_table = 0;
			address += PAGE_SIZE;
		}
	}
#if KERNELBASE == KSEG0
	/* Kernel runs cached via KSEG0: flush so the tables reach RAM. */
	cacheflush();
#endif
	invalidate();	/* flush stale translations */
	return free_area_init(start_mem, end_mem);
}
 269 
/*
 * mem_init() - hand every usable page in [start_mem, end_mem) to the
 * free-page allocator, account for the pages the kernel itself uses,
 * and print the boot memory banner.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;	/* end of kernel text, from the linker script */

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* mark usable pages in the mem_map[] */
	start_mem = PAGE_ALIGN(start_mem);

	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)] = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_SCSI
	/* Give SCSI a chance to claim memory first -- see scsi_mem_init. */
	scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	/* Free every page still marked usable (count == 0) above;
	 * classify the in-use ones for the banner. */
	for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
		if (mem_map[MAP_NR(tmp)]) {
			/*
			 * We don't have any reserved pages on the
			 * MIPS systems supported until now
			 */
			if (0)
				reservedpages++;
			else if (tmp < ((unsigned long) &etext - KERNELBASE))
				codepages++;
			else
				datapages++;
			continue;
		}
		/* Set a refcount of one so free_page() releases it. */
		mem_map[MAP_NR(tmp)] = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		high_memory >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));

	invalidate();	/* flush stale translations */
	return;
}
 322 
 323 void si_meminfo(struct sysinfo *val)
     /* [previous][next][first][last][top][bottom][index][help] */
 324 {
 325         int i;
 326 
 327         i = high_memory >> PAGE_SHIFT;
 328         val->totalram = 0;
 329         val->sharedram = 0;
 330         val->freeram = nr_free_pages << PAGE_SHIFT;
 331         val->bufferram = buffermem;
 332         while (i-- > 0)  {
 333                 if (mem_map[i] & MAP_PAGE_RESERVED)
 334                         continue;
 335                 val->totalram++;
 336                 if (!mem_map[i])
 337                         continue;
 338                 val->sharedram += mem_map[i]-1;
 339         }
 340         val->totalram <<= PAGE_SHIFT;
 341         val->sharedram <<= PAGE_SHIFT;
 342         return;
 343 }

/* [previous][next][first][last][top][bottom][index][help] */