root/arch/sparc/mm/fault.c


DEFINITIONS

This source file includes the following definitions:
  1. probe_memory
  2. map_the_prom
  3. do_page_fault
  4. __bad_pagetable
  5. __bad_page
  6. __zero_page
  7. show_mem
  8. paging_init
  9. mem_init
  10. si_meminfo

#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/openprom.h>
#include <asm/page.h>

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */

extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/* Sparc stuff... I know this is an ugly place to put the PROM vector; don't
 * remind me.
 */
extern char* trapbase;
extern unsigned int end[], etext[], msgbuf[];
struct linux_romvec *romvec;

int tbase_needs_unmapping;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;
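
/* Typical sun4c machines have on the order of 128 segmaps (pmegs) and 8
 * hardware contexts, but those figures are illustrative only -- we always
 * use the values the boot-time probe stored here.
 */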

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
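
/* For scale: the sun4c VAC is a virtually-indexed cache, typically around
 * 64KB with 16-byte lines (illustrative figures; the real ones are probed
 * at boot into the variables above), which is why it must be flushed when
 * mappings or contexts change.
 */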

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef CONFIG_TEST_VERIFY_AREA

/* Traverse the memory lists in the prom to see how much physical
 * memory we have.
 */

unsigned long
probe_memory(void)
{
  register struct linux_romvec *lprom;
  register struct linux_mlist_v0 *mlist;
  register unsigned long bytes, base_paddr;
  register int i;

  bytes = 0;
  base_paddr = 0;
  lprom = romvec;
  switch(lprom->pv_romvers)
    {
    case 0:
      mlist=(*(lprom->pv_v0mem.v0_totphys));
      bytes=mlist->num_bytes;
      base_paddr = (unsigned long) mlist->start_adr;
      printk("Bank 1: starting at 0x%x holding %d bytes\n",
             (unsigned int) base_paddr, (int) bytes);
      i=1;
      while(mlist->theres_more != (void *)0)  /* walk the whole bank list */
        {
          i++;
          mlist=mlist->theres_more;
          bytes+=mlist->num_bytes;
          printk("Bank %d: starting at 0x%x holding %d bytes\n", i,
                 (unsigned int) mlist->start_adr, (int) mlist->num_bytes);
        }
      break;
    case 2:
      printk("no v2 memory probe support yet.\n");
      (*(lprom->pv_halt))();
      break;
    }
  printk("Physical memory: %d bytes  starting at pa 0x%x\n",
         (unsigned int) bytes, (int) base_paddr);

  return bytes;
}
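
/* For reference: the v0 PROM memory list walked above is a singly-linked
 * list of banks, each carrying a start address and a byte count (see
 * <asm/openprom.h>).  A generic walk, shown as a sketch only, would be:
 */
#if 0
static unsigned long sum_mlist(struct linux_mlist_v0 *mlist)
{
        unsigned long bytes = 0;

        /* follow the theres_more chain, summing each bank's size */
        for (; mlist; mlist = mlist->theres_more)
                bytes += mlist->num_bytes;
        return bytes;
}
#endif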

/* Sparc routine to reserve the mapping of the open boot prom */

/* uncomment this for FAME and FORTUNE! */
/* #define DEBUG_MAP_PROM */

int
map_the_prom(int curr_num_segs)
{
  register unsigned long prom_va_begin;
  register unsigned long prom_va_end;
  register int segmap_entry, i;

  prom_va_begin = LINUX_OPPROM_BEGVM;
  prom_va_end   = LINUX_OPPROM_ENDVM;

#ifdef DEBUG_MAP_PROM
  printk("\ncurr_num_segs = 0x%x\n", curr_num_segs);
#endif

  while( prom_va_begin < prom_va_end)
    {
      segmap_entry=get_segmap(prom_va_begin);

      curr_num_segs = ((segmap_entry<curr_num_segs)
                       ? segmap_entry : curr_num_segs);

      for(i = num_contexts; --i > 0;)
          (*romvec->pv_setctxt)(i, (char *) prom_va_begin,
                                segmap_entry);

      if(segmap_entry == invalid_segment)
        {

#ifdef DEBUG_MAP_PROM
          printk("invalid_segments, virt_addr 0x%x\n", prom_va_begin);
#endif

          prom_va_begin += 0x40000;  /* num bytes per segment entry */
          continue;
        }

      /* DUH, prom maps itself so that users can access it. This is
       * broken.
       */

#ifdef DEBUG_MAP_PROM
      printk("making segmap for prom privileged, va = 0x%x\n",
             prom_va_begin);
#endif

      /* 0x40 pages of 4096 bytes each == one 0x40000 byte segment;
       * 0x20000000 is the "privileged" pte bit.
       */
      for(i = 0x40; --i >= 0; prom_va_begin+=4096)
        {
          put_pte(prom_va_begin, get_pte(prom_va_begin) | 0x20000000);
        }

    }

  printk("Mapped the PROM in all contexts...\n");

#ifdef DEBUG_MAP_PROM
  printk("curr_num_segs = 0x%x\n", curr_num_segs);
#endif

  return curr_num_segs;

}
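
/* The two strides above are the same size viewed two ways: a sun4c
 * segment covers 0x40000 bytes, i.e. 0x40 pages of 4096 bytes, so
 * skipping an invalid segment advances by 0x40000 at once while
 * privileging a mapped one touches its 0x40 ptes one page at a time.
 */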

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.  On the Sparc it is still a stub: any fault simply kills
 * the offending process.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        die_if_kernel("Oops", regs, error_code);
        do_exit(SIGKILL);
}
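
/* A sketch of the shape this handler should eventually take, modeled on
 * the i386 do_page_fault of roughly this era.  Everything below is an
 * assumption: in particular, get_fault_address() is an invented
 * placeholder, since how the faulting address is recovered from the
 * Sparc trap state is not wired up yet, and the error_code bit meanings
 * follow the i386 convention.
 */
#if 0
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct vm_area_struct *vma;
        unsigned long address;

        address = get_fault_address(regs);      /* hypothetical helper */
        vma = find_vma(current, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start > address && !(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 2) {                   /* write access? */
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        /* ...here the i386 code dispatches to do_wp_page()/do_no_page()... */
        return;
bad_area:
        if (error_code & 4) {                   /* fault from user mode? */
                send_sig(SIGSEGV, current, 1);
                return;
        }
        die_if_kernel("Oops", regs, error_code);
        do_exit(SIGKILL);
}
#endif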

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
unsigned long __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];

        return (unsigned long) empty_bad_page_table;
}

unsigned long __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];

        return (unsigned long) empty_bad_page;
}

unsigned long __zero_page(void)
{
        extern char empty_zero_page[PAGE_SIZE];

        return (unsigned long) empty_zero_page;
}
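
/* These three routines back the BAD_PAGE, BAD_PAGETABLE and ZERO_PAGE
 * macros (found in <linux/mm.h> in kernels of this era), so generic mm
 * code can ask the architecture for its emergency pages without knowing
 * where they actually live.
 */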

void show_mem(void)
{
        int i=0,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}
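
/* In kernels of this vintage mem_map[] holds a per-page use count with
 * flag bits or'ed in: MAP_PAGE_RESERVED marks pages the allocator must
 * never hand out, a count of zero means free, and a count above one
 * means the page is shared -- hence the "mem_map[i]-1" accounting above.
 */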

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine should also unmap the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel
 * (still TODO here).
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        int pg_segmap;
        unsigned long i, a, b, mask=0;
        register int num_segs, num_ctx;
        register char * c;

        num_segs = num_segmaps;
        num_ctx = num_contexts;

        num_segs -= 1;
        invalid_segment = num_segs;

/* On the sparc we first need to allocate the segmaps for the
 * PROM's virtual space, and make those segmaps unusable. We
 * map the PROM in ALL contexts, so the break key and the
 * sync command work no matter what state you took the machine
 * out of.
 */

        printk("mapping the prom...\n");
        num_segs = map_the_prom(num_segs);

        start_mem = PAGE_ALIGN(start_mem);

        /* ok, allocate the kernel pages, map them in all contexts
         * (with help from the prom), and lock them. Isn't the sparc
         * fun, kiddies? TODO
         */

        b=PGDIR_ALIGN(start_mem)>>18;   /* number of 256KB segments the kernel spans */
        c= (char *)0x0;

        printk("mapping kernel in all contexts...\n");

        for(a=0; a<b; a++)
          {
            /* map kernel segment a at virtual address c in every context */
            for(i=1; i<num_contexts; i++)
                (*(romvec->pv_setctxt))(i, (char *) c, a);
            c += (1<<18);       /* advance one full 256KB segment */
          }
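
        /* pv_setctxt is the PROM service this file leans on everywhere:
         * it installs a (context, virtual address, pmeg) triple in the
         * segment map on our behalf, letting the kernel borrow the
         * PROM's MMU knowledge until it has a real Sparc MMU layer of
         * its own.
         */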

        /* Ok, now that the kernel is mapped in all contexts, we can free
         * up context zero to be used amongst user processes.
         */

        /* free context 0 here TODO */

        /* invalidate all user pages and initialize the pte struct
         * for userland. TODO
         */

        /* Make the kernel text unwritable and cacheable; the prom
         * loaded our text as writable, and only sneaky sunos kernels
         * need self-modifying code.
         */

        a= (unsigned long) etext;
        b=PAGE_ALIGN((unsigned long) msgbuf);
        mask=~(PTE_NC|PTE_W);    /* make cacheable + not writable */

        printk("changing kernel text perms...\n");

        /* must do this in every context, since the kernel uses all of
         * them and, unlike some sun kernels I know of, we can't hard
         * wire context 0 just for the kernel; that is unnecessary.
         */

        for(i=0; i<num_contexts; i++)
          {
            b=PAGE_ALIGN((unsigned long) trapbase);

            switch_to_context(i);

            for(;b<a; b+=4096)
              {
                put_pte(b, (get_pte(b) & mask));
              }
          }
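
        /* Two pte attribute tricks appear in this file: map_the_prom()
         * or's in 0x20000000 to set the privileged bit, while the loop
         * above and's with ~(PTE_NC|PTE_W) to make the kernel text
         * cacheable and read-only.  On sun4c these attribute bits all
         * live in the top bits of the pte word.
         */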

        invalidate(); /* flush the virtual address cache */

        printk("\nCurrently in context - ");
        for(i=0; i<num_contexts; i++)
          {
            switch_to_context(i);
            printk("%d ", (int) i);
          }

        switch_to_context(0);

        /* invalidate all user segmaps for virt addrs 0-KERNBASE */

        /* WRONG, now I just let the kernel sit in low addresses only
         * from 0 -- end_kernel just like i386-linux. This will make
         * mem-code a bit easier to cope with.
         */

        printk("\ninvalidating user segmaps\n");
        for(i = 0; i<num_contexts; i++)
          {
            switch_to_context(i);
            a=((unsigned long) &end);
            /* start 512KB (two segments) past the kernel image and step
             * one 256KB segment at a time
             */
            for(a+=524288, pg_segmap=0; ++pg_segmap<=3584; a+=(1<<18))
              put_segmap((unsigned long *) a, (invalid_segment&0x7f));
          }

        printk("wheee! have I sold out yet?\n");

        invalidate();
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_low_mem,
              unsigned long start_mem, unsigned long end_mem)
{
        /* TODO: nothing is initialized here yet on the Sparc. */
        return;
}
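
/* A minimal sketch of what mem_init will probably need to do, modeled on
 * the i386 mem_init of roughly this era (an assumption, not Sparc code):
 * mark the kernel's own pages reserved, then hand everything else to the
 * free list and let high_memory reflect the end of RAM.
 */
#if 0
void mem_init(unsigned long start_low_mem,
              unsigned long start_mem, unsigned long end_mem)
{
        unsigned long tmp;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;
        start_mem = PAGE_ALIGN(start_mem);

        for (tmp = 0; tmp < high_memory; tmp += PAGE_SIZE) {
                if (tmp < start_mem) {
                        /* kernel text/data: never give these out */
                        mem_map[MAP_NR(tmp)] |= MAP_PAGE_RESERVED;
                        continue;
                }
                mem_map[MAP_NR(tmp)] = 1;       /* one fake user... */
                free_page(tmp);                 /* ...so free_page() releases it */
        }
}
#endif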

void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
