root/arch/sparc/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo

/*
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>

extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

extern int map_the_prom(int);

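/* MMU geometry, presumably filled in by the early boot probe: the
 * number of segment maps and hardware contexts this machine's
 * sun4c-style MMU provides, and the segmap reserved as "invalid".
 */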
extern int invalid_segment, num_segmaps, num_contexts;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
        memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
        return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}

unsigned long __zero_page(void)
{
        memset((void *) ZERO_PGE, 0, PAGE_SIZE);
        return ZERO_PGE;
}
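
/* A sketch of how the generic mm code reaches the helpers above; in
 * kernels of this vintage <asm/pgtable.h> typically wraps them in
 * macros along these lines (names assumed from the i386 port):
 *
 *      #define BAD_PAGETABLE __bad_pagetable()
 *      #define BAD_PAGE __bad_page()
 *      #define ZERO_PAGE __zero_page()
 */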

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
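        /* mem_map[] holds one reference count per physical page, with
         * MAP_PAGE_RESERVED flagging pages the allocator must never
         * hand out; a count of zero means free, more than one, shared.
         */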
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables for the sparc: it maps the
 * PROM and the kernel image into every hardware context, makes the
 * kernel text read-only and cacheable, invalidates all user segmaps,
 * and then hands the remaining memory to free_area_init().
 */

unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        int pg_segmap = 0;
        unsigned long i, a, b, mask=0;
        register int num_segs, num_ctx;
        register char * c;

        num_segs = num_segmaps;
        num_ctx = num_contexts;

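        /* Sacrifice the last segmap to serve as the "invalid" marker:
         * a virtual region whose segmap entry points at it simply has
         * no translation.
         */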
        num_segs -= 1;
        invalid_segment = num_segs;

/* On the sparc we first need to allocate the segmaps for the
 * PROM's virtual space, and make those segmaps unusable. We map
 * the PROM in ALL contexts, so that the break key and the sync
 * command work no matter what state you took the machine out of.
 */

        printk("mapping the prom...\n");
        num_segs = map_the_prom(num_segs);

        start_mem = PAGE_ALIGN(start_mem);

        /* Ok, allocate the kernel pages, map them in all contexts
         * (with help from the prom), and lock them. Isn't the sparc
         * fun, kiddies? TODO
         */

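        /* Each sun4c segmap entry covers 256KB (1 << 18) of virtual
         * space: b counts how many such segments the kernel occupies,
         * and c walks the kernel's virtual range in 0x40000-byte steps.
         */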
        b = PGDIR_ALIGN(start_mem) >> 18;
        c = (char *) 0x0;

        printk("mapping kernel in all contexts...\n");

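        /* The PROM service behind romvec->pv_setctxt (arguments here
         * presumably context, virtual address, segment number) installs
         * the mapping for us, so kernel segment a ends up wired into
         * every context.
         */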
        for(a=0; a<b; a++)
          {
            for(i=0; i<num_contexts; i++)
              {
                /* map the kernel virt_addrs */
                (*(romvec->pv_setctxt))(i, (char *) c, a);
              }
            c += 0x40000;
          }

        /* Ok, since the kernel is now mapped in all contexts, we can
         * free up context zero to be used amongst user processes.
         */

        /* free context 0 here TODO */

        /* invalidate all user pages and initialize the pte struct
         * for userland. TODO
         */

        /* Make the kernel text unwritable and cacheable; the prom
         * loaded our text as writable, and only sneaky sunos kernels
         * need self-modifying code.
         */

        a = (unsigned long) &etext;
        mask = ~(PTE_NC|PTE_W);    /* make cacheable + not writable */

        printk("changing kernel text perms...\n");

        /* Must do this for every context, since the kernel uses all of
         * them; unlike some sun kernels I know of, we don't hard-wire
         * context 0 just for the kernel, that is unnecessary.
         */

        for(i=0; i<num_contexts; i++)
          {
            b = PAGE_ALIGN((unsigned long) &trapbase);

            switch_to_context(i);

            for(; b<a; b += PAGE_SIZE)
              {
                put_pte(b, (get_pte(b) & mask));
              }
          }

        invalidate(); /* flush the virtual address cache */

        printk("\nCurrently in context - ");
        for(i=0; i<num_contexts; i++)
          {
            switch_to_context(i);
            printk("%d ", (int) i);
          }

        switch_to_context(0);

        /* invalidate all user segmaps for virt addrs 0-KERNBASE */

        /* WRONG: for now I just let the kernel sit in low addresses
         * only, from 0 to end_kernel, just like i386-linux. This will
         * make the mm code a bit easier to cope with.
         */

        printk("\ninvalidating user segmaps\n");
        for(i = 0; i<num_contexts; i++)
          {
            switch_to_context(i);
            a = ((unsigned long) &end);
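            /* Start two 256KB segments (524288 bytes) past the end of
             * the kernel image, then mark the next 3584 segmap entries
             * (3584 * 256KB of user virtual space) as invalid.
             */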
            for(a += 524288, pg_segmap = 0; ++pg_segmap <= 3584; a += (1<<18))
              put_segmap((unsigned long *) a, (invalid_segment & 0x7f));
          }

        printk("wheee! have I sold out yet?\n");

        invalidate();
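        /* free_area_init() builds mem_map[] and the free lists starting
         * at start_mem and returns the first address left over for
         * general use; that becomes paging_init()'s return value.
         */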
        return free_area_init(start_mem, end_mem);
}

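/* mem_init() is still a stub on the sparc at this point; on the other
 * ports this is where free pages are handed to the allocator and the
 * "Memory: ..." banner gets printed.
 */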
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        return;
}

void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
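        /* Same accounting walk as show_mem() above, except the totals
         * are returned in bytes, hence the PAGE_SHIFT scaling below.
         */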
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
