root/arch/sparc/mm/init.c


DEFINITIONS

This source file includes the following definitions.
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo

/*
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>

extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

extern int map_the_prom(int);

struct sparc_phys_banks sp_banks[14];
unsigned long *sun4c_mmu_table;
extern int invalid_segment, num_segmaps, num_contexts;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
        memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
        return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}

unsigned long __zero_page(void)
{
        memset((void *) ZERO_PGE, 0, PAGE_SIZE);
        return ZERO_PGE;
}

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
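        /* Each mem_map[] entry is a page use count: zero means the
         * page is free, the MAP_PAGE_RESERVED bit marks pages the
         * allocator must leave alone, and a count of n means n-1
         * extra users share the page.
         */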
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables. On the sparc (sun4c) this
 * builds a static kernel page table, maps the PROM and the kernel
 * into every hardware context, and write-protects the kernel text.
 * The low-level page fault handler refills the limited sun4c mmu
 * entries from the static table.
 */

unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long i, a, b, mask=0;
        unsigned long curseg, curpte, num_inval;
        unsigned long address;
        pte_t *pg_table;

        register int num_segs, num_ctx;
        register char * c;

        num_segs = num_segmaps;
        num_ctx = num_contexts;

        num_segs -= 1;
        invalid_segment = num_segs;

        start_mem = free_area_init(start_mem, end_mem);

/* On the sparc we first need to allocate the segmaps for the
 * PROM's virtual space, and make those segmaps unusable. We
 * map the PROM in ALL contexts, so the break key and the
 * sync command work no matter what state the machine was
 * left in.
 */

        printk("mapping the prom...\n");
        num_segs = map_the_prom(num_segs);

        start_mem = PAGE_ALIGN(start_mem);

        /* Set up static page tables in kernel space; these are used
         * so that the low-level page fault handler can fill in
         * missing TLB entries, since all mmu entries cannot be
         * loaded at once on the sun4c.
         */

#if 0
        /* ugly debugging code */
        for(i=0; i<40960; i+=PAGE_SIZE)
          printk("address=0x%x  vseg=%d  pte=0x%x\n", (unsigned int) i,
                 (int) get_segmap(i), (unsigned int) get_pte(i));
#endif

        printk("Setting up kernel static mmu table... bounce bounce\n");

        address = 0; /* ((unsigned long) &end) + 524288; */
        sun4c_mmu_table = (unsigned long *) start_mem;
        pg_table = (pte_t *) start_mem;
        curseg = curpte = num_inval = 0;
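        /* Walk every page up to end_mem, building one pte per page.
         * A sun4c segment holds 64 ptes; if a whole segment's worth
         * turns out to be invalid, the virtual segment is pointed at
         * the shared invalid_segment instead of consuming a real
         * segmap entry.
         */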
        while(address < end_mem) {
          if(curpte == 0)
            put_segmap((address&PGDIR_MASK), curseg);
          for(i=0; sp_banks[i].num_bytes != 0; i++)
            if((address >= sp_banks[i].base_addr) &&
               (address <= (sp_banks[i].base_addr + sp_banks[i].num_bytes)))
              goto good_address;
          /* No physical memory here, so set the virtual segment to
           * the invalid one, and put an invalid pte in the static
           * kernel table.
           */
          *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_INVALID);
          pg_table++; curpte++; num_inval++;
          if(curpte > 63) {
            if(curpte == num_inval) {
              put_segmap((address&PGDIR_MASK), invalid_segment);
            } else {
              put_segmap((address&PGDIR_MASK), curseg);
              curseg++;
            }
            curpte = num_inval = 0;
          }
          address += PAGE_SIZE;
          continue;

          good_address:
          /* create pte entry */
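          /* Everything below &end + 512KB is presumably still mapped
           * from the boot sequence, so the live pte is copied as-is;
           * above that, a fresh kernel pte is built and loaded into
           * the mmu.
           */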
          if(address < (((unsigned long) &end) + 524288)) {
            pte_val(*pg_table) = get_pte(address);
          } else {
            *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_KERNEL);
            put_pte(address, pte_val(*pg_table));
          }

          pg_table++; curpte++;
          if(curpte > 63) {
            put_segmap((address&PGDIR_MASK), curseg);
            curpte = num_inval = 0;
            curseg++;
          }
          address += PAGE_SIZE;
        }

        start_mem = (unsigned long) pg_table;
        /* ok, allocate the kernel pages, map them in all contexts
         * (with help from the prom), and lock them. Isn't the sparc
         * fun, kiddies? TODO
         */

#if 0
        /* ugly debugging code */
        for(i=0x1a3000; i<(0x1a3000+40960); i+=PAGE_SIZE)
          printk("address=0x%x  vseg=%d  pte=0x%x\n", (unsigned int) i,
                 (int) get_segmap(i), (unsigned int) get_pte(i));
        halt();
#endif

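        /* A sun4c segment maps 256KB (1 << 18 bytes), so b is the
         * number of segments needed to cover the kernel up to the
         * end of the static page tables just built.
         */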
        b = PGDIR_ALIGN(start_mem) >> 18;
        c = (char *) 0x0;

        printk("mapping kernel in all contexts...\n");

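        /* romvec->pv_setctxt is the PROM service that installs a
         * segment mapping in a given context; here segment a is
         * entered at virtual address c in every context, one 256KB
         * (0x40000) step at a time.
         */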
        for(a=0; a<b; a++)
          {
            for(i=0; i<num_contexts; i++)
              {
                /* map the kernel virt_addrs */
                (*(romvec->pv_setctxt))(i, (char *) c, a);
              }
            c += 0x40000;
          }

        /* Ok, now that the kernel is mapped in all contexts, we can
         * free up context zero for use by user processes.
         */

        /* free context 0 here TODO */

        /* invalidate all user pages and initialize the pte struct
         * for userland. TODO
         */

        /* Make the kernel text unwritable and cacheable; the prom
         * loaded our text as writable, and only sneaky sunos kernels
         * need self-modifying code.
         */

        a = (unsigned long) &etext;
        mask = ~(PTE_NC|PTE_W);    /* make cacheable + not writable */

        /* This must be done in every context, since the kernel uses
         * them all; unlike some sun kernels I know of, we don't hard
         * wire context 0 just for the kernel, that is unnecessary.
         */

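        /* For each of the first 8 contexts, walk the kernel text
         * from trapbase up to etext one 4096-byte page at a time,
         * clearing the no-cache and write bits in each pte.
         */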
        for(i=0; i<8; i++)
          {
            b=PAGE_ALIGN((unsigned long) &trapbase);

            switch_to_context(i);

            for(;b<a; b+=4096)
              {
                put_pte(b, (get_pte(b) & mask));
              }
          }

        invalidate(); /* flush the virtual address cache */

        printk("\nCurrently in context - ");
        for(i=0; i<num_contexts; i++)
          {
            switch_to_context(i);
            printk("%d ", (int) i);
          }
        printk("\n");

        switch_to_context(0);

        invalidate();
        return start_mem;
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
  unsigned long start_low_mem = PAGE_SIZE;
  int codepages = 0;
  int reservedpages = 0;
  int datapages = 0;
  int i = 0;
  unsigned long tmp, limit, tmp2, addr;
  extern char etext;

  end_mem &= PAGE_MASK;
  high_memory = end_mem;

  start_low_mem = PAGE_ALIGN(start_low_mem);
  start_mem = PAGE_ALIGN(start_mem);

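  /* For each physical bank, clear the use count on every page from
   * start_mem upward, then mark the hole between this bank and the
   * next as reserved so those addresses are never handed out.
   */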
  for(i = 0; sp_banks[i].num_bytes != 0; i++) {
    tmp = sp_banks[i].base_addr;
    limit = (sp_banks[i].base_addr + sp_banks[i].num_bytes);
    if(tmp<start_mem) {
      if(limit>start_mem)
        tmp = start_mem;
      else continue;
    }

    while(tmp<limit) {
      mem_map[MAP_NR(tmp)] = 0;
      tmp += PAGE_SIZE;
    }
    if(sp_banks[i+1].num_bytes != 0)
      while(tmp < sp_banks[i+1].base_addr) {
        mem_map[MAP_NR(tmp)] = MAP_PAGE_RESERVED;
        tmp += PAGE_SIZE;
      }
  }

#ifdef CONFIG_SCSI
  scsi_mem_init(high_memory);
#endif

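  /* Pages still marked in use are classified as code, data, or
   * reserved; every other page gets a use count of 1 and is handed
   * to free_page(), which drops the count back to zero and places
   * the page on the free list.
   */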
  for (addr = 0; addr < high_memory; addr += PAGE_SIZE) {
    if(mem_map[MAP_NR(addr)]) {
      if (addr < (unsigned long) &etext)
        codepages++;
      else if(addr < start_mem)
        datapages++;
      else
        reservedpages++;
      continue;
    }
    mem_map[MAP_NR(addr)] = 1;
    free_page(addr);
  }

  tmp2 = nr_free_pages << PAGE_SHIFT;

  printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
         tmp2 >> 10,
         high_memory >> 10,
         codepages << (PAGE_SHIFT-10),
         reservedpages << (PAGE_SHIFT-10),
         datapages << (PAGE_SHIFT-10));

  invalidate();
  return;
}

void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
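        /* Non-reserved pages count toward totalram; a use count
         * above one contributes its extra users to sharedram. Both
         * totals are converted to bytes below, to match freeram and
         * bufferram.
         */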
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
