root/arch/sparc/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. show_mem
  4. sparc_context_init
  5. paging_init
  6. mem_init
  7. si_meminfo

/*  $Id: init.c,v 1.33 1996/03/01 07:16:20 davem Exp $
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>

extern void show_net_buffers(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *__bad_pagetable(void)
{
        memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
        return (pte_t *) EMPTY_PGT;
}

pte_t __bad_page(void)
{
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}
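
/* A note on the two helpers above: neither allocates anything.  Both
 * hand back the fixed EMPTY_PGT / EMPTY_PGE scratch areas and re-zero
 * them on every call so stale contents never leak into a faulting
 * process; __bad_page() additionally returns its page wrapped in a PTE
 * that is already marked dirty and carries PAGE_SHARED protections, so
 * the fault path can install it directly.
 */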

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("\nMem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = MAP_NR(high_memory);
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}
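
/* Accounting convention for the loop above: a page frame whose use
 * count is N contributes N-1 to the "shared" total, reserved frames
 * are counted separately, and only frames with a zero use count are
 * reported as free.
 */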

extern pgprot_t protection_map[16];

unsigned long sparc_context_init(unsigned long start_mem, int numctx)
{
        int ctx;

        ctx_list_pool = (struct ctx_list *) start_mem;
        start_mem += (numctx * sizeof(struct ctx_list));
        for(ctx = 0; ctx < numctx; ctx++) {
                struct ctx_list *clist;

                clist = (ctx_list_pool + ctx);
                clist->ctx_number = ctx;
                clist->ctx_mm = 0;
        }
        ctx_free.next = ctx_free.prev = &ctx_free;
        ctx_used.next = ctx_used.prev = &ctx_used;
        for(ctx = 0; ctx < numctx; ctx++)
                add_to_free_ctxlist(ctx_list_pool + ctx);
        return start_mem;
}
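
/* sparc_context_init() is a boot-time bump allocator: the context
 * descriptor pool is carved straight out of start_mem (numctx *
 * sizeof(struct ctx_list) bytes), every descriptor is put on the free
 * list, and the advanced start_mem is returned so the caller can keep
 * allocating from it.  For example, with numctx == 8 the pool occupies
 * eight struct ctx_list entries and the caller's start_mem moves up by
 * exactly that much.
 */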

/*
 * paging_init() sets up the page tables: we call the MMU-specific
 * init routine that matches the Sun model of this Sparc.
 */
extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
extern unsigned long device_scan(unsigned long);

unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        switch(sparc_cpu_model) {
        case sun4c:
        case sun4e:
                start_mem = sun4c_paging_init(start_mem, end_mem);
                break;
        case sun4m:
        case sun4d:
                start_mem = srmmu_paging_init(start_mem, end_mem);
                break;
        default:
                prom_printf("paging_init: Cannot init paging on this Sparc\n");
                prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
                prom_printf("paging_init: Halting...\n");
                prom_halt();
        }

        /* Initialize the protection map with non-constant,
         * MMU-dependent values.
         */
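        /* The table below is indexed by the low four VM protection
         * bits (read = 1, write = 2, exec = 4, shared = 8).  Private
         * writable mappings (entries 2, 3, 6, 7) get the copy-on-write
         * PAGE_COPY, their shared counterparts (10, 11, 14, 15) get
         * PAGE_SHARED, and everything else degenerates to read-only
         * or no access.
         */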
        protection_map[0] = PAGE_NONE;
        protection_map[1] = PAGE_READONLY;
        protection_map[2] = PAGE_COPY;
        protection_map[3] = PAGE_COPY;
        protection_map[4] = PAGE_READONLY;
        protection_map[5] = PAGE_READONLY;
        protection_map[6] = PAGE_COPY;
        protection_map[7] = PAGE_COPY;
        protection_map[8] = PAGE_NONE;
        protection_map[9] = PAGE_READONLY;
        protection_map[10] = PAGE_SHARED;
        protection_map[11] = PAGE_SHARED;
        protection_map[12] = PAGE_READONLY;
        protection_map[13] = PAGE_READONLY;
        protection_map[14] = PAGE_SHARED;
        protection_map[15] = PAGE_SHARED;
        return device_scan(start_mem);
}

extern void sun4c_test_wp(void);
extern void srmmu_test_wp(void);

struct cache_palias *sparc_aliases;

extern int min_free_pages;
extern int free_pages_low;
extern int free_pages_high;

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int datapages = 0;
        unsigned long tmp2, addr;
        extern char etext;

        /* Saves us work later. */
        memset((void *) ZERO_PAGE, 0, PAGE_SIZE);

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        start_mem = PAGE_ALIGN(start_mem);

        addr = PAGE_OFFSET;
        while(addr < start_mem) {
                mem_map[MAP_NR(addr)].reserved = 1;
                addr += PAGE_SIZE;
        }

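        /* Everything from PAGE_OFFSET up to start_mem (the kernel image
         * plus the boot-time allocations made so far) stays reserved.
         * The loop below then walks the physical bank list filled in by
         * the boot code and clears the reserved bit only on pages that
         * actually fall inside a bank, so holes in the physical address
         * space are never handed to the page allocator.
         */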
        for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
                for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
                        unsigned long phys_addr = (addr - PAGE_OFFSET);
                        unsigned long base = sp_banks[tmp2].base_addr;
                        unsigned long limit = base + sp_banks[tmp2].num_bytes;

                        if((phys_addr >= base) && (phys_addr < limit) &&
                           ((phys_addr + PAGE_SIZE) < limit))
                                mem_map[MAP_NR(addr)].reserved = 0;
                }
        }
        for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
                if(mem_map[MAP_NR(addr)].reserved) {
                        if (addr < (unsigned long) &etext)
                                codepages++;
                        else if(addr < start_mem)
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(addr)].count = 1;
                free_page(addr);
        }

        tmp2 = nr_free_pages << PAGE_SHIFT;

        printk("Memory: %luk available (%dk kernel code, %dk data)\n",
               tmp2 >> 10,
               codepages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10));

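        /* Simple low-memory watermarks: keep at least 1/128 of the free
         * pages (but never fewer than 16) in reserve; the low and high
         * thresholds that the swapping code compares against are set to
         * 1.5x and 2x that reserve.
         */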
        min_free_pages = nr_free_pages >> 7;
        if(min_free_pages < 16)
                min_free_pages = 16;
        free_pages_low = min_free_pages + (min_free_pages >> 1);
        free_pages_high = min_free_pages + min_free_pages;

        switch(sparc_cpu_model) {
        case sun4c:
        case sun4e:
                sun4c_test_wp();
                break;
        case sun4m:
        case sun4d:
                srmmu_test_wp();
                break;
        default:
                printk("mem_init: Could not test WP bit on this machine.\n");
                printk("mem_init: sparc_cpu_model = %d\n", sparc_cpu_model);
                printk("mem_init: Halting...\n");
                panic("mem_init()");
        }
}

void si_meminfo(struct sysinfo *val)
{
        int i;

        i = MAP_NR(high_memory);
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (mem_map[i].reserved)
                        continue;
                val->totalram++;
                if (!mem_map[i].count)
                        continue;
                val->sharedram += mem_map[i].count-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
}
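
/* All of the sysinfo fields filled in above are byte counts: the page
 * totals accumulated in the loop are shifted left by PAGE_SHIFT at the
 * end, freeram is derived the same way from nr_free_pages, and
 * bufferram is copied straight from buffermem.
 */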
