arch/mips/mm/init.c


DEFINITIONS

This source file includes the following definitions.
  1. __bad_pagetable
  2. __bad_page
  3. __zero_page
  4. show_mem
  5. paging_init
  6. mem_init
  7. si_meminfo

/*
 *  arch/mips/mm/init.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Ported to MIPS by Ralf Baechle
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/mipsconfig.h>

extern unsigned long pg0[1024];         /* page table for 0-4MB for everybody */

extern void deskstation_tyne_dma_init(void);
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long dummy;

        /* fill the bad page table with BAD_PAGE entries, one word per store */
        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,4\n\t"            /* step by one 4-byte entry (delay slot) */
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"r" (pte_val(BAD_PAGE)),
                 "0" ((long) empty_bad_page_table),
                 "1" (PTRS_PER_PAGE));

        return (pte_t *) empty_bad_page_table;
}

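/*
 * The inline assembly above (and in the two helpers that follow) is
 * nothing more than a word-fill loop.  A rough C equivalent is sketched
 * below, illustrative only and kept out of the build; it assumes 4-byte
 * page table entries, which is why the assembly advances its pointer by
 * 4 per store.
 */
#if 0
static void fill_page_words(unsigned long *dest, unsigned long val)
{
        int i;

        /* PTRS_PER_PAGE word-sized stores cover exactly one page */
        for (i = 0; i < PTRS_PER_PAGE; i++)
                *dest++ = val;
}
#endif
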
pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];
        unsigned long dummy;

        /* zero the bad page, one word per store */
        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,4\n\t"            /* step by one 4-byte word (delay slot) */
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"0" ((long) empty_bad_page),
                 "1" (PTRS_PER_PAGE));

        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
}

unsigned long __zero_page(void)
{
        extern char empty_zero_page[PAGE_SIZE];
        unsigned long dummy;

        /* zero the shared zero page, one word per store */
        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,%1,1\n\t"
                "bne\t$0,%1,1b\n\t"
                "addiu\t%0,%0,4\n\t"            /* step by one 4-byte word (delay slot) */
                ".set\treorder"
                :"=r" (dummy),
                 "=r" (dummy)
                :"0" ((long) empty_zero_page),
                 "1" (PTRS_PER_PAGE));

        return (unsigned long) empty_zero_page;
}

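/*
 * These three helpers are normally reached through wrapper macros in the
 * mm headers rather than called directly.  The exact spellings below are
 * an assumption based on other ports of this kernel generation and are
 * shown only for orientation, not defined in this file.
 */
#if 0
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()
#endif
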
void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        reserved++;
                else if (!mem_map[i])
                        free++;
                else
                        shared += mem_map[i]-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

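/*
 * In this kernel generation each mem_map[] entry is a use count for the
 * corresponding physical page, with MAP_PAGE_RESERVED kept as a flag bit
 * in the same word.  A count of 0 therefore means "free", and a count of
 * n > 1 contributes n-1 to the "shared" total printed above.
 */
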
extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
                if (pgd_none(pg_dir[0])) {
                        pgd_set(pg_dir, (pte_t *) start_mem);
                        start_mem += PAGE_SIZE;
                }
                /*
                 * also map it in at 0x00000000 for init
                 */
                pg_table = (pte_t *) pgd_page(pg_dir[0]);
                pgd_set(pg_dir, pg_table);
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                *pg_table = mk_pte(address, PAGE_SHARED);
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
#if KERNELBASE == KSEG0
        cacheflush();
#endif
        invalidate();
        return free_area_init(start_mem, end_mem);
}

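/*
 * Sizing example (illustrative): with 4kB pages and PTRS_PER_PAGE == 1024
 * entries per table, each page table allocated from start_mem above maps
 * 4MB of memory, so a 16MB machine spends four pages (16kB) on page
 * tables in this loop before the remainder is handed to free_area_init().
 */
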
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)] = 0;
                start_mem += PAGE_SIZE;
        }
#ifdef CONFIG_DESKSTATION_TYNE
        deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
        scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (mem_map[MAP_NR(tmp)]) {
                        /*
                         * We don't have any reserved pages on the
                         * MIPS systems supported until now
                         */
                        if (0)
                                reservedpages++;
                        else if (tmp < ((unsigned long) &etext - KERNELBASE))
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)] = 1;
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                high_memory >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
        pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));

        invalidate();
        return;
}

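/*
 * Note on the pg0[0] write above: virtual page 0 is not unmapped but
 * remapped read-only, so stray NULL-pointer writes from kernel mode fault
 * immediately while read accesses through the low mapping keep working.
 */
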
void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i] & MAP_PAGE_RESERVED)
                        continue;
                val->totalram++;
                if (!mem_map[i])
                        continue;
                val->sharedram += mem_map[i]-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
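
/*
 * si_meminfo() supplies the memory fields reported by the sysinfo(2)
 * system call; the page counts are converted to byte totals before
 * returning.  A minimal user-space consumer might look like the sketch
 * below (illustrative only, not part of the kernel build).
 */
#if 0
#include <sys/sysinfo.h>
#include <stdio.h>

int main(void)
{
        struct sysinfo info;

        if (sysinfo(&info) == 0)
                printf("total %lu  free %lu  shared %lu  buffers %lu\n",
                       info.totalram, info.freeram,
                       info.sharedram, info.bufferram);
        return 0;
}
#endif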
