root/arch/mips/mm/init.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __zeropage
  3. zeropage
  4. __bad_page
  5. __zero_page
  6. __copy_page
  7. show_mem
  8. paging_init
  9. mem_init
  10. si_meminfo

   1 /*
   2  *  arch/mips/mm/init.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  *  Ported to MIPS by Ralf Baechle
   6  */
   7 #include <linux/config.h>
   8 #include <linux/signal.h>
   9 #include <linux/sched.h>
  10 #include <linux/head.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/types.h>
  15 #include <linux/ptrace.h>
  16 #include <linux/mman.h>
  17 #include <linux/mm.h>
  18 
  19 #include <asm/cachectl.h>
  20 #include <asm/jazzdma.h>
  21 #include <asm/vector.h>
  22 #include <asm/system.h>
  23 #include <asm/segment.h>
  24 #include <asm/pgtable.h>
  25 
  26 extern void deskstation_tyne_dma_init(void);
  27 extern void scsi_mem_init(unsigned long);
  28 extern void sound_mem_init(void);
  29 extern void die_if_kernel(char *,struct pt_regs *,long);
  30 extern void show_net_buffers(void);
  31 
  32 extern char empty_zero_page[PAGE_SIZE];
  33 
  34 /*
  35  * BAD_PAGE is the page that is used for page faults when linux
  36  * is out-of-memory. Older versions of linux just did a
  37  * do_exit(), but using this instead means there is less risk
   38  * for a process dying in kernel mode, possibly leaving an inode
  39  * unused etc..
  40  *
  41  * BAD_PAGETABLE is the accompanying page-table: it is initialized
  42  * to point to BAD_PAGE entries.
  43  *
  44  * ZERO_PAGE is a special page that is used for zero-initialized
  45  * data and COW.
  46  */
/*
 * Return the out-of-memory fallback page table: every entry of
 * empty_bad_page_table is filled with the BAD_PAGE pte (see the
 * comment block above for why this exists).
 *
 * The table is written through its PT_OFFSET alias (page tables are
 * accessed at PT_OFFSET rather than PAGE_OFFSET on this port), and the
 * returned pointer is that aliased address.
 */
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];
        unsigned long page;
        unsigned long dummy1, dummy2;   /* consumed copies of the asm's read-write operands */

        /* Rebase the table's address from PAGE_OFFSET into the PT_OFFSET alias. */
        page = ((unsigned long)empty_bad_page_table) + (PT_OFFSET - PAGE_OFFSET);
#if __mips__ >= 3
        /*
         * Use 64bit code even for Linux/MIPS 32bit on R4000
         *
         * The 32-bit pte value is replicated into both halves of a
         * 64-bit register (dsll32/dsrl32/or), then stored with sd,
         * PAGE_SIZE/8 times, writing two ptes per store.
         */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                ".set\tnoat\n\t"
                ".set\tmips3\n\t"
                "dsll32\t$1,%2,0\n\t"
                "dsrl32\t%2,$1,0\n\t"
                "or\t%2,$1\n"
                "1:\tsd\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,8\n\t"               /* pointer bump sits in the branch delay slot */
                ".set\tmips0\n\t"
                ".set\tat\n"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"r" (pte_val(BAD_PAGE)),
                 "0" (page),                    /* %0 starts at the table base */
                 "1" (PAGE_SIZE/8));            /* %1 counts doublewords remaining */
#else
        /* 32-bit variant: store the pte value word by word, PAGE_SIZE/4 times. */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,4\n\t"               /* pointer bump in the branch delay slot */
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"r" (pte_val(BAD_PAGE)),
                 "0" (page),
                 "1" (PAGE_SIZE/4));
#endif

        return (pte_t *)page;
}
  94 
/*
 * Clear one page by storing zeros straight to the given address.
 * No cache maintenance is done here; callers (e.g. zeropage()) are
 * responsible for flushing first and for choosing the right alias.
 */
static inline void
__zeropage(unsigned long page)
{
        unsigned long dummy1, dummy2;   /* consumed copies of the asm's read-write operands */

#ifdef __R4000__
        /*
         * Use 64bit code even for Linux/MIPS 32bit on R4000
         *
         * sd of $0 clears eight bytes per iteration, PAGE_SIZE/8 times.
         */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                ".set\tnoat\n\t"
                ".set\tmips3\n"
                "1:\tsd\t$0,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,8\n\t"               /* pointer bump in the branch delay slot */
                ".set\tmips0\n\t"
                ".set\tat\n"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"0" (page),                    /* %0 walks the page */
                 "1" (PAGE_SIZE/8));            /* %1 counts doublewords remaining */
#else
        /* 32-bit variant: sw of $0 clears four bytes per iteration. */
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                "1:\tsw\t$0,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,4\n\t"               /* pointer bump in the branch delay slot */
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"0" (page),
                 "1" (PAGE_SIZE/4));
#endif
}
 133 
 134 static inline void
 135 zeropage(unsigned long page)
     /* [previous][next][first][last][top][bottom][index][help] */
 136 {
 137         sys_cacheflush((void *)page, PAGE_SIZE, BCACHE);
 138         sync_mem();
 139         __zeropage(page + (PT_OFFSET - PAGE_OFFSET));
 140 }
 141 
 142 pte_t __bad_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144         extern char empty_bad_page[PAGE_SIZE];
 145         unsigned long page = (unsigned long)empty_bad_page;
 146 
 147         zeropage(page);
 148         return pte_mkdirty(mk_pte(page, PAGE_SHARED));
 149 }
 150 
 151 unsigned long __zero_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 152 {
 153         unsigned long page = (unsigned long) empty_zero_page;
 154 
 155         zeropage(page);
 156         return page;
 157 }
 158 
 159 /*
 160  * This is horribly inefficient ...
 161  */
/*
 * Copy one page for COW, with the cache maintenance this port needs.
 * This is horribly inefficient ...
 *
 * 'from' and 'to' are PAGE_OFFSET (KSEG0, cached) addresses; the read
 * side goes through the uncached PT_OFFSET alias so it sees memory,
 * not a possibly stale cache line.
 */
void __copy_page(unsigned long from, unsigned long to)
{
        /*
         * Now copy page from uncached KSEG1 to KSEG0.  The copy destination
         * is in KSEG0 so that we keep stupid L2 caches happy.
         */
        if(from == (unsigned long) empty_zero_page)
        {
                /*
                 * The page copied most is the COW empty_zero_page.  Since we
                 * know its contents we can avoid the writeback reading of
                 * the page.  Speeds up the standard case a lot.
                 */
                __zeropage(to);
        }
        else
        {
                /*
                 * Force writeback of old page to memory.  We don't know the
                 * virtual address, so we have to flush the entire cache ...
                 */
                sys_cacheflush(0, ~0, DCACHE);
                sync_mem();
                /* Read through the uncached alias, write the cached destination. */
                memcpy((void *) to,
                       (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE);
        }
        /*
         * Now writeback the page again if colour has changed.
         * Actually this does a Hit_Writeback, but due to an artifact in
         * the R4xx0 implementation this should be slightly faster.
         * Then sweep chipset controlled secondary caches and the ICACHE.
         */
        if (page_colour(from) != page_colour(to))
                sys_cacheflush(0, ~0, DCACHE);
        sys_cacheflush(0, ~0, ICACHE);
}
 198 
 199 void show_mem(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 200 {
 201         int i, free = 0, total = 0, reserved = 0;
 202         int shared = 0;
 203 
 204         printk("Mem-info:\n");
 205         show_free_areas();
 206         printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 207         i = (high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
 208         while (i-- > 0) {
 209                 total++;
 210                 if (mem_map[i].reserved)
 211                         reserved++;
 212                 else if (!mem_map[i].count)
 213                         free++;
 214                 else
 215                         shared += mem_map[i].count-1;
 216         }
 217         printk("%d pages of RAM\n", total);
 218         printk("%d free pages\n", free);
 219         printk("%d reserved pages\n", reserved);
 220         printk("%d pages shared\n", shared);
 221         show_buffers();
 222 #ifdef CONFIG_NET
 223         show_net_buffers();
 224 #endif
 225 }
 226 
 227 extern unsigned long free_area_init(unsigned long, unsigned long);
 228 
 229 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
     /* [previous][next][first][last][top][bottom][index][help] */
 230 {
 231         pgd_init((unsigned long)swapper_pg_dir - (PT_OFFSET - PAGE_OFFSET));
 232         return free_area_init(start_mem, end_mem);
 233 }
 234 
/*
 * Late memory initialization: reserve platform DMA areas, mark the
 * usable range in mem_map[], hand every non-reserved page to the free
 * lists, and print the memory summary.
 *
 * start_mem/end_mem delimit the physical memory not already taken by
 * the kernel image and early allocations.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;      /* end of kernel text, set by the linker script */

#ifdef CONFIG_MIPS_JAZZ
        /* Jazz VDMA needs its translation table carved out first. */
        start_mem = vdma_init(start_mem, end_mem);
#endif

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* mark usable pages in the mem_map[] */
        start_mem = PAGE_ALIGN(start_mem);

        /*
         * Pages below start_mem (kernel image, early allocations) keep
         * their reserved flag; everything above is cleared for use.
         */
        tmp = start_mem;
        while (tmp < high_memory) {
                mem_map[MAP_NR(tmp)].reserved = 0;
                tmp += PAGE_SIZE;
        }

        /* Platform/driver hooks that may claim memory before it is freed. */
#ifdef CONFIG_DESKSTATION_TYNE
        deskstation_tyne_dma_init();
#endif
#ifdef CONFIG_SCSI
        scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
        sound_mem_init();
#endif
        /*
         * Release every non-reserved page to the allocator; classify the
         * reserved ones as code (below _etext) or data for the summary.
         */
        for (tmp = PAGE_OFFSET ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (mem_map[MAP_NR(tmp)].reserved) {
                        if (tmp < (unsigned long) &_etext)
                                codepages++;
                        else if (tmp < start_mem)
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
                /* count was just set to 1, so free_page() puts it on the free list */
                free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
                tmp >> 10,
                (high_memory - PAGE_OFFSET) >> 10,
                codepages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));

        return;
}
 287 
 288 void si_meminfo(struct sysinfo *val)
     /* [previous][next][first][last][top][bottom][index][help] */
 289 {
 290         int i;
 291 
 292         i = high_memory >> PAGE_SHIFT;
 293         val->totalram = 0;
 294         val->sharedram = 0;
 295         val->freeram = nr_free_pages << PAGE_SHIFT;
 296         val->bufferram = buffermem;
 297         while (i-- > 0)  {
 298                 if (mem_map[i].reserved)
 299                         continue;
 300                 val->totalram++;
 301                 if (!mem_map[i].count)
 302                         continue;
 303                 val->sharedram += mem_map[i].count-1;
 304         }
 305         val->totalram <<= PAGE_SHIFT;
 306         val->sharedram <<= PAGE_SHIFT;
 307         return;
 308 }

/* [previous][next][first][last][top][bottom][index][help] */