root/arch/i386/mm/init.c


DEFINITIONS

This source file includes the following definitions.
  1. __bad_pagetable
  2. __bad_page
  3. show_mem
  4. paging_init
  5. mem_init
  6. si_meminfo

/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/dma.h>

/*
 * The SMP kernel can't handle the 4MB page table optimizations yet
 */
#ifdef __SMP__
#undef USE_PENTIUM_MM
#endif

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
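
/*
 * Both helpers below fill their page with "rep ; stosl": EAX is stored
 * to [EDI] ECX times, four bytes per store, hence the PAGE_SIZE/4 count.
 * __bad_pagetable() fills empty_bad_page_table with BAD_PAGE entries;
 * __bad_page() simply zeroes empty_bad_page before handing it out.
 */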
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (pte_val(BAD_PAGE)),
                 "D" ((long) empty_bad_page_table),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return (pte_t *) empty_bad_page_table;
}

pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (0),
                 "D" ((long) empty_bad_page),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
}

void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

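        /*
         * Walk mem_map[] from the top of physical memory down: pages
         * flagged reserved are counted separately, pages with a zero
         * use count are free, and every extra reference beyond the
         * first on an in-use page is reported as "shared".
         */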
        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 * It may also hold the MP configuration table when we are booting SMP.
 */
#if 0
        memset((void *) 0, 0, PAGE_SIZE);
#endif
#ifdef __SMP__
        smp_scan_config(0x0,0x400);     /* Scan the bottom 1K for a signature */
        /*
         *      FIXME: Linux assumes you have 640K of base ram.. this continues
         *      the error...
         */
        smp_scan_config(639*0x400,0x400);       /* Scan the top 1K of base RAM */
        smp_scan_config(0xF0000,0x10000);       /* Scan the 64K of bios */
        /*
         *      If it is an SMP machine we should know now, unless the configuration
         *      is in an EISA/MCA bus machine with an extended bios data area. I don't
         *      have such a machine so someone else can fill in the check of the EBDA
         *      here.
         */
/*      smp_alloc_memory(8192); */
#endif
#ifdef TEST_VERIFY_AREA
        wp_works_ok = 0;
#endif
        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
#ifdef USE_PENTIUM_MM
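                /*
                 * 4MB-page fast path: "x86_capability & 8" tests the CPUID
                 * PSE feature bit (bit 3), and setting bit 4 (value 16) of
                 * CR4 enables page-size extensions.  Each _PAGE_4M pgd entry
                 * then maps 4MB directly, with no second-level page table.
                 */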
                if (address <= end_mem + 4*1024*1024 &&
                    (x86_capability & 8)) {
#ifdef GAS_KNOWS_CR4
                        __asm__("movl %%cr4,%%eax\n\t"
                                "orl $16,%%eax\n\t"
                                "movl %%eax,%%cr4"
                                : : :"ax");
#else
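                        /*
                         * Hand-encoded equivalent for assemblers that do not
                         * know CR4: 0x0f,0x20,0xe0 is "movl %cr4,%eax" and
                         * 0x0f,0x22,0xe0 is "movl %eax,%cr4".
                         */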
                        __asm__(".byte 0x0f,0x20,0xe0\n\t"
                                "orl $16,%%eax\n\t"
                                ".byte 0x0f,0x22,0xe0"
                                : : :"ax");
#endif
                        wp_works_ok = 1;
                        pgd_val(pg_dir[0]) = _PAGE_TABLE | _PAGE_4M | address;
                        pgd_val(pg_dir[768]) = _PAGE_TABLE | _PAGE_4M | address;
                        pg_dir++;
                        address += 4*1024*1024;
                        continue;
                }
#endif
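                /*
                 * pgd index 768 is the entry for virtual address 0xC0000000
                 * (0xC0000000 >> 22 == 768), the base of kernel space; the
                 * identity mapping through index 0 is, as the comment below
                 * says, only a temporary convenience for the early boot code.
                 */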
                /* map the memory at virtual addr 0xC0000000 */
                pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[768]));
                if (!pg_table) {
                        pg_table = (pte_t *) start_mem;
                        start_mem += PAGE_SIZE;
                }

                /* also map it temporarily at 0x00000000 for init */
                pgd_val(pg_dir[0])   = _PAGE_TABLE | (unsigned long) pg_table;
                pgd_val(pg_dir[768]) = _PAGE_TABLE | (unsigned long) pg_table;
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                set_pte(pg_table, mk_pte(address, PAGE_SHARED));
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
        invalidate();
        return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long start_low_mem = PAGE_SIZE;
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* mark usable pages in the mem_map[] */
        start_low_mem = PAGE_ALIGN(start_low_mem);

#ifdef __SMP__
        /*
         * But first pinch a few for the stack/trampoline stuff
         */
        start_low_mem += PAGE_SIZE;                             /* 32bit startup code */
        start_low_mem = smp_alloc_memory(start_low_mem);        /* AP processor stacks */
#endif
        start_mem = PAGE_ALIGN(start_mem);

        /*
         * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
         * They seem to have done something stupid with the floppy
         * controller as well..
         */
        while (start_low_mem < 0x9f000) {
                mem_map[MAP_NR(start_low_mem)].reserved = 0;
                start_low_mem += PAGE_SIZE;
        }

        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)].reserved = 0;
                start_mem += PAGE_SIZE;
        }
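
        /*
         * Now walk every page: pages still marked reserved are counted as
         * BIOS/adapter space (0xA0000-0xFFFFF), kernel code (below _etext)
         * or kernel data; everything else gets a use count of 1 and is
         * handed to the free lists with free_page(), skipping any initrd
         * image so it is not freed out from under us.
         */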
        for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (tmp >= MAX_DMA_ADDRESS)
                        mem_map[MAP_NR(tmp)].dma = 0;
                if (mem_map[MAP_NR(tmp)].reserved) {
                        if (tmp >= 0xA0000 && tmp < 0x100000)
                                reservedpages++;
                        else if (tmp < (unsigned long) &_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start || (tmp < initrd_start || tmp >=
                    initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                high_memory >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
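
        /*
         * The check below maps physical page 0 read-only through pg0[0] and
         * then writes to address 0 from kernel mode.  On a CPU that honours
         * the CR0 WP bit (i486 and later) the write faults, and the page
         * fault handler is expected to flip wp_works_ok to 1; on a 386 the
         * write goes through silently and wp_works_ok is forced to 0 here.
         */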
/* test if the WP bit is honoured in supervisor mode */
        if (wp_works_ok < 0) {
                pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
                invalidate();
                __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
                pg0[0] = 0;
                invalidate();
                if (wp_works_ok < 0)
                        wp_works_ok = 0;
        }
        return;
}

void si_meminfo(struct sysinfo *val)
{
        int i;

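        /*
         * Totals are accumulated in pages while scanning mem_map[] and
         * converted to bytes at the end with << PAGE_SHIFT; sharedram
         * counts each reference beyond the first on an in-use page.
         */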
        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0) {
                if (mem_map[i].reserved)
                        continue;
                val->totalram++;
                if (!mem_map[i].count)
                        continue;
                val->sharedram += mem_map[i].count-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
