root/arch/i386/mm/init.c

DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. show_mem
  4. paging_init
  5. mem_init
  6. si_meminfo

/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

/*
 * The SMP kernel can't handle the 4MB page table optimizations yet
 */
#ifdef __SMP__
#undef USE_PENTIUM_MM
#endif

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t * __bad_pagetable(void)
{
        extern char empty_bad_page_table[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (pte_val(BAD_PAGE)),
                 "D" ((long) empty_bad_page_table),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return (pte_t *) empty_bad_page_table;
}

pte_t __bad_page(void)
{
        extern char empty_bad_page[PAGE_SIZE];

        __asm__ __volatile__("cld ; rep ; stosl":
                :"a" (0),
                 "D" ((long) empty_bad_page),
                 "c" (PAGE_SIZE/4)
                :"di","cx");
        return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
}

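Both helpers above clear a page with an inline "cld ; rep ; stosl", which stores the 32-bit value held in EAX into PAGE_SIZE/4 consecutive longwords starting at the address in EDI. The following stand-alone sketch shows the same fill in plain C; the name fill_longs and the test buffer are invented for illustration and are not part of this file.

/* Hypothetical stand-alone sketch of the "cld ; rep ; stosl" fill used
 * above: store a 32-bit value into bytes/4 consecutive longwords.
 */
#include <stddef.h>

static void fill_longs(void *dst, unsigned int val, size_t bytes)
{
        unsigned int *p = dst;
        size_t n;

        for (n = 0; n < bytes / sizeof(*p); n++)
                p[n] = val;     /* stosl: store EAX at (EDI), EDI += 4 */
}

int main(void)
{
        static unsigned int page[1024];         /* one 4096-byte "page" */

        fill_longs(page, 0, sizeof(page));      /* like clearing empty_bad_page */
        return 0;
}
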
void show_mem(void)
{
        int i,free = 0,total = 0,reserved = 0;
        int shared = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
        i = high_memory >> PAGE_SHIFT;
        while (i-- > 0) {
                total++;
                if (mem_map[i].reserved)
                        reserved++;
                else if (!mem_map[i].count)
                        free++;
                else
                        shared += mem_map[i].count-1;
        }
        printk("%d pages of RAM\n",total);
        printk("%d free pages\n",free);
        printk("%d reserved pages\n",reserved);
        printk("%d pages shared\n",shared);
        show_buffers();
#ifdef CONFIG_NET
        show_net_buffers();
#endif
}

extern unsigned long free_area_init(unsigned long, unsigned long);

/*
 * paging_init() sets up the page tables - note that the first 4MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        unsigned long tmp;
        unsigned long address;

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 * It may also hold the MP configuration table when we are booting SMP.
 */
#if 0
        memset((void *) 0, 0, PAGE_SIZE);
#endif
#ifdef __SMP__
        smp_scan_config(0x0,0x400);     /* Scan the bottom 1K for a signature */
        /*
         *      FIXME: Linux assumes you have 640K of base ram.. this continues
         *      the error...
         */
        smp_scan_config(639*0x400,0x400);       /* Scan the top 1K of base RAM */
        smp_scan_config(0xF0000,0x10000);       /* Scan the 64K of bios */
        /*
         *      If it is an SMP machine we should know now, unless the configuration
         *      is in an EISA/MCA bus machine with an extended bios data area. I don't
         *      have such a machine so someone else can fill in the check of the EBDA
         *      here.
         */
/*      smp_alloc_memory(8192); */
#endif
#ifdef TEST_VERIFY_AREA
        wp_works_ok = 0;
#endif
        start_mem = PAGE_ALIGN(start_mem);
        address = 0;
        pg_dir = swapper_pg_dir;
        while (address < end_mem) {
#ifdef USE_PENTIUM_MM
                if (address <= end_mem + 4*1024*1024 &&
                    (x86_capability & 8)) {
#ifdef GAS_KNOWS_CR4
                        __asm__("movl %%cr4,%%eax\n\t"
                                "orl $16,%%eax\n\t"
                                "movl %%eax,%%cr4"
                                : : :"ax");
#else
                        __asm__(".byte 0x0f,0x20,0xe0\n\t"
                                "orl $16,%%eax\n\t"
                                ".byte 0x0f,0x22,0xe0"
                                : : :"ax");
#endif
                        wp_works_ok = 1;
                        pgd_val(pg_dir[0]) = _PAGE_TABLE | _PAGE_4M | address;
                        pgd_val(pg_dir[768]) = _PAGE_TABLE | _PAGE_4M | address;
                        pg_dir++;
                        address += 4*1024*1024;
                        continue;
                }
#endif
                /* map the memory at virtual addr 0xC0000000 */
                pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[768]));
                if (!pg_table) {
                        pg_table = (pte_t *) start_mem;
                        start_mem += PAGE_SIZE;
                }

                /* also map it temporarily at 0x00000000 for init */
                pgd_val(pg_dir[0])   = _PAGE_TABLE | (unsigned long) pg_table;
                pgd_val(pg_dir[768]) = _PAGE_TABLE | (unsigned long) pg_table;
                pg_dir++;
                for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
                        if (address < end_mem)
                                set_pte(pg_table, mk_pte(address, PAGE_SHARED));
                        else
                                pte_clear(pg_table);
                        address += PAGE_SIZE;
                }
        }
        invalidate();
        return free_area_init(start_mem, end_mem);
}

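The index 768 used for pg_dir above is simply the top ten bits of the kernel's virtual base: with i386 two-level paging each page-directory entry covers 4MB, and 0xC0000000 >> 22 == 768, which is why the same page tables are installed both at entry 0 (the temporary identity mapping used during init) and at entry 768 (the 0xC0000000 kernel mapping). The following stand-alone sketch of the address split is purely illustrative and not part of this file.

/* Hypothetical helper showing how i386 splits a 32-bit virtual address
 * into a page-directory index, a page-table index and a page offset.
 */
#include <stdio.h>

int main(void)
{
        unsigned long vaddr = 0xC0000000UL;               /* kernel virtual base */
        unsigned long pgd_index = vaddr >> 22;            /* top 10 bits -> 768  */
        unsigned long pte_index = (vaddr >> 12) & 0x3ff;  /* next 10 bits -> 0   */
        unsigned long offset    = vaddr & 0xfff;          /* low 12 bits -> 0    */

        printf("pgd %lu, pte %lu, offset %lu\n", pgd_index, pte_index, offset);
        return 0;
}
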
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        unsigned long start_low_mem = PAGE_SIZE;
        int codepages = 0;
        int reservedpages = 0;
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* mark usable pages in the mem_map[] */
        start_low_mem = PAGE_ALIGN(start_low_mem);

#ifdef __SMP__
        /*
         * But first pinch a few for the stack/trampoline stuff
         */
        start_low_mem += PAGE_SIZE;                             /* 32bit startup code */
        start_low_mem = smp_alloc_memory(start_low_mem);        /* AP processor stacks */
#endif
        start_mem = PAGE_ALIGN(start_mem);

        /*
         * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
         * They seem to have done something stupid with the floppy
         * controller as well..
         */
        while (start_low_mem < 0x9f000) {
                mem_map[MAP_NR(start_low_mem)].reserved = 0;
                start_low_mem += PAGE_SIZE;
        }

        while (start_mem < high_memory) {
                mem_map[MAP_NR(start_mem)].reserved = 0;
                start_mem += PAGE_SIZE;
        }
        for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
                if (tmp >= 16*1024*1024)
                        mem_map[MAP_NR(tmp)].dma = 0;
                if (mem_map[MAP_NR(tmp)].reserved) {
                        if (tmp >= 0xA0000 && tmp < 0x100000)
                                reservedpages++;
                        else if (tmp < (unsigned long) &_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start || (tmp < initrd_start || tmp >=
                    initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
                tmp >> 10,
                high_memory >> 10,
                codepages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10));
/* test if the WP bit is honoured in supervisor mode */
        if (wp_works_ok < 0) {
                pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
                invalidate();
                __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
                pg0[0] = 0;
                invalidate();
                if (wp_works_ok < 0)
                        wp_works_ok = 0;
        }
        return;
}

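The memory report above converts page counts to kilobytes with << (PAGE_SHIFT - 10): a page is 1 << PAGE_SHIFT bytes and a kilobyte is 1 << 10 bytes, so on i386 (PAGE_SHIFT == 12) each page counts as 4kB. A tiny, hypothetical check of that arithmetic, not part of this file:

/* Hypothetical check: converting page counts to kilobytes the way
 * mem_init() and show_mem() do, assuming 4kB pages (PAGE_SHIFT == 12).
 */
#include <assert.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long pages = 2048;                         /* 2048 * 4kB = 8MB */
        unsigned long kbytes = pages << (PAGE_SHIFT - 10);  /* multiply by 4    */

        assert(kbytes == 8192);                             /* 8192kB == 8MB    */
        return 0;
}
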
void si_meminfo(struct sysinfo *val)
{
        int i;

        i = high_memory >> PAGE_SHIFT;
        val->totalram = 0;
        val->sharedram = 0;
        val->freeram = nr_free_pages << PAGE_SHIFT;
        val->bufferram = buffermem;
        while (i-- > 0)  {
                if (mem_map[i].reserved)
                        continue;
                val->totalram++;
                if (!mem_map[i].count)
                        continue;
                val->sharedram += mem_map[i].count-1;
        }
        val->totalram <<= PAGE_SHIFT;
        val->sharedram <<= PAGE_SHIFT;
        return;
}
