root/arch/i386/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. show_mem
  4. paging_init
  5. mem_init
  6. si_meminfo

   1 /*
   2  *  linux/arch/i386/mm/init.c
   3  *
   4  *  Copyright (C) 1995  Linus Torvalds
   5  */
   6 
   7 #include <linux/config.h>
   8 #include <linux/signal.h>
   9 #include <linux/sched.h>
  10 #include <linux/head.h>
  11 #include <linux/kernel.h>
  12 #include <linux/errno.h>
  13 #include <linux/string.h>
  14 #include <linux/types.h>
  15 #include <linux/ptrace.h>
  16 #include <linux/mman.h>
  17 #include <linux/mm.h>
  18 #include <linux/swap.h>
  19 #include <linux/smp.h>
  20 #ifdef CONFIG_BLK_DEV_INITRD
  21 #include <linux/blk.h>
  22 #endif
  23 
  24 #include <asm/system.h>
  25 #include <asm/segment.h>
  26 #include <asm/pgtable.h>
  27 #include <asm/dma.h>
  28 
  29 /*
  30  * The SMP kernel can't handle the 4MB page table optimizations yet
  31  */
  32 #ifdef __SMP__
  33 #undef USE_PENTIUM_MM
  34 #endif
  35 
  36 extern void die_if_kernel(char *,struct pt_regs *,long);
  37 extern void show_net_buffers(void);
  38 
  39 /*
  40  * BAD_PAGE is the page that is used for page faults when Linux
  41  * is out of memory. Older versions of Linux just did a
  42  * do_exit(), but using this instead means there is less risk
  43  * of a process dying in kernel mode, possibly leaving an inode
  44  * unused, etc.
  45  *
  46  * BAD_PAGETABLE is the accompanying page-table: it is initialized
  47  * to point to BAD_PAGE entries.
  48  *
  49  * ZERO_PAGE is a special page that is used for zero-initialized
  50  * data and COW.
  51  */
  52 pte_t * __bad_pagetable(void)
  53 {
  54         extern char empty_bad_page_table[PAGE_SIZE];
  55 
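             /* "cld ; rep ; stosl" stores EAX (the BAD_PAGE pte value) into
              * (EDI) ECX times, i.e. PAGE_SIZE/4 longword stores, filling the
              * whole page table with BAD_PAGE entries. */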
  56         __asm__ __volatile__("cld ; rep ; stosl":
  57                 :"a" (pte_val(BAD_PAGE)),
  58                  "D" ((long) empty_bad_page_table),
  59                  "c" (PAGE_SIZE/4)
  60                 :"di","cx");
  61         return (pte_t *) empty_bad_page_table;
  62 }
  63 
  64 pte_t __bad_page(void)
  65 {
  66         extern char empty_bad_page[PAGE_SIZE];
  67 
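             /* Same store loop, but with EAX = 0: zero the whole empty_bad_page
              * before returning a dirty PAGE_SHARED pte that points at it. */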
  68         __asm__ __volatile__("cld ; rep ; stosl":
  69                 :"a" (0),
  70                  "D" ((long) empty_bad_page),
  71                  "c" (PAGE_SIZE/4)
  72                 :"di","cx");
  73         return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
  74 }
  75 
  76 void show_mem(void)
  77 {
  78         int i,free = 0,total = 0,reserved = 0;
  79         int shared = 0;
  80 
  81         printk("Mem-info:\n");
  82         show_free_areas();
  83         printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
  84         i = high_memory >> PAGE_SHIFT;
  85         while (i-- > 0) {
  86                 total++;
  87                 if (PageReserved(mem_map+i))
  88                         reserved++;
  89                 else if (!mem_map[i].count)
  90                         free++;
  91                 else
  92                         shared += mem_map[i].count-1;
  93         }
  94         printk("%d pages of RAM\n",total);
  95         printk("%d free pages\n",free);
  96         printk("%d reserved pages\n",reserved);
  97         printk("%d pages shared\n",shared);
  98         show_buffers();
  99 #ifdef CONFIG_NET
 100         show_net_buffers();
 101 #endif
 102 }
 103 
 104 extern unsigned long free_area_init(unsigned long, unsigned long);
 105 
 106 /*
 107  * paging_init() sets up the page tables - note that the first 4MB are
 108  * already mapped by head.S.
 109  *
 110  * This routine also unmaps the page at virtual kernel address 0, so
 111  * that we can trap those pesky NULL-reference errors in the kernel.
 112  */
 113 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
 114 {
 115         pgd_t * pg_dir;
 116         pte_t * pg_table;
 117         unsigned long tmp;
 118         unsigned long address;
 119 
 120 /*
 121  * Physical page 0 is special; it's not touched by Linux since BIOS
 122  * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 123  * and write protected to detect null pointer references in the
 124  * kernel.
 125  * It may also hold the MP configuration table when we are booting SMP.
 126  */
 127 #if 0
 128         memset((void *) 0, 0, PAGE_SIZE);
 129 #endif
 130 #ifdef __SMP__
 131         if (!smp_scan_config(0x0,0x400))        /* Scan the bottom 1K for a signature */
 132         {
 133                 /*
 134                  *      FIXME: Linux assumes you have 640K of base RAM; this continues
 135                  *      the error...
 136                  */
 137                 if (!smp_scan_config(639*0x400,0x400))  /* Scan the top 1K of base RAM */
 138                         smp_scan_config(0xF0000,0x10000);       /* Scan the 64K of bios */
 139         }
 140         /*
 141          *      If it is an SMP machine we should know by now, unless the
 142          *      configuration lives in the extended BIOS data area of an EISA/MCA
 143          *      machine. I don't have such a machine, so someone else can fill in
 144          *      the check of the EBDA here.
 145          */
 146 /*      smp_alloc_memory(8192); */
 147 #endif
 148 #ifdef TEST_VERIFY_AREA
 149         wp_works_ok = 0;
 150 #endif
 151         start_mem = PAGE_ALIGN(start_mem);
 152         address = 0;
 153         pg_dir = swapper_pg_dir;
 154         while (address < end_mem) {
 155 #ifdef USE_PENTIUM_MM
 156                 /*
 157                  * This will create page tables that
 158                  * span up to the next 4MB virtual
 159                  * memory boundary, but that's ok,
 160                  * we won't use that memory anyway.
 161                  */
 162                 if (x86_capability & 8) {
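                             /* CPUID feature bit 3 means the CPU supports 4MB pages
                              * (PSE); "orl $16" below sets bit 4 of CR4 to enable
                              * them before 4MB directory entries are installed. */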
 163 #ifdef GAS_KNOWS_CR4
 164                         __asm__("movl %%cr4,%%eax\n\t"
 165                                 "orl $16,%%eax\n\t"
 166                                 "movl %%eax,%%cr4"
 167                                 : : :"ax");
 168 #else
 169                         __asm__(".byte 0x0f,0x20,0xe0\n\t"
 170                                 "orl $16,%%eax\n\t"
 171                                 ".byte 0x0f,0x22,0xe0"
 172                                 : : :"ax");
 173 #endif
 174                         wp_works_ok = 1;
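                             /* Entry 0 maps this 4MB chunk at virtual address 0 for
                              * init, and entry 768 (768 * 4MB = 0xC0000000) is the
                              * kernel's permanent mapping of the same memory. */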
 175                         pgd_val(pg_dir[0]) = _PAGE_TABLE | _PAGE_4M | address;
 176                         pgd_val(pg_dir[768]) = _PAGE_TABLE | _PAGE_4M | address;
 177                         pg_dir++;
 178                         address += 4*1024*1024;
 179                         continue;
 180                 }
 181 #endif
 182                 /* map the memory at virtual addr 0xC0000000 */
 183                 pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[768]));
 184                 if (!pg_table) {
 185                         pg_table = (pte_t *) start_mem;
 186                         start_mem += PAGE_SIZE;
 187                 }
 188 
 189                 /* also map it temporarily at 0x00000000 for init */
 190                 pgd_val(pg_dir[0])   = _PAGE_TABLE | (unsigned long) pg_table;
 191                 pgd_val(pg_dir[768]) = _PAGE_TABLE | (unsigned long) pg_table;
 192                 pg_dir++;
 193                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
 194                         if (address < end_mem)
 195                                 set_pte(pg_table, mk_pte(address, PAGE_SHARED));
 196                         else
 197                                 pte_clear(pg_table);
 198                         address += PAGE_SIZE;
 199                 }
 200         }
 201         flush_tlb();
 202         return free_area_init(start_mem, end_mem);
 203 }
 204 
 205 void mem_init(unsigned long start_mem, unsigned long end_mem)
 206 {
 207         unsigned long start_low_mem = PAGE_SIZE;
 208         int codepages = 0;
 209         int reservedpages = 0;
 210         int datapages = 0;
 211         unsigned long tmp;
 212         extern int _etext;
 213 
 214         end_mem &= PAGE_MASK;
 215         high_memory = end_mem;
 216 
 217         /* clear the zero-page */
 218         memset(empty_zero_page, 0, PAGE_SIZE);
 219 
 220         /* mark usable pages in the mem_map[] */
 221         start_low_mem = PAGE_ALIGN(start_low_mem);
 222 
 223 #ifdef __SMP__
 224         /*
 225          * But first pinch a few for the stack/trampoline stuff
 226          */
 227         start_low_mem += PAGE_SIZE;                             /* 32bit startup code */
 228         start_low_mem = smp_alloc_memory(start_low_mem);        /* AP processor stacks */
 229 #endif
 230         start_mem = PAGE_ALIGN(start_mem);
 231 
 232         /*
 233          * IBM messed up *AGAIN* in their ThinkPad: 0xA0000 -> 0x9F000.
 234          * They seem to have done something stupid with the floppy
 235          * controller as well.
 236          */
 237         while (start_low_mem < 0x9f000) {
 238                 clear_bit(PG_reserved, &mem_map[MAP_NR(start_low_mem)].flags);
 239                 start_low_mem += PAGE_SIZE;
 240         }
 241 
 242         while (start_mem < high_memory) {
 243                 clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
 244                 start_mem += PAGE_SIZE;
 245         }
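             /* Walk every physical page: reserved pages are only tallied (as
              * BIOS/adapter space between 0xA0000 and 0x100000, kernel code below
              * _etext, or kernel data), while all other pages get a reference
              * count of 1 and are handed to the free lists; pages holding an
              * initrd image keep their count but are not freed here. */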
 246         for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
 247                 if (tmp >= MAX_DMA_ADDRESS)
 248                         clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
 249                 if (PageReserved(mem_map+MAP_NR(tmp))) {
 250                         if (tmp >= 0xA0000 && tmp < 0x100000)
 251                                 reservedpages++;
 252                         else if (tmp < (unsigned long) &_etext)
 253                                 codepages++;
 254                         else
 255                                 datapages++;
 256                         continue;
 257                 }
 258                 mem_map[MAP_NR(tmp)].count = 1;
 259 #ifdef CONFIG_BLK_DEV_INITRD
 260                 if (!initrd_start || (tmp < initrd_start || tmp >=
 261                     initrd_end))
 262 #endif
 263                         free_page(tmp);
 264         }
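             /* Page counts become kilobytes below: count << PAGE_SHIFT is bytes,
              * so >> 10 (or << (PAGE_SHIFT-10) directly) gives kB, i.e. simply
              * count * 4 with 4kB pages. */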
 265         tmp = nr_free_pages << PAGE_SHIFT;
 266         printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
 267                 tmp >> 10,
 268                 high_memory >> 10,
 269                 codepages << (PAGE_SHIFT-10),
 270                 reservedpages << (PAGE_SHIFT-10),
 271                 datapages << (PAGE_SHIFT-10));
 272 /* test if the WP bit is honoured in supervisor mode */
 273         if (wp_works_ok < 0) {
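                     /* Map physical page 0 read-only and poke it from supervisor
                      * mode: on CPUs that honour WP the write faults, and the page
                      * fault handler is expected to set wp_works_ok; if it is still
                      * negative afterwards (386-style behaviour), record that WP
                      * does not work. */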
 274                 pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
 275                 flush_tlb();
 276                 __asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
 277                 pg0[0] = 0;
 278                 flush_tlb();
 279                 if (wp_works_ok < 0)
 280                         wp_works_ok = 0;
 281         }
 282         return;
 283 }
 284 
 285 void si_meminfo(struct sysinfo *val)
 286 {
 287         int i;
 288 
 289         i = high_memory >> PAGE_SHIFT;
 290         val->totalram = 0;
 291         val->sharedram = 0;
 292         val->freeram = nr_free_pages << PAGE_SHIFT;
 293         val->bufferram = buffermem;
 294         while (i-- > 0)  {
 295                 if (PageReserved(mem_map+i))
 296                         continue;
 297                 val->totalram++;
 298                 if (!mem_map[i].count)
 299                         continue;
 300                 val->sharedram += mem_map[i].count-1;
 301         }
 302         val->totalram <<= PAGE_SHIFT;
 303         val->sharedram <<= PAGE_SHIFT;
 304         return;
 305 }
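
A minimal C sketch of what the "cld ; rep ; stosl" sequences in __bad_pagetable()
and __bad_page() do, for readers less familiar with i386 string instructions. The
helper name fill_page_with() and the plain loop are illustrative only and are not
part of this file; the kernel uses the string-store instruction instead.

#include <stddef.h>

/* Store a 32-bit value page_size/4 times, which is what "rep stosl" does with
 * EAX as the value, EDI as the destination and ECX as the count above.
 * (unsigned long is 4 bytes on i386, so sizeof(*page) matches PAGE_SIZE/4.) */
static void fill_page_with(unsigned long *page, unsigned long value,
                           unsigned long page_size)
{
        size_t i;

        for (i = 0; i < page_size / sizeof(*page); i++)
                page[i] = value;
}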
