root/arch/m68k/mm/init.c


DEFINITIONS

This source file includes the following definitions:
  1. __bad_pagetable
  2. __bad_page
  3. show_mem
  4. kernel_page_table
  5. map_chunk
  6. paging_init
  7. mem_init
  8. si_meminfo

/*
 *  linux/arch/m68k/mm/init.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/machdep.h>

extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
extern unsigned long mm_phys_to_virt (unsigned long addr);
extern char *rd_start;
extern int rd_doload;

unsigned long ramdisk_length;

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

pte_t *__bad_pagetable(void)
{
    memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
    return (pte_t *)empty_bad_page_table;
}

static unsigned long empty_bad_page;

pte_t __bad_page(void)
{
    memset ((void *)empty_bad_page, 0, PAGE_SIZE);
    return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
}

unsigned long empty_zero_page;

void show_mem(void)
{
    unsigned long i;
    int free = 0, total = 0, reserved = 0, nonshared = 0, shared = 0;

    printk("\nMem-info:\n");
    show_free_areas();
    printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
    i = high_memory >> PAGE_SHIFT;
    while (i-- > 0) {
        total++;
        if (PageReserved(mem_map+i))
            reserved++;
        else if (!mem_map[i].count)
            free++;
        else if (mem_map[i].count == 1)
            nonshared++;
        else
            shared += mem_map[i].count-1;
    }
    printk("%d pages of RAM\n",total);
    printk("%d free pages\n",free);
    printk("%d reserved pages\n",reserved);
    printk("%d pages nonshared\n",nonshared);
    printk("%d pages shared\n",shared);
    show_buffers();
#ifdef CONFIG_NET
    show_net_buffers();
#endif
}

#if 0 /* The 68030 doesn't care about reserved bits. */
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
#endif

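/*
 * Grab one page from the boot-time allocation pointer for use as a
 * kernel page table and advance *memavailp past it.  The page is
 * made non-cacheable because on the '040/'060 the MMU fetches
 * descriptors straight from memory, so page tables must not sit in
 * copyback-cached pages.
 */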
pte_t *kernel_page_table (unsigned long *memavailp)
{
        pte_t *ptablep;

        ptablep = (pte_t *)*memavailp;
        *memavailp += PAGE_SIZE;

        nocache_page ((unsigned long)ptablep);

        return ptablep;
}

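/*
 * Map the physical chunk [addr, addr+size) into the kernel virtual
 * address space, allocating pointer tables and page tables from
 * *memavailp as needed.  The static cursors mem_mapped/virtaddr
 * persist across calls, so successive chunks land at consecutive
 * virtual addresses starting at 0.  Returns the total mapped so far.
 */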
static unsigned long map_chunk (unsigned long addr,
                                unsigned long size,
                                unsigned long *memavailp)
{
#define ONEMEG  (1024*1024)
#define L3TREESIZE (256*1024)

        int is040 = m68k_is040or060;
        static unsigned long mem_mapped = 0;
        static unsigned long virtaddr = 0;
        static pte_t *ktablep = NULL;
        unsigned long *kpointerp;
        unsigned long physaddr;
        extern pte_t *kpt;
        int pindex;   /* index into pointer table */
        pgd_t *page_dir = pgd_offset_k (virtaddr);

        if (!pgd_present (*page_dir)) {
                /* we need a new pointer table */
                kpointerp = (unsigned long *) get_kpointer_table ();
                pgd_set (page_dir, (pmd_t *) kpointerp);
                memset (kpointerp, 0, PTRS_PER_PMD * sizeof (pmd_t));
        }
        else
                kpointerp = (unsigned long *) pgd_page (*page_dir);
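
        /* A pointer table is 128 longword descriptors (512 bytes);
         * get_kpointer_table() presumably sub-allocates several of
         * them from a single page rather than using a page apiece. */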

        /*
         * pindex is the offset into the pointer table for the
         * descriptors for the current virtual address being mapped.
         */
        pindex = (virtaddr >> 18) & 0x7f;
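        /* Each pointer-table entry covers 2^18 bytes = 256K of
         * virtual space; with 128 entries one pointer table spans
         * 32M, which is why a fresh table is fetched every 32M in
         * the loop below. */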

#ifdef DEBUG
        printk ("mm=%ld, kernel_pg_dir=%p, kpointerp=%p, pindex=%d\n",
                mem_mapped, kernel_pg_dir, kpointerp, pindex);
#endif

        /*
         * if this is running on an '040, we already allocated a page
         * table for the first 4M.  The address is stored in kpt by
         * arch/head.S
         */
        if (is040 && mem_mapped == 0)
                ktablep = kpt;

        for (physaddr = addr;
             physaddr < addr + size;
             mem_mapped += L3TREESIZE, virtaddr += L3TREESIZE) {
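                /* physaddr is advanced inside the branches below
                 * (256K per iteration); the loop header only steps
                 * the virtual/accounting cursors. */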

#ifdef DEBUG
                printk ("pa=%#lx va=%#lx ", physaddr, virtaddr);
#endif

                if (pindex > 127 && mem_mapped >= 32*ONEMEG) {
                        /* we need a new pointer table every 32M */
#ifdef DEBUG
                        printk ("[new pointer]");
#endif

                        kpointerp = (unsigned long *)get_kpointer_table ();
                        pgd_set(pgd_offset_k(virtaddr), (pmd_t *)kpointerp);
                        pindex = 0;
                }

                if (is040) {
                        int i;
                        unsigned long ktable;

                        /* Don't map the first 4 MB again. The pagetables
                         * for this range have already been initialized
                         * in boot/head.S. Otherwise the pages used for
                         * tables would be reinitialized to copyback mode.
                         */

                        if (mem_mapped < 4 * ONEMEG) {
#ifdef DEBUG
                                printk ("Already initialized\n");
#endif
                                physaddr += L3TREESIZE;
                                pindex++;
                                continue;
                        }
#ifdef DEBUG
                        printk ("[setup table]");
#endif

                        /*
                         * 68040, use page tables pointed to by the
                         * kernel pointer table.
                         */

                        if ((pindex & 15) == 0) {
                                /* Need new page table every 4M on the '040 */
#ifdef DEBUG
                                printk ("[new table]");
#endif
                                ktablep = kernel_page_table (memavailp);
                        }
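
                        /* One 4K page holds 1024 PTEs and thus maps
                         * 4M; it is handed out in 64-entry (256K)
                         * slices, 16 per page -- hence the
                         * (pindex & 15) test above. */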

                        ktable = VTOP(ktablep);

                        /*
                         * initialize section of the page table mapping
                         * this 256K portion.
                         */
                        for (i = 0; i < 64; i++) {
                                pte_val(ktablep[i]) = physaddr | _PAGE_PRESENT
                                        | _PAGE_CACHE040 | _PAGE_GLOBAL040;
                                physaddr += PAGE_SIZE;
                        }
                        ktablep += 64;

                        /*
                         * make the kernel pointer table point to the
                         * kernel page table.  Each entry points to a
                         * 64-entry section of the page table.
                         */

                        kpointerp[pindex++] = ktable | _PAGE_TABLE;
                } else {
                        /*
                         * 68030, use early termination page descriptors.
                         * Each one points to 64 pages (256K).
                         */
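                        /* An early termination descriptor is a page
                         * descriptor stored at the pointer-table
                         * level: the '030 stops its table walk there,
                         * so one descriptor maps a contiguous 256K.
                         * Only the first 256K gets a real page table,
                         * so that the zero page can be unmapped to
                         * catch NULL pointer dereferences. */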
#ifdef DEBUG
                        printk ("[early term] ");
#endif
                        if (virtaddr == 0UL) {
                                /* map the first 256K using a 64 entry
                                 * 3rd level page table.
                                 * UNMAP the first entry to trap
                                 * zero page (NULL pointer) references
                                 */
                                int i;
                                unsigned long *tbl;

                                tbl = (unsigned long *)get_kpointer_table();

                                kpointerp[pindex++] = VTOP(tbl) | _PAGE_TABLE;

                                for (i = 0; i < 64; i++, physaddr += PAGE_SIZE)
                                        tbl[i] = physaddr | _PAGE_PRESENT;

                                /* unmap the zero page */
                                tbl[0] = 0;
                        } else {
                                /* not the first 256K */
                                kpointerp[pindex++] = physaddr | _PAGE_PRESENT;
#ifdef DEBUG
                                printk ("%lx=%lx ", VTOP(&kpointerp[pindex-1]),
                                        kpointerp[pindex-1]);
#endif
                                physaddr += 64 * PAGE_SIZE;
                        }
                }
#ifdef DEBUG
                printk ("\n");
#endif
        }

        return mem_mapped;
}

extern unsigned long free_area_init(unsigned long, unsigned long);

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are the starting and ending addresses of the
 * available kernel virtual memory.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
        int chunk;
        unsigned long mem_avail = 0;
        /* pointer to page table for kernel stacks */
        extern unsigned long availmem;

#ifdef DEBUG
        {
                extern pte_t *kpt;
                printk ("start of paging_init (%p, %p, %lx, %lx, %lx)\n",
                        kernel_pg_dir, kpt, availmem, start_mem, end_mem);
        }
#endif

#if 0
        /*
         * Setup cache bits
         */
        mm_cachebits = m68k_is040or060 ? _PAGE_CACHE040 : 0;

        /* Initialize protection map.  */
        protection_map[0] = PAGE_READONLY;
        protection_map[1] = PAGE_READONLY;
        protection_map[2] = PAGE_COPY;
        protection_map[3] = PAGE_COPY;
        protection_map[4] = PAGE_READONLY;
        protection_map[5] = PAGE_READONLY;
        protection_map[6] = PAGE_COPY;
        protection_map[7] = PAGE_COPY;
        protection_map[8] = PAGE_READONLY;
        protection_map[9] = PAGE_READONLY;
        protection_map[10] = PAGE_SHARED;
        protection_map[11] = PAGE_SHARED;
        protection_map[12] = PAGE_READONLY;
        protection_map[13] = PAGE_READONLY;
        protection_map[14] = PAGE_SHARED;
        protection_map[15] = PAGE_SHARED;
#endif

        /*
         * Map the physical memory available into the kernel virtual
         * address space.  It may allocate some memory for page
         * tables and thus modify availmem.
         */

        for (chunk = 0; chunk < boot_info.num_memory; chunk++) {
                mem_avail = map_chunk (boot_info.memory[chunk].addr,
                                       boot_info.memory[chunk].size,
                                       &availmem);
        }
        flush_tlb_all();
#ifdef DEBUG
        printk ("memory available is %ldKB\n", mem_avail >> 10);
#endif

        /*
         * virtual address after end of kernel
         * "availmem" is setup by the code in head.S.
         */
        start_mem = availmem;

#ifdef DEBUG
        printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
                start_mem, end_mem);
#endif

        /*
         * initialize the bad page table and bad page to point
         * to a couple of allocated pages
         */
        empty_bad_page_table = start_mem;
        start_mem += PAGE_SIZE;
        empty_bad_page = start_mem;
        start_mem += PAGE_SIZE;
        empty_zero_page = start_mem;
        start_mem += PAGE_SIZE;
        memset((void *)empty_zero_page, 0, PAGE_SIZE);

#if 0
        /*
         * allocate the "swapper" page directory and
         * record in task 0 (swapper) tss
         */
        swapper_pg_dir = (pgd_t *)get_kpointer_table();

        init_mm.pgd = swapper_pg_dir;
#endif

        memset (swapper_pg_dir, 0, sizeof(pgd_t)*PTRS_PER_PGD);
        task[0]->tss.pagedir_v = (unsigned long *)swapper_pg_dir;
        task[0]->tss.pagedir_p = VTOP (swapper_pg_dir);

#ifdef DEBUG
        printk ("task 0 pagedir at %p virt, %#lx phys\n",
                task[0]->tss.pagedir_v, task[0]->tss.pagedir_p);
#endif

        /* setup CPU root pointer for swapper task */
        task[0]->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
        task[0]->tss.crp[1] = task[0]->tss.pagedir_p;

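        /* crp[0] is the root pointer's limit/flags longword (lower
         * limit of 0, short-format descriptors), crp[1] the root
         * table's physical address.  The '040/'060 keep the root
         * pointer in the URP register instead; 0x4e7b0806 below is
         * the opcode for "movec %d0,%urp", spelled as a .long
         * presumably because older assemblers lack the mnemonic. */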
        if (m68k_is040or060)
                asm ("movel %0,%/d0\n\t"
                     ".long 0x4e7b0806" /* movec d0,urp */
                     : /* no outputs */
                     : "g" (task[0]->tss.crp[1])
                     : "d0");
        else
                asm ("pmove %0@,%/crp"
                     : /* no outputs */
                     : "a" (task[0]->tss.crp));

#ifdef DEBUG
        printk ("set crp\n");
#endif

        /*
         * Set up SFC/DFC registers (user data space)
         */
        set_fs (USER_DS);

#ifdef DEBUG
        printk ("before free_area_init\n");
#endif

#ifndef CONFIG_BLK_DEV_INITRD
        /*
         * Since the initialization of the ramdisks has been changed
         * to fit the new driver initialization scheme, we have to
         * make room for our preloaded image here, instead of doing it
         * in rd_init(), as we cannot kmalloc() a block large enough
         * for the image.
         */

        ramdisk_length = boot_info.ramdisk_size * 1024;

        if ((ramdisk_length > 0) && (ROOT_DEV == 0)) {
          char *rdp;         /* current location of ramdisk */

          rd_start = (char *) start_mem;

          /* get current address of ramdisk */
          rdp = (char *)mm_phys_to_virt (boot_info.ramdisk_addr);

          /* copy the ram disk image */
          memcpy (rd_start, rdp, ramdisk_length);
          start_mem += ramdisk_length;
          rd_doload = 1;     /* tell rd_load to load this thing */
        }
#endif

        return free_area_init (start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
        int codepages = 0;
        int reservedpages = 0; /* stays 0: reserved pages are counted
                                * as code or data below */
        int datapages = 0;
        unsigned long tmp;
        extern int _etext;

        end_mem &= PAGE_MASK;
        high_memory = end_mem;

        start_mem = PAGE_ALIGN(start_mem);
        while (start_mem < high_memory) {
                clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
                start_mem += PAGE_SIZE;
        }

#ifdef CONFIG_ATARI

        if (MACH_IS_ATARI) {

                /* If the page with physical address 0 isn't the first kernel
                 * code page, it has to be reserved, because the hardware
                 * allows the first 2 KB of ST-RAM to be accessed only in
                 * supervisor mode.
                 */

                unsigned long virt0 = PTOV( 0 ), adr;
                extern unsigned long rsvd_stram_beg, rsvd_stram_end;

                if (virt0 != 0) {

                        set_bit(PG_reserved, &mem_map[MAP_NR(virt0)].flags);

                        /* Also, reserve all pages that have been marked by
                         * stram_alloc() (e.g. for the screen memory). (This may
                         * treat the first ST-RAM page a second time, but that
                         * doesn't hurt...) */

                        rsvd_stram_end += PAGE_SIZE - 1;
                        rsvd_stram_end &= PAGE_MASK;
                        rsvd_stram_beg &= PAGE_MASK;
                        for( adr = rsvd_stram_beg; adr < rsvd_stram_end; adr += PAGE_SIZE )
                                set_bit(PG_reserved, &mem_map[MAP_NR(adr)].flags);
                }
        }

#endif
#ifdef DEBUG
        printk ("task[0] root table is %p\n", task[0]->tss.pagedir_v);
#endif

        for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
                if (VTOP (tmp) >= mach_max_dma_address)
                        clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
                if (PageReserved(mem_map+MAP_NR(tmp))) {
                        if (tmp < (unsigned long)&_etext)
                                codepages++;
                        else
                                datapages++;
                        continue;
                }
                mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
                        free_page(tmp);
        }
        tmp = nr_free_pages << PAGE_SHIFT;
        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
               tmp >> 10,
               high_memory >> 10,
               codepages << (PAGE_SHIFT-10),
               reservedpages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10));
}

void si_meminfo(struct sysinfo *val)
{
    unsigned long i;

    i = high_memory >> PAGE_SHIFT;
    val->totalram = 0;
    val->sharedram = 0;
    val->freeram = nr_free_pages << PAGE_SHIFT;
    val->bufferram = buffermem;
    while (i-- > 0) {
        if (PageReserved(mem_map+i))
            continue;
        val->totalram++;
        if (!mem_map[i].count)
            continue;
        val->sharedram += mem_map[i].count-1;
    }
    val->totalram <<= PAGE_SHIFT;
    val->sharedram <<= PAGE_SHIFT;
    return;
}
