root/arch/m68k/mm/memory.c


DEFINITIONS

This source file includes the following definitions.
  1. get_pointer_table
  2. free_pointer_table
  3. get_kpointer_table
  4. free_kpointer_table
  5. mm_vtop
  6. mm_ptov
  7. cache_clear
  8. cache_push
  9. cache_push_v
  10. flush_cache_all
  11. flush_page_to_ram
  12. mm_phys_to_virt
  13. mm_end_of_chunk
  14. kernel_map
  15. set_cmode_pte
  16. set_cmode_pmd
  17. kernel_set_cachemode

   1 /*
   2  *  linux/arch/m68k/mm/memory.c
   3  *
   4  *  Copyright (C) 1995  Hamish Macdonald
   5  */
   6 
   7 #include <linux/mm.h>
   8 #include <linux/kernel.h>
   9 #include <linux/string.h>
  10 #include <linux/types.h>
  11 #include <linux/malloc.h>
  12 
  13 #include <asm/segment.h>
  14 #include <asm/page.h>
  15 #include <asm/pgtable.h>
  16 #include <asm/system.h>
  17 #include <asm/traps.h>
  18 #include <asm/amigahw.h>
  19 #include <asm/bootinfo.h>
  20 
  21 extern pte_t *kernel_page_table (unsigned long *memavailp);
  22 
  23 static struct ptable_desc {
  24         struct ptable_desc *prev;
  25         struct ptable_desc *next;
  26         unsigned long      page;
  27         unsigned char      alloced;
  28 } ptable_list = { &ptable_list, &ptable_list, 0, 0xff };
  29 
  30 #define PD_NONEFREE(dp) ((dp)->alloced == 0xff)
  31 #define PD_ALLFREE(dp) ((dp)->alloced == 0)
  32 #define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
  33 #define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
  34 #define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))
  35 
  36 #define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
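/* Illustrative note, not in the original source: with the usual m68k
 * PAGE_SIZE of 4096 and 128 pointer entries of 4 bytes each (both figures
 * are assumptions here, not definitions from this file), PTABLE_SIZE is
 * 512, so exactly 8 pointer tables fit in one page -- which is why the
 * 'alloced' bookkeeping fits in a single byte of per-table bits.  A minimal
 * fragment showing how the macros are meant to be used:
 */
#if 0
        struct ptable_desc d = { NULL, NULL, 0, 0 };    /* all 8 tables free */

        if (PD_TABLEFREE (&d, 3))       /* bit 3 clear -> table 3 is free */
                PD_MARKUSED (&d, 3);    /* d.alloced becomes 0x08 */

        PD_MARKFREE (&d, 3);            /* d.alloced back to 0 */
        /* PD_ALLFREE(&d) is now true; PD_NONEFREE(&d) holds only when all
         * 8 bits are set (0xff), as in the static ptable_list head above. */
#endif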
  37 
  38 pmd_t *get_pointer_table (void)
  39 {
  40         pmd_t *pmdp = NULL;
  41         unsigned long flags;
  42         struct ptable_desc *dp = ptable_list.next;
  43         int i;
  44 
  45         /*
  46          * For a pointer table for a user process address space, a
  47          * table is taken from a page allocated for the purpose.  Each
  48          * page can hold 8 pointer tables.  The page is remapped in
  49          * virtual address space to be noncacheable.
  50          */
  51         if (PD_NONEFREE (dp)) {
  52 
  53                 if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
  54                         return 0;
  55                 }
  56 
  57                 if (!(dp->page = __get_free_page (GFP_KERNEL))) {
  58                         kfree (dp);
  59                         return 0;
  60                 }
  61 
  62                 nocache_page (dp->page);
  63 
  64                 dp->alloced = 0;
  65                 /* put at head of list */
  66                 save_flags(flags);
  67                 cli();
  68                 dp->next = ptable_list.next;
  69                 dp->prev = ptable_list.next->prev;
  70                 ptable_list.next->prev = dp;
  71                 ptable_list.next = dp;
  72                 restore_flags(flags);
  73         }
  74 
  75         for (i = 0; i < 8; i++)
  76                 if (PD_TABLEFREE (dp, i)) {
  77                         PD_MARKUSED (dp, i);
  78                         pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
  79                         break;
  80                 }
  81 
  82         if (PD_NONEFREE (dp)) {
  83                 /* move to end of list */
  84                 save_flags(flags);
  85                 cli();
  86                 dp->prev->next = dp->next;
  87                 dp->next->prev = dp->prev;
  88 
  89                 dp->next = ptable_list.next->prev;
  90                 dp->prev = ptable_list.prev;
  91                 ptable_list.prev->next = dp;
  92                 ptable_list.prev = dp;
  93                 restore_flags(flags);
  94         }
  95 
  96         memset (pmdp, 0, PTABLE_SIZE);
  97 
  98         return pmdp;
  99 }
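/* Illustrative fragment, not part of the original source: the intended
 * pairing of the two routines.  'pgdp' is a hypothetical pgd entry used
 * only for this sketch.
 */
#if 0
        pmd_t *pmdp = get_pointer_table ();     /* one table from a nocache page */
        if (!pmdp)
                /* allocation can fail; handle it */ ;
        pgd_set (pgdp, pmdp);                   /* install the pointer table */
        /* ... use the mapping ... */
        free_pointer_table (pmdp);              /* mark the slot free again; the
                                                   page is only released (and made
                                                   cacheable again) once all 8
                                                   tables in it are free */
#endif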
 100 
 101 void free_pointer_table (pmd_t *ptable)
 102 {
 103         struct ptable_desc *dp;
 104         unsigned long page = (unsigned long)ptable & PAGE_MASK;
 105         int index = ((unsigned long)ptable - page)/PTABLE_SIZE;
 106         unsigned long flags;
 107 
 108         for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
 109                 ;
 110 
 111         if (!dp->page)
 112                 panic ("unable to find desc for ptable %p on list!", ptable);
 113 
 114         if (PD_TABLEFREE (dp, index))
 115                 panic ("table already free!");
 116 
 117         PD_MARKFREE (dp, index);
 118 
 119         if (PD_ALLFREE (dp)) {
 120                 /* all tables in page are free, free page */
 121                 save_flags(flags);
 122                 cli();
 123                 dp->prev->next = dp->next;
 124                 dp->next->prev = dp->prev;
 125                 restore_flags(flags);
 126                 cache_page (dp->page);
 127                 free_page (dp->page);
 128                 kfree (dp);
 129                 return;
 130         } else {
 131                 /*
  132                  * move this descriptor to the front of the list, since
 133                  * it has one or more free tables.
 134                  */
 135                 save_flags(flags);
 136                 cli();
 137                 dp->prev->next = dp->next;
 138                 dp->next->prev = dp->prev;
 139 
 140                 dp->next = ptable_list.next;
 141                 dp->prev = ptable_list.next->prev;
 142                 ptable_list.next->prev = dp;
 143                 ptable_list.next = dp;
 144                 restore_flags(flags);
 145         }
 146 }
 147 
 148 static unsigned char alloced = 0;
 149 extern pmd_t (*kernel_pmd_table)[PTRS_PER_PMD]; /* initialized in head.S */
 150 
 151 pmd_t *get_kpointer_table (void)
 152 {
 153         /* For pointer tables for the kernel virtual address space,
 154          * use a page that is allocated in head.S that can hold up to
 155          * 8 pointer tables.  This allows mapping of 8 * 32M = 256M of
 156          * physical memory.  This should be sufficient for now.
 157          */
 158         pmd_t *ptable;
 159         int i;
 160 
 161         for (i = 0; i < PAGE_SIZE/(PTRS_PER_PMD*sizeof(pmd_t)); i++)
 162                 if ((alloced & (1 << i)) == 0) {
 163                         ptable = kernel_pmd_table[i];
 164                         memset (ptable, 0, PTRS_PER_PMD*sizeof(pmd_t));
 165                         alloced |= (1 << i);
 166                         return ptable;
 167                 }
 168         printk ("no space for kernel pointer table\n");
 169         return NULL;
 170 }
 171 
 172 void free_kpointer_table (pmd_t *pmdp)
 173 {
 174         int index = (pmd_t (*)[PTRS_PER_PMD])pmdp - kernel_pmd_table;
 175 
 176         if (index < 0 || index > 7 ||
 177             /* This works because kernel_pmd_table is page aligned. */
 178             ((unsigned long)pmdp & (sizeof(pmd_t) * PTRS_PER_PMD - 1)))
 179                 panic("attempt to free invalid kernel pointer table");
 180         else
 181                 alloced &= ~(1 << index);
 182 }
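/* Worked arithmetic, not in the original source: kernel_map() below fills
 * one pointer-table entry per STEP_SIZE (256K) step, so assuming the usual
 * 128 entries per pointer table (PTRS_PER_PMD), one kernel pointer table
 * spans 128 * 256K = 32M, and the 8 tables in the head.S page span
 * 8 * 32M = 256M -- the figure quoted in the get_kpointer_table() comment
 * above.
 */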
 183 
 184 /*
 185  * The following two routines map from a physical address to a kernel
 186  * virtual address and vice versa.
 187  */
 188 unsigned long mm_vtop (unsigned long vaddr)
 189 {
 190         int i;
 191         unsigned long voff = vaddr;
 192         unsigned long offset = 0;
 193 
 194         for (i = 0; i < boot_info.num_memory; i++)
 195         {
 196                 if (voff < offset + boot_info.memory[i].size) {
 197 #ifdef DEBUGPV
 198                         printk ("VTOP(%lx)=%lx\n", vaddr,
 199                                 boot_info.memory[i].addr + voff - offset);
 200 #endif
 201                         return boot_info.memory[i].addr + voff - offset;
 202                 } else
 203                         offset += boot_info.memory[i].size;
 204         }
 205 
 206         /* not in one of the memory chunks; get the actual
 207          * physical address from the MMU.
 208          */
 209         if (m68k_is040or060 == 6) {
 210           unsigned long fs = get_fs();
 211           unsigned long  paddr;
 212 
 213           set_fs (SUPER_DATA);
 214 
 215           /* The PLPAR instruction causes an access error if the translation
 216            * is not possible. We don't catch that here, so a bad kernel trap
 217            * will be reported in this case. */
 218           asm volatile ("movel %1,%/a0\n\t"
 219                         ".word 0xf5c8\n\t"      /* plpar (a0) */
 220                         "movel %/a0,%0"
 221                         : "=g" (paddr)
 222                         : "g" (vaddr)
 223                         : "a0" );
 224           set_fs (fs);
 225 
 226           return paddr;
 227 
 228         } else if (m68k_is040or060 == 4) {
 229           unsigned long mmusr;
 230           unsigned long fs = get_fs();
 231 
 232           set_fs (SUPER_DATA);
 233 
 234           asm volatile ("movel %1,%/a0\n\t"
 235                         ".word 0xf568\n\t"      /* ptestr (a0) */
 236                         ".long 0x4e7a8805\n\t"  /* movec mmusr, a0 */
 237                         "movel %/a0,%0"
 238                         : "=g" (mmusr)
 239                         : "g" (vaddr)
 240                         : "a0", "d0");
 241           set_fs (fs);
 242 
 243           if (mmusr & MMU_R_040)
 244             return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
 245 
 246           panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
 247         } else {
 248           volatile unsigned short temp;
 249           unsigned short mmusr;
 250           unsigned long *descaddr;
 251 
 252           asm volatile ("ptestr #5,%2@,#7,%0\n\t"
 253                         "pmove %/psr,%1@"
 254                         : "=a&" (descaddr)
 255                         : "a" (&temp), "a" (vaddr));
 256           mmusr = temp;
 257 
 258           if (mmusr & (MMU_I|MMU_B|MMU_L))
 259             panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);
 260 
 261           descaddr = (unsigned long *)PTOV(descaddr);
 262 
 263           switch (mmusr & MMU_NUM) {
 264           case 1:
 265             return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
 266           case 2:
 267             return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
 268           case 3:
 269             return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
 270           default:
 271             panic ("VTOP: bad levels (%u) for virtual address %08lx", 
 272                    mmusr & MMU_NUM, vaddr);
 273           }
 274         }
 275 
 276         panic ("VTOP: bad virtual address %08lx", vaddr);
 277 }
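/* Worked example, not in the original source, with a hypothetical chunk
 * layout: memory[0] = { addr 0x07400000, size 0x00400000 } and
 * memory[1] = { addr 0x00000000, size 0x00200000 }.  The loop treats the
 * kernel virtual address as a running offset into the concatenated chunks,
 * so for vaddr 0x00500000 it skips chunk 0 (offset becomes 0x00400000),
 * matches chunk 1 and returns 0x00000000 + 0x00500000 - 0x00400000 =
 * 0x00100000.  Only addresses beyond the last chunk fall through to the
 * MMU probe (PLPAR / PTESTR) fallback in the function above.
 */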
 278 
 279 unsigned long mm_ptov (unsigned long paddr)
 280 {
 281         int i;
 282         unsigned long offset = 0;
 283 
 284         for (i = 0; i < boot_info.num_memory; i++)
 285         {
 286                 if (paddr >= boot_info.memory[i].addr &&
 287                     paddr < (boot_info.memory[i].addr
 288                              + boot_info.memory[i].size)) {
 289 #ifdef DEBUGPV
 290                         printk ("PTOV(%lx)=%lx\n", paddr,
 291                                 (paddr - boot_info.memory[i].addr) + offset);
 292 #endif
 293                         return (paddr - boot_info.memory[i].addr) + offset;
 294                 } else
 295                         offset += boot_info.memory[i].size;
 296         }
 297 
 298         /*
 299          * assume that the kernel virtual address is the same as the
 300          * physical address.
 301          *
 302          * This should be reasonable in most situations:
 303          *  1) They shouldn't be dereferencing the virtual address
 304          *     unless they are sure that it is valid from kernel space.
 305          *  2) The only usage I see so far is converting a page table
 306          *     reference to some non-FASTMEM address space when freeing
 307          *     mmaped "/dev/mem" pages.  These addresses are just passed
 308          *     to "free_page", which ignores addresses that aren't in
 309          *     the memory list anyway.
 310          *
 311          */
 312 
 313         /*
 314          * if on an amiga and address is in first 16M, move it 
 315          * to the ZTWO_ADDR range
 316          */
 317         if (MACH_IS_AMIGA && paddr < 16*1024*1024)
 318                 return ZTWO_VADDR(paddr);
 319         return paddr;
 320 }
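/* Worked example, not in the original source (same hypothetical layout as
 * the mm_vtop note above): paddr 0x00100000 lies in memory[1], so mm_ptov
 * returns (0x00100000 - 0x00000000) + 0x00400000 = 0x00500000, the inverse
 * of the mm_vtop translation shown above.  A physical address outside every
 * chunk comes back unchanged, except that on an Amiga the first 16M is
 * redirected into the ZTWO_VADDR window.
 */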
 321 
 322 #define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
 323                                               ".word 0xf4d0"\
 324                                               /* CINVP I/D (a0) */\
 325                                               : : "g" ((paddr))\
 326                                               : "a0")
 327 
 328 #define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
 329                                              ".word 0xf4f0"\
 330                                              /* CPUSHP I/D (a0) */\
 331                                              : : "g" ((paddr))\
 332                                              : "a0")
 333 
 334 #define pushcl040(paddr) do { push040((paddr));\
 335                               if (m68k_is040or060 == 6) clear040((paddr));\
 336                          } while(0)
 337 
 338 #define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
 339                                               /* ptestr (a0) */\
 340                                               ".word 0xf568\n\t"\
 341                                               /* movec mmusr,d0 */\
 342                                               ".long 0x4e7a0805\n\t"\
 343                                               "andw #0xf000,%/d0\n\t"\
 344                                               "movel %/d0,%/a0\n\t"\
 345                                               /* CPUSHP I/D (a0) */\
 346                                               ".word 0xf4f0"\
 347                                               : : "g" ((vaddr))\
 348                                               : "a0", "d0")
 349 
 350 #define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
 351                                               /* plpar (a0) */\
 352                                               ".word 0xf5c8\n\t"\
 353                                               /* CPUSHP I/D (a0) */\
 354                                               ".word 0xf4f0"\
 355                                               : : "g" ((vaddr))\
 356                                               : "a0")
 357 
 358 
 359 /*
 360  * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 361  * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 362  * Hit every page until there is a page or less to go. Hit the next page,
 363  * and the one after that if the range hits it.
 364  */
 365 /* ++roman: A little bit more care is required here: The CINVP instruction
 366  * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 367  * and the end of the region must be treated differently if they are not
 368  * exactly at the beginning or end of a page boundary. Else, maybe too much
 369  * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 370  * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 371  * for discovering the problem!)
 372  */
 373 /* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 374  * the DPI bit in the CACR; would it cause problems with temporarily changing
 375  * this?). So we have to push first and then additionally to invalidate.
 376  */
 377 void cache_clear (unsigned long paddr, int len)
 378 {
 379     if (m68k_is040or060) {
 380         /* ++roman: There have been too many problems with the CINV, it seems
 381          * to break the cache maintenance of DMAing drivers. I don't expect
 382          * too much overhead by using CPUSH instead.
 383          */
 384         while (len > PAGE_SIZE) {
 385             pushcl040(paddr);
 386             len -= PAGE_SIZE;
 387             paddr += PAGE_SIZE;
 388         }
 389         if (len > 0) {
 390             pushcl040(paddr);
 391             if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
 392                 /* a page boundary gets crossed at the end */
 393                 pushcl040(paddr + len - 1);
 394             }
 395         }
 396     }
 397 #if 0
 398         /* on 68040, invalidate cache lines for pages in the range */
 399         while (len > PAGE_SIZE) {
 400             clear040(paddr);
 401             len -= PAGE_SIZE;
 402             paddr += PAGE_SIZE;
 403             }
 404         if (len > 0) {
 405             /* 0 < len <= PAGE_SIZE */
 406             clear040(paddr);
 407             if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
 408                 /* a page boundary gets crossed at the end */
 409                 clear040(paddr + len - 1);
 410                 }
 411             }
 412 #endif
 413     else /* 68030 or 68020 */
 414         asm volatile ("movec %/cacr,%/d0\n\t"
 415                       "oriw %0,%/d0\n\t"
 416                       "movec %/d0,%/cacr"
 417                       : : "i" (FLUSH_I_AND_D)
 418                       : "d0");
 419 }
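/* Illustrative note, not in the original source: the test
 * ((paddr + len - 1) ^ paddr) & PAGE_MASK is non-zero exactly when the
 * first and last byte of the range sit in different pages, since XOR leaves
 * a bit set in the page-number part iff the page numbers differ.  Assuming
 * a 4K page size: paddr = 0x2fc0, len = 0x100 gives a last byte of 0x30bf,
 * and 0x30bf ^ 0x2fc0 = 0x1f7f, which masked by PAGE_MASK is 0x1000 != 0,
 * so the extra pushcl040() on the last byte is needed; paddr = 0x2000,
 * len = 0x1000 gives a last byte of 0x2fff, XOR = 0x0fff, masked to 0:
 * same page, no extra push.
 */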
 420 
 421 
 422 
 423 void cache_push (unsigned long paddr, int len)
 424 {
 425     if (m68k_is040or060) {
 426         /*
 427          * on 68040 or 68060, push cache lines for pages in the range;
 428          * on the '040 this also invalidates the pushed lines, but not on
 429          * the '060!
 430          */
 431         while (len > PAGE_SIZE) {
 432             push040(paddr);
 433             len -= PAGE_SIZE;
 434             paddr += PAGE_SIZE;
 435             }
 436         if (len > 0) {
 437             push040(paddr);
 438 #if 0
 439             if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
 440 #endif
 441             if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
 442                 /* a page boundary gets crossed at the end */
 443                 push040(paddr + len - 1);
 444                 }
 445             }
 446         }
 447     
 448     
 449     /*
 450      * 68030/68020 have no writeback cache. On the other hand,
 451      * cache_push is actually a superset of cache_clear (the lines
 452      * get written back and invalidated), so we should make sure
 453      * to perform the corresponding actions. After all, this is getting
 454      * called in places where we've just loaded code, or whatever, so
 455      * flushing the icache is appropriate; flushing the dcache shouldn't
 456      * be required.
 457      */
 458     else /* 68030 or 68020 */
 459         asm volatile ("movec %/cacr,%/d0\n\t"
 460                       "oriw %0,%/d0\n\t"
 461                       "movec %/d0,%/cacr"
 462                       : : "i" (FLUSH_I)
 463                       : "d0");
 464 }
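/* Illustrative fragment, not part of the original source: the DMA pattern
 * alluded to in the ++roman comment above.  'buf', 'len' and the two
 * dev_dma_* calls are hypothetical names used only for this sketch.
 */
#if 0
        unsigned long phys = VTOP ((unsigned long) buf);

        /* memory -> device: push dirty cache lines to RAM before the
         * device reads the buffer */
        cache_push (phys, len);
        dev_dma_from_memory (phys, len);

        /* device -> memory: after the DMA completes, discard any stale
         * cache lines before the CPU looks at the new data */
        dev_dma_to_memory (phys, len);
        /* ... wait for completion ... */
        cache_clear (phys, len);
#endif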
 465 
 466 void cache_push_v (unsigned long vaddr, int len)
 467 {
 468     if (m68k_is040or060 == 4) {
 469         /* on 68040, push cache lines for pages in the range */
 470         while (len > PAGE_SIZE) {
 471             pushv040(vaddr);
 472             len -= PAGE_SIZE;
 473             vaddr += PAGE_SIZE;
 474             }
 475         if (len > 0) {
 476             pushv040(vaddr);
 477 #if 0
 478             if (((vaddr + len - 1) / PAGE_SIZE) != (vaddr / PAGE_SIZE)) {
 479 #endif
 480             if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
 481                 /* a page boundary gets crossed at the end */
 482                 pushv040(vaddr + len - 1);
 483                 }
 484             }
 485         }
 486     else if (m68k_is040or060 == 6) {
  487         /* on 68060, push cache lines for pages in the range */
 488         while (len > PAGE_SIZE) {
 489             pushv060(vaddr);
 490             len -= PAGE_SIZE;
 491             vaddr += PAGE_SIZE;
 492         }
 493         if (len > 0) {
 494             pushv060(vaddr);
 495             if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
 496                 /* a page boundary gets crossed at the end */
 497                 pushv060(vaddr + len - 1);
 498             }
 499         }
 500     }
 501     /* 68030/68020 have no writeback cache; still need to clear icache. */
 502     else /* 68030 or 68020 */
 503         asm volatile ("movec %/cacr,%/d0\n\t"
 504                       "oriw %0,%/d0\n\t"
 505                       "movec %/d0,%/cacr"
 506                       : : "i" (FLUSH_I)
 507                       : "d0");
 508 }
 509 
 510 void flush_cache_all(void)
 511 {
 512     if (m68k_is040or060 >= 4)
 513         __asm__ __volatile__ (".word 0xf478\n" ::);
 514     else /* 68030 or 68020 */
 515         asm volatile ("movec %/cacr,%/d0\n\t"
 516                       "oriw %0,%/d0\n\t"
 517                       "movec %/d0,%/cacr"
 518                       : : "i" (FLUSH_I)
 519                       : "d0");
 520 }
 521 
 522 void flush_page_to_ram (unsigned long addr)
 523 {
 524     if (m68k_is040or060 == 4)
 525         pushv040(addr);
 526 
 527     else if (m68k_is040or060 == 6)
 528         push040(VTOP(addr)); /* someone mentioned that pushv060 doesn't work */
 529 
 530     /* 68030/68020 have no writeback cache; still need to clear icache. */
 531     else /* 68030 or 68020 */
 532         asm volatile ("movec %/cacr,%/d0\n\t"
 533                       "oriw %0,%/d0\n\t"
 534                       "movec %/d0,%/cacr"
 535                       : : "i" (FLUSH_I)
 536                       : "d0");
 537 }
 538 
 539 #undef clear040
 540 #undef push040
 541 #undef pushv040
 542 #undef pushv060
 543 
 544 unsigned long mm_phys_to_virt (unsigned long addr)
 545 {
 546     return PTOV (addr);
 547 }
 548 
 549 int mm_end_of_chunk (unsigned long addr, int len)
 550 {
 551         int i;
 552 
 553         for (i = 0; i < boot_info.num_memory; i++)
 554                 if (boot_info.memory[i].addr + boot_info.memory[i].size
 555                     == addr + len)
 556                         return 1;
 557         return 0;
 558 }
 559 
 560 /* Map some physical address range into the kernel address space. The
 561  * code is copied and adapted from map_chunk().
 562  */
 563 
 564 unsigned long kernel_map(unsigned long paddr, unsigned long size,
 565                          int nocacheflag, unsigned long *memavailp )
 566 {
 567 #define STEP_SIZE       (256*1024)
 568 
 569         static unsigned long vaddr = 0xe0000000; /* safe place */
 570         unsigned long physaddr, retaddr;
 571         pte_t *ktablep = NULL;
 572         pmd_t *kpointerp;
 573         pgd_t *page_dir;
 574         int pindex;   /* index into pointer table */
 575         int prot;
 576         
 577         /* Round down 'paddr' to 256 KB and adjust size */
 578         physaddr = paddr & ~(STEP_SIZE-1);
 579         size += paddr - physaddr;
 580         retaddr = vaddr + (paddr - physaddr);
 581         paddr = physaddr;
 582         /* Round up the size to 256 KB. It doesn't hurt if too much is
 583          * mapped... */
 584         size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);
 585 
 586         if (m68k_is040or060) {
 587                 prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
 588                 switch( nocacheflag ) {
 589                   case KERNELMAP_FULL_CACHING:
 590                         prot |= _PAGE_CACHE040;
 591                         break;
 592                   case KERNELMAP_NOCACHE_SER:
 593                   default:
 594                         prot |= _PAGE_NOCACHE_S;
 595                         break;
 596                   case KERNELMAP_NOCACHE_NONSER:
 597                         prot |= _PAGE_NOCACHE;
 598                         break;
 599                   case KERNELMAP_NO_COPYBACK:
 600                         prot |= _PAGE_CACHE040W;
 601                         /* prot |= 0; */
 602                         break;
 603                 }
 604         } else
 605                 prot = _PAGE_PRESENT |
 606                            ((nocacheflag == KERNELMAP_FULL_CACHING ||
 607                                  nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);
 608         
 609         page_dir = pgd_offset_k(vaddr);
 610         if (pgd_present(*page_dir)) {
 611                 kpointerp = (pmd_t *)pgd_page(*page_dir);
 612                 pindex = (vaddr >> 18) & 0x7f;
 613                 if (pindex != 0 && m68k_is040or060) {
 614                         if (pmd_present(*kpointerp))
 615                                 ktablep = (pte_t *)pmd_page(*kpointerp);
 616                         else {
 617                                 ktablep = kernel_page_table (memavailp);
 618                                 /* Make entries invalid */
 619                                 memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
 620                                 pmd_set(kpointerp,ktablep);
 621                         }
 622                         ktablep += (pindex & 15)*64;
 623                 }
 624         }
 625         else {
 626                 /* we need a new pointer table */
 627                 kpointerp = get_kpointer_table ();
 628                 pgd_set(page_dir, (pmd_t *)kpointerp);
 629                 memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
 630                 pindex = 0;
 631         }
 632 
 633         for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {
 634 
 635                 if (pindex > 127) {
 636                         /* we need a new pointer table */
 637                         kpointerp = get_kpointer_table ();
 638                         pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
 639                         memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
 640                         pindex = 0;
 641                 }
 642 
 643                 if (m68k_is040or060) {
 644                         int i;
 645                         unsigned long ktable;
 646 
 647                         /*
 648                          * 68040, use page tables pointed to by the
 649                          * kernel pointer table.
 650                          */
 651 
 652                         if ((pindex & 15) == 0) {
 653                                 /* Need new page table every 4M on the '040 */
 654                                 ktablep = kernel_page_table (memavailp);
 655                                 /* Make entries invalid */
 656                                 memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
 657                         }
 658 
 659                         ktable = VTOP(ktablep);
 660 
 661                         /*
  662                          * initialize the section of the page table mapping
  663                          * this 256K (STEP_SIZE) portion.
 664                          */
 665                         for (i = 0; i < 64; i++) {
 666                                 pte_val(*ktablep++) = physaddr | prot;
 667                                 physaddr += PAGE_SIZE;
 668                         }
 669 
 670                         /*
 671                          * make the kernel pointer table point to the
 672                          * kernel page table.
 673                          */
 674 
 675                         ((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;
 676 
 677                 } else {
 678                         /*
 679                          * 68030, use early termination page descriptors.
 680                          * Each one points to 64 pages (256K).
 681                          */
 682                         ((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
 683                         physaddr += 64 * PAGE_SIZE;
 684                 }
 685         }
 686 
 687         return( retaddr );
 688 }
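/* Worked example, not in the original source, with hypothetical arguments:
 * with STEP_SIZE = 0x40000 (256K) and vaddr still at its initial
 * 0xe0000000, kernel_map(0x00dff000, 0x100, ...) rounds paddr down to
 * physaddr = 0x00dc0000, grows size by the 0x3f000 that was cut off
 * (0x100 -> 0x3f100, then rounded up to a full 0x40000 step) and returns
 * retaddr = 0xe0000000 + 0x3f000 = 0xe003f000, so the caller still gets a
 * virtual address for exactly the byte it asked about.
 */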
 689 
 690 
 691 static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
 692                                   unsigned long size, unsigned cmode )
 693 {       pte_t *pte;
 694         unsigned long end;
 695 
 696         if (pmd_none(*pmd))
 697                 return;
 698 
 699         pte = pte_offset( pmd, address );
 700         address &= ~PMD_MASK;
 701         end = address + size;
 702         if (end >= PMD_SIZE)
 703                 end = PMD_SIZE;
 704 
 705         for( ; address < end; pte++ ) {
 706                 pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
 707                 address += PAGE_SIZE;
 708         }
 709 }
 710 
 711 
 712 static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
 713                                   unsigned long size, unsigned cmode )
 714 {
 715         pmd_t *pmd;
 716         unsigned long end;
 717 
 718         if (pgd_none(*dir))
 719                 return;
 720 
 721         pmd = pmd_offset( dir, address );
 722         address &= ~PGDIR_MASK;
 723         end = address + size;
 724         if (end > PGDIR_SIZE)
 725                 end = PGDIR_SIZE;
 726 
 727         if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
 728                 /* 68030 early termination descriptor */
 729                 pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
 730                 return;
 731         }
 732         else {
 733                 /* "normal" tables */
 734                 for( ; address < end; pmd++ ) {
 735                         set_cmode_pte( pmd, address, end - address, cmode );
 736                         address = (address + PMD_SIZE) & PMD_MASK;
 737                 }
 738         }
 739 }
 740 
 741 
 742 /*
 743  * Set new cache mode for some kernel address space.
 744  * The caller must push data for that range itself, if such data may already
 745  * be in the cache.
 746  */
 747 
 748 void kernel_set_cachemode( unsigned long address, unsigned long size,
 749                                                    unsigned cmode )
 750 {
 751         pgd_t *dir = pgd_offset_k( address );
 752         unsigned long end = address + size;
 753         
 754         if (m68k_is040or060) {
 755                 switch( cmode ) {
 756                   case KERNELMAP_FULL_CACHING:
 757                         cmode = _PAGE_CACHE040;
 758                         break;
 759                   case KERNELMAP_NOCACHE_SER:
 760                   default:
 761                         cmode = _PAGE_NOCACHE_S;
 762                         break;
 763                   case KERNELMAP_NOCACHE_NONSER:
 764                         cmode = _PAGE_NOCACHE;
 765                         break;
 766                   case KERNELMAP_NO_COPYBACK:
 767                         cmode = _PAGE_CACHE040W;
 768                         break;
 769                 }
 770         } else
 771                 cmode = ((cmode == KERNELMAP_FULL_CACHING ||
 772                                   cmode == KERNELMAP_NO_COPYBACK)    ?
 773                          0 : _PAGE_NOCACHE030);
 774 
 775         for( ; address < end; dir++ ) {
 776                 set_cmode_pmd( dir, address, end - address, cmode );
 777                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 778         }
 779         flush_tlb_all();
 780 }
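/* Illustrative fragment, not part of the original source: switching a
 * kernel_map()ed range from cached to serialized non-cached mode, e.g. for
 * a DMA buffer.  'paddr', 'size' and 'memavailp' are hypothetical here;
 * per the comment above, dirty data must be pushed by the caller before
 * the mode change.
 */
#if 0
        vaddr = kernel_map (paddr, size, KERNELMAP_FULL_CACHING, memavailp);
        /* ... later, before handing the buffer to a DMA device ... */
        cache_push_v (vaddr, size);     /* caller's responsibility */
        kernel_set_cachemode (vaddr, size, KERNELMAP_NOCACHE_SER);
#endif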
 781 
 782 
