root/include/asm-m68k/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. __flush_tlb_one
  2. flush_tlb_mm
  3. flush_tlb_page
  4. flush_tlb_range
  5. mk_pte
  6. pte_modify
  7. pmd_set
  8. pmd_set_et
  9. pgd_set
  10. pte_page
  11. pmd_page2
  12. pgd_page
  13. pte_none
  14. pte_present
  15. pte_clear
  16. pmd_none2
  17. pmd_bad2
  18. pmd_present2
  19. pmd_clear
  20. pgd_none
  21. pgd_bad
  22. pgd_present
  23. pgd_clear
  24. pte_read
  25. pte_write
  26. pte_exec
  27. pte_dirty
  28. pte_young
  29. pte_wrprotect
  30. pte_rdprotect
  31. pte_exprotect
  32. pte_mkclean
  33. pte_mkold
  34. pte_mkwrite
  35. pte_mkread
  36. pte_mkexec
  37. pte_mkdirty
  38. pte_mkyoung
  39. pte_mknocache
  40. pte_mkcache
  41. SET_PAGE_DIR
  42. pgd_offset
  43. pgd_offset_k
  44. pmd_offset
  45. pte_offset
  46. nocache_page
  47. cache_page
  48. pte_free
  49. pte_alloc
  50. pmd_free
  51. pmd_alloc
  52. pte_free_kernel
  53. pte_alloc_kernel
  54. pmd_free_kernel
  55. pmd_alloc_kernel
  56. pgd_free
  57. pgd_alloc
  58. update_mmu_cache

   1 #ifndef _M68K_PGTABLE_H
   2 #define _M68K_PGTABLE_H
   3 
   4 /*
   5  * This file contains the functions and defines necessary to modify and use
   6  * the m68k page table tree.
   7  */
   8 
   9 #define __flush_tlb() \
  10 do {    \
  11         if (m68k_is040or060) \
  12                 __asm__ __volatile__(".word 0xf510\n"::); /* pflushan */ \
  13         else \
  14                 __asm__ __volatile__("pflusha\n"::); \
  15 } while (0)
  16 
  17 #if 1
  18 static inline void __flush_tlb_one(unsigned long addr)
  19 {
  20         if (m68k_is040or060) {
  21                 register unsigned long a0 __asm__ ("a0") = addr;
  22                 __asm__ __volatile__(".word 0xf508" /* pflush (%a0) */
  23                                      : : "a" (a0));
  24         } else
  25                 __asm__ __volatile__("pflush #0,#0,(%0)" : : "a" (addr));
  26 }
  27 #else
  28 #define __flush_tlb_one(addr) __flush_tlb()
  29 #endif
  30 
  31 #define flush_tlb() __flush_tlb()
  32 #define flush_tlb_all() flush_tlb()
  33 
  34 static inline void flush_tlb_mm(struct mm_struct *mm)
  35 {
  36         if (mm == current->mm)
  37                 __flush_tlb();
  38 }
  39 
  40 static inline void flush_tlb_page(struct vm_area_struct *vma,
  41         unsigned long addr)
  42 {
  43         if (vma->vm_mm == current->mm)
  44                 __flush_tlb_one(addr);
  45 }
  46 
  47 static inline void flush_tlb_range(struct mm_struct *mm,
  48         unsigned long start, unsigned long end)
  49 {
  50         if (mm == current->mm)
  51                 __flush_tlb();
  52 }
  53 
  54 /* Certain architectures need to do special things when pte's
  55  * within a page table are directly modified.  Thus, the following
  56  * hook is made available.
  57  */
  58 #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
  59 
  60 /* PMD_SHIFT determines the size of the area a second-level page table can map */
  61 #define PMD_SHIFT       22
  62 #define PMD_SIZE        (1UL << PMD_SHIFT)
  63 #define PMD_MASK        (~(PMD_SIZE-1))
  64 
  65 /* PGDIR_SHIFT determines what a third-level page table entry can map */
  66 #define PGDIR_SHIFT     25
  67 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
  68 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
  69 
  70 /*
  71  * entries per page directory level: the m68k is configured as three-level,
  72  * so we do have a PMD level physically.
  73  */
  74 #define PTRS_PER_PTE    1024
  75 #define PTRS_PER_PMD    8
  76 #define PTRS_PER_PGD    128
  77 
  78 /* the no. of pointers that fit on a page: this will go away */
  79 #define PTRS_PER_PAGE   (PAGE_SIZE/sizeof(void*))
  80 
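The shift constants above split a 32-bit virtual address into 7 + 3 + 10 + 12 bits: 128 pgd entries covering 32MB each, 8 pmd entries covering 4MB each, and 1024 pte entries of 4KB pages (assuming PAGE_SHIFT is 12 on this port). A minimal sketch of the index arithmetic; the helper below is illustrative only and not part of this header:

/* illustrative only -- not part of this header */
static inline void example_pgtable_indices(unsigned long address,
                                           unsigned long *pgd_i,
                                           unsigned long *pmd_i,
                                           unsigned long *pte_i)
{
        *pgd_i = address >> PGDIR_SHIFT;                        /* bits 31..25, 0..127  */
        *pmd_i = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);   /* bits 24..22, 0..7    */
        *pte_i = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);  /* bits 21..12, 0..1023 */
}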
  81 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  82  * current 8MB value just means that there will be an 8MB "hole" after the
  83  * physical memory until the kernel virtual memory starts.  That means that
  84  * any out-of-bounds memory accesses will hopefully be caught.
  85  * The vmalloc() routines leave a hole of 4kB between each vmalloced
  86  * area for the same reason. ;)
  87  */
  88 #define VMALLOC_OFFSET  (8*1024*1024)
  89 #define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  90 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
  91 
  92 /*
  93  * Definitions for MMU descriptors
  94  */
  95 #define _PAGE_PRESENT   0x001
  96 #define _PAGE_SHORT     0x002
  97 #define _PAGE_RONLY     0x004
  98 #define _PAGE_ACCESSED  0x008
  99 #define _PAGE_DIRTY     0x010
 100 #define _PAGE_GLOBAL040 0x400   /* 68040 global bit, used for kva descs */
 101 #define _PAGE_COW       0x800   /* implemented in software */
 102 #define _PAGE_NOCACHE030 0x040  /* 68030 no-cache mode */
 103 #define _PAGE_NOCACHE   0x060   /* 68040 cache mode, non-serialized */
 104 #define _PAGE_NOCACHE_S 0x040   /* 68040 no-cache mode, serialized */
 105 #define _PAGE_CACHE040  0x020   /* 68040 cache mode, cachable, copyback */
 106 #define _PAGE_CACHE040W 0x000   /* 68040 cache mode, cachable, write-through */
 107 
 108 #define _DESCTYPE_MASK  0x003
 109 
 110 #define _CACHEMASK040   (~0x060)
 111 #define _TABLE_MASK     (0xfffffff0)
 112 
 113 #define _PAGE_TABLE     (_PAGE_SHORT)
 114 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
 115 
 116 #define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
 117 #define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE040)
 118 #define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
 119 #define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
 120 #define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_CACHE040)
 121 
 122 /*
 123  * The m68k can't do page protection for execute, and considers it the same as read.
 124  * Also, write permissions imply read permissions. This is the closest we can get..
 125  */
 126 #define __P000  PAGE_NONE
 127 #define __P001  PAGE_READONLY
 128 #define __P010  PAGE_COPY
 129 #define __P011  PAGE_COPY
 130 #define __P100  PAGE_READONLY
 131 #define __P101  PAGE_READONLY
 132 #define __P110  PAGE_COPY
 133 #define __P111  PAGE_COPY
 134 
 135 #define __S000  PAGE_NONE
 136 #define __S001  PAGE_READONLY
 137 #define __S010  PAGE_SHARED
 138 #define __S011  PAGE_SHARED
 139 #define __S100  PAGE_READONLY
 140 #define __S101  PAGE_READONLY
 141 #define __S110  PAGE_SHARED
 142 #define __S111  PAGE_SHARED
 143 
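These selectors are not used directly in this header; the generic mm code gathers them into a 16-entry table indexed by the read/write/exec/shared VM flag bits, roughly as in the sketch below (the array name here is illustrative):

/* illustrative only -- the kernel's own table lives in the generic mm code */
static pgprot_t example_protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};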
 144 /* zero page used for uninitialized stuff */
 145 extern unsigned long empty_zero_page;
 146 
 147 /*
 148  * BAD_PAGETABLE is used when we need a bogus page-table, while
 149  * BAD_PAGE is used for a bogus page.
 150  *
 151  * ZERO_PAGE is a global shared page that is always zero: used
 152  * for zero-mapped memory areas etc..
 153  */
 154 extern pte_t __bad_page(void);
 155 extern pte_t * __bad_pagetable(void);
 156 
 157 #define BAD_PAGETABLE __bad_pagetable()
 158 #define BAD_PAGE __bad_page()
 159 #define ZERO_PAGE empty_zero_page
 160 
 161 /* number of bits that fit into a memory pointer */
 162 #define BITS_PER_PTR                    (8*sizeof(unsigned long))
 163 
 164 /* to align the pointer to a pointer address */
 165 #define PTR_MASK                        (~(sizeof(void*)-1))
 166 
 167 /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
 168 /* 64-bit machines, beware!  SRB. */
 169 #define SIZEOF_PTR_LOG2                 2
 170 
 171 /* to find an entry in a page-table */
 172 #define PAGE_PTR(address) \
 173 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
 174 
 175 extern unsigned long high_memory;
 176 
 177 /* For virtual address to physical address conversion */
 178 extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
 179 extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
 180 #define VTOP(addr)  (mm_vtop((unsigned long)(addr)))
 181 #define PTOV(addr)  (mm_ptov((unsigned long)(addr)))
 182 
 183 /*
 184  * Conversion functions: convert a page and protection to a page entry,
 185  * and a page entry and page directory to the page they refer to.
 186  */
 187 extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
 188 { pte_t pte; pte_val(pte) = VTOP(page) | pgprot_val(pgprot); return pte; }
 189 
 190 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 191 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
 192 
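As a rough usage sketch (the helper name below is hypothetical): mk_pte() converts the kernel-virtual page address to a physical address via VTOP() and ORs in the protection bits, while pte_modify() swaps in new protection bits but keeps the page address plus the accessed, dirty and cache bits selected by _PAGE_CHG_MASK:

/* illustrative only */
static inline pte_t example_cow_to_shared(unsigned long page)
{
        pte_t pte = mk_pte(page, PAGE_COPY);    /* VTOP(page) | read-only bits */
        return pte_modify(pte, PAGE_SHARED);    /* same page, now writable     */
}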
 193 extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 194 {
 195         int i;
 196 
 197         ptep = (pte_t *) VTOP(ptep);
 198         for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
 199                 pmdp->pmd[i] = _PAGE_TABLE | (unsigned long)ptep;
 200 }
 201 
 202 /* early termination version of the above */
 203 extern inline void pmd_set_et(pmd_t * pmdp, pte_t * ptep)
 204 {
 205         int i;
 206 
 207         ptep = (pte_t *) VTOP(ptep);
 208         for (i = 0; i < 16; i++, ptep += PTRS_PER_PTE/16)
 209                 pmdp->pmd[i] = _PAGE_PRESENT | (unsigned long)ptep;
 210 }
 211 
 212 extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 213 { pgd_val(*pgdp) = _PAGE_TABLE | VTOP(pmdp); }
 214 
 215 extern inline unsigned long pte_page(pte_t pte)
 216 { return PTOV(pte_val(pte) & PAGE_MASK); }
 217 
 218 extern inline unsigned long pmd_page2(pmd_t *pmd)
 219 { return PTOV(pmd_val(*pmd) & _TABLE_MASK); }
 220 #define pmd_page(pmd) pmd_page2(&(pmd))
 221 
 222 extern inline unsigned long pgd_page(pgd_t pgd)
 223 { return PTOV(pgd_val(pgd) & _TABLE_MASK); }
 224 
 225 extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
 226 extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
 227 extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
 228 
 229 extern inline int pmd_none2(pmd_t *pmd)         { return !pmd_val(*pmd); }
 230 #define pmd_none(pmd) pmd_none2(&(pmd))
 231 extern inline int pmd_bad2(pmd_t *pmd)          { return (pmd_val(*pmd) & _DESCTYPE_MASK) != _PAGE_TABLE || pmd_page(*pmd) > high_memory; }
 232 #define pmd_bad(pmd) pmd_bad2(&(pmd))
 233 extern inline int pmd_present2(pmd_t *pmd)      { return pmd_val(*pmd) & _PAGE_TABLE; }
 234 #define pmd_present(pmd) pmd_present2(&(pmd))
 235 extern inline void pmd_clear(pmd_t * pmdp)
 236 {
 237         int i;
 238 
 239         for (i = 0; i < 16; i++)
 240                 pmdp->pmd[i] = 0;
 241 }
 242 
 243 extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
 244 extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
 245 extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_TABLE; }
 246 
 247 extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }
 248 
 249 /*
 250  * The following only work if pte_present() is true.
 251  * Undefined behaviour if not..
 252  */
 253 extern inline int pte_read(pte_t pte)           { return 1; }
 254 extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_RONLY); }
 255 extern inline int pte_exec(pte_t pte)           { return 1; }
 256 extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
 257 extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
 258 
 259 extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_RONLY; return pte; }
 260 extern inline pte_t pte_rdprotect(pte_t pte)    { return pte; }
 261 extern inline pte_t pte_exprotect(pte_t pte)    { return pte; }
 262 extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 263 extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 264 extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
 265 extern inline pte_t pte_mkread(pte_t pte)       { return pte; }
 266 extern inline pte_t pte_mkexec(pte_t pte)       { return pte; }
 267 extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 268 extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 269 extern inline pte_t pte_mknocache(pte_t pte)
 270 {
 271         pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
 272         return pte;
 273 }
 274 extern inline pte_t pte_mkcache(pte_t pte)      { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | _PAGE_CACHE040; return pte; }
 275 
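A minimal sketch of how these accessors are normally combined (hypothetical helper): read the pte, transform it with one of the pte_mk*/pte_*protect functions above, write it back with set_pte(), and flush the stale TLB entry:

/* illustrative only */
static inline void example_wrprotect_one(struct vm_area_struct *vma,
                                         pte_t *ptep, unsigned long addr)
{
        if (pte_present(*ptep)) {
                set_pte(ptep, pte_wrprotect(*ptep));
                flush_tlb_page(vma, addr);
        }
}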
 276 /* to set the page-dir */
 277 extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
 278 {
 279         tsk->tss.pagedir_v = (unsigned long *)pgdir;
 280         tsk->tss.pagedir_p = VTOP(pgdir);
 281         tsk->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
 282         tsk->tss.crp[1] = tsk->tss.pagedir_p;
 283         if (tsk == current) {
 284                 if (m68k_is040or060)
 285                         __asm__ __volatile__ (".word 0xf510\n\t" /* pflushan */
 286                                               "movel %0@,%/d0\n\t"
 287                                               ".long 0x4e7b0806\n\t"
 288                                               /* movec d0,urp */
 289                                               : : "a" (&tsk->tss.crp[1])
 290                                               : "d0");
 291                 else
 292                         __asm__ __volatile__ ("movec  %/cacr,%/d0\n\t"
 293                                               "oriw #0x0808,%/d0\n\t"
 294                                               "movec %/d0,%/cacr\n\t"
 295                                               "pmove %0@,%/crp\n\t"
 296                                               : : "a" (&tsk->tss.crp[0])
 297                                               : "d0");
 298         }
 299 }
 300 
 301 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 302 
 303 /* to find an entry in a page-table-directory */
 304 extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
 305 {
 306         return mm->pgd + (address >> PGDIR_SHIFT);
 307 }
 308 
 309 extern pgd_t swapper_pg_dir[128];
 310 extern pgd_t kernel_pg_dir[128];
 311 
 312 extern inline pgd_t * pgd_offset_k(unsigned long address)
 313 {
 314         return kernel_pg_dir + (address >> PGDIR_SHIFT);
 315 }
 316 
 317 
 318 /* Find an entry in the second-level page table.. */
 319 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 320 {
 321         return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
 322 }
 323 
 324 /* Find an entry in the third-level page table.. */ 
 325 extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
 326 {
 327         return (pte_t *) pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 328 }
 329 
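Taken together, the offset functions give the usual read-only walk from an mm and a virtual address down to the pte. A sketch, with a hypothetical helper name and no locking shown:

/* illustrative only */
static inline pte_t *example_pte_lookup(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset(pmd, address);
}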
 330 /*
 331  * Allocate and free page tables. The xxx_kernel() versions are
 332  * used to allocate a kernel page table - this turns on ASN bits
 333  * if any.
 334  */
 335 
 336 extern inline void nocache_page (unsigned long vaddr)
 337 {
 338         if (m68k_is040or060) {
 339                 pgd_t *dir;
 340                 pmd_t *pmdp;
 341                 pte_t *ptep;
 342 
 343                 dir = pgd_offset_k(vaddr);
 344                 pmdp = pmd_offset(dir,vaddr);
 345                 ptep = pte_offset(pmdp,vaddr);
 346                 *ptep = pte_mknocache(*ptep);
 347         }
 348 }
 349 
 350 static inline void cache_page (unsigned long vaddr)
 351 {
 352         if (m68k_is040or060) {
 353                 pgd_t *dir;
 354                 pmd_t *pmdp;
 355                 pte_t *ptep;
 356 
 357                 dir = pgd_offset_k(vaddr);
 358                 pmdp = pmd_offset(dir,vaddr);
 359                 ptep = pte_offset(pmdp,vaddr);
 360                 *ptep = pte_mkcache(*ptep);
 361         }
 362 }
 363 
 364 
 365 extern inline void pte_free(pte_t * pte)
 366 {
 367         cache_page((unsigned long)pte);
 368         free_page((unsigned long) pte);
 369 }
 370 
 371 extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
 372 {
 373         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 374         if (pmd_none(*pmd)) {
 375                 pte_t * page = (pte_t *)get_free_page(GFP_KERNEL);
 376                 if (pmd_none(*pmd)) {
 377                         if (page) {
 378                                 nocache_page((unsigned long)page);
 379                                 pmd_set(pmd,page);
 380                                 return page + address;
 381                         }
 382                         pmd_set(pmd, BAD_PAGETABLE);
 383                         return NULL;
 384                 }
 385                 free_page((unsigned long)page);
 386         }
 387         if (pmd_bad(*pmd)) {
 388                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 389                 pmd_set(pmd, BAD_PAGETABLE);
 390                 return NULL;
 391         }
 392         return (pte_t *) pmd_page(*pmd) + address;
 393 }
 394 
 395 extern pmd_t *get_pointer_table (void);
 396 extern void free_pointer_table (pmd_t *);
 397 extern pmd_t *get_kpointer_table (void);
 398 extern void free_kpointer_table (pmd_t *);
 399 
 400 extern inline void pmd_free(pmd_t * pmd)
 401 {
 402         free_pointer_table (pmd);
 403 }
 404 
 405 extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
 406 {
 407         address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 408         if (pgd_none(*pgd)) {
 409                 pmd_t *page = get_pointer_table();
 410                 if (pgd_none(*pgd)) {
 411                         if (page) {
 412                                 pgd_set(pgd, page);
 413                                 return page + address;
 414                         }
 415                         pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
 416                         return NULL;
 417                 }
 418                 free_pointer_table(page);
 419         }
 420         if (pgd_bad(*pgd)) {
 421                 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
 422                 pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
 423                 return NULL;
 424         }
 425         return (pmd_t *) pgd_page(*pgd) + address;
 426 }
 427 
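The allocation side mirrors the walk above. A rough fault-path sketch (hypothetical helper, error handling reduced to a failure return) that makes sure the pmd and pte tables exist and then installs an entry:

/* illustrative only */
static inline int example_install_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t entry)
{
        pgd_t *pgd = pgd_offset(mm, address);
        pmd_t *pmd = pmd_alloc(pgd, address);
        pte_t *pte;

        if (!pmd)
                return -1;              /* pointer table allocation failed */
        pte = pte_alloc(pmd, address);
        if (!pte)
                return -1;              /* page table allocation failed */
        set_pte(pte, entry);
        return 0;
}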
 428 extern inline void pte_free_kernel(pte_t * pte)
 429 {
 430         cache_page((unsigned long)pte);
 431         free_page((unsigned long) pte);
 432 }
 433 
 434 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 435 {
 436         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 437         if (pmd_none(*pmd)) {
 438                 pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
 439                 if (pmd_none(*pmd)) {
 440                         if (page) {
 441                                 nocache_page((unsigned long)page);
 442                                 pmd_set(pmd, page);
 443                                 return page + address;
 444                         }
 445                         pmd_set(pmd, BAD_PAGETABLE);
 446                         return NULL;
 447                 }
 448                 free_page((unsigned long) page);
 449         }
 450         if (pmd_bad(*pmd)) {
 451                 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
 452                 pmd_set(pmd, BAD_PAGETABLE);
 453                 return NULL;
 454         }
 455         return (pte_t *) pmd_page(*pmd) + address;
 456 }
 457 
 458 extern inline void pmd_free_kernel(pmd_t * pmd)
 459 {
 460         free_kpointer_table(pmd);
 461 }
 462 
 463 extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
 464 {
 465         address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 466         if (pgd_none(*pgd)) {
 467                 pmd_t *page = get_kpointer_table();
 468                 if (pgd_none(*pgd)) {
 469                         if (page) {
 470                                 pgd_set(pgd, page);
 471                                 return page + address;
 472                         }
 473                         pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
 474                         return NULL;
 475                 }
 476                 free_kpointer_table(page);
 477         }
 478         if (pgd_bad(*pgd)) {
 479                 printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd));
 480                 pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
 481                 return NULL;
 482         }
 483         return (pmd_t *) pgd_page(*pgd) + address;
 484 }
 485 
 486 extern inline void pgd_free(pgd_t * pgd)
 487 {
 488         free_pointer_table ((pmd_t *) pgd);
 489 }
 490 
 491 extern inline pgd_t * pgd_alloc(void)
 492 {
 493         return (pgd_t *)get_pointer_table ();
 494 }
 495 
 496 #define flush_icache() \
 497 do { \
 498         if (m68k_is040or060) \
 499                 asm (".word 0xf498"); /* CINVA I */ \
 500         else \
 501                 asm ("movec %/cacr,%/d0;" \
 502                      "oriw %0,%/d0;" \
 503                      "movec %/d0,%/cacr" \
 504                      : /* no outputs */ \
 505                      : "i" (FLUSH_I) \
 506                      : "d0"); \
 507 } while (0)
 508 
 509 /*
 510  * invalidate the cache for the specified memory range.
 511  * It starts at the physical address specified for
 512  * the given number of bytes.
 513  */
 514 extern void cache_clear (unsigned long paddr, int len);
 515 /*
 516  * push any dirty cache in the specified memory range.
 517  * It starts at the physical address specified for
 518  * the given number of bytes.
 519  */
 520 extern void cache_push (unsigned long paddr, int len);
 521 
 522 /*
 523  * push and invalidate pages in the specified user virtual
 524  * memory range.
 525  */
 526 extern void cache_push_v (unsigned long vaddr, int len);
 527 
 528 /*
 529  * Could someone take a look at these?
 530  */
 531 extern void flush_cache_all(void);
 532 #define flush_cache_mm(mm)                 flush_cache_all()
 533 #define flush_cache_range(mm, start, end)  flush_cache_all()
 534 #define flush_cache_page(vma, addr)        flush_cache_all()
 535 extern void flush_page_to_ram(unsigned long addr);
 536 
 537 /* cache code */
 538 #define FLUSH_I_AND_D   (0x00000808)
 539 #define FLUSH_I         (0x00000008)
 540 
 541 /*
 542  * Check if the addr/len goes up to the end of a physical
 543  * memory chunk.  Used for DMA functions.
 544  */
 545 int mm_end_of_chunk (unsigned long addr, int len);
 546 
 547 /*
 548  * Map some physical address range into the kernel address space. The
 549  * code is copied and adapted from map_chunk().
 550  */
 551 extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
 552                                 int nocacheflag, unsigned long *memavailp );
 553 /*
 554  * Change the cache mode of some kernel address range.
 555  */
 556 extern void kernel_set_cachemode( unsigned long address, unsigned long size,
 557                                   unsigned cmode );
 558 
 559 /* Values for nocacheflag and cmode */
 560 #define KERNELMAP_FULL_CACHING          0
 561 #define KERNELMAP_NOCACHE_SER           1
 562 #define KERNELMAP_NOCACHE_NONSER        2
 563 #define KERNELMAP_NO_COPYBACK           3
 564 
 565 /*
 566  * The m68k doesn't have any external MMU info: the kernel page
 567  * tables contain all the necessary information.
 568  */
 569 extern inline void update_mmu_cache(struct vm_area_struct * vma,
 570         unsigned long address, pte_t pte)
 571 {
 572 }
 573 
 574 /*
 575  * I don't know what is going on here, but since these were changed,
 576  * swapping hasn't been working on the 68040.
 577  */
 578 
 579 #if 0
 580 #define SWP_TYPE(entry)  (((entry) >> 2) & 0x7f)
 581 #define SWP_OFFSET(entry) ((entry) >> 9)
 582 #define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
 583 #else
 584 #define SWP_TYPE(entry)  (((entry) & 0x1fc) >> 2)
 585 #define SWP_OFFSET(entry) ((entry) >> PAGE_SHIFT)
 586 #define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << PAGE_SHIFT))
 587 #endif
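The active encoding keeps the swap type in bits 2..8, so the present bit stays clear, and the offset above PAGE_SHIFT, so the two fields cannot collide with 4KB pages. A small illustrative round-trip check (hypothetical helper):

/* illustrative only */
static inline int example_swp_roundtrip(void)
{
        unsigned long entry = SWP_ENTRY(5, 1234);

        return SWP_TYPE(entry) == 5 && SWP_OFFSET(entry) == 1234;
}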
 588 
 589 #endif /* _M68K_PGTABLE_H */
