root/include/asm-alpha/pgtable.h

DEFINITIONS

This source file includes the following definitions.
  1. reload_context
  2. flush_tlb_current_page
  3. flush_tlb
  4. flush_tlb_all
  5. flush_tlb_mm
  6. flush_tlb_page
  7. flush_tlb_range
  8. mk_pte
  9. pte_modify
  10. pmd_set
  11. pgd_set
  12. pte_page
  13. pmd_page
  14. pgd_page
  15. pte_none
  16. pte_present
  17. pte_clear
  18. pmd_none
  19. pmd_bad
  20. pmd_present
  21. pmd_clear
  22. pgd_none
  23. pgd_bad
  24. pgd_present
  25. pgd_clear
  26. pte_read
  27. pte_write
  28. pte_exec
  29. pte_dirty
  30. pte_young
  31. pte_wrprotect
  32. pte_rdprotect
  33. pte_exprotect
  34. pte_mkclean
  35. pte_mkold
  36. pte_mkwrite
  37. pte_mkread
  38. pte_mkexec
  39. pte_mkdirty
  40. pte_mkyoung
  41. SET_PAGE_DIR
  42. pgd_offset
  43. pmd_offset
  44. pte_offset
  45. pte_free_kernel
  46. pte_alloc_kernel
  47. pmd_free_kernel
  48. pmd_alloc_kernel
  49. pte_free
  50. pte_alloc
  51. pmd_free
  52. pmd_alloc
  53. pgd_free
  54. pgd_alloc
  55. update_mmu_cache
  56. mk_swap_pte

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

#include <asm/system.h>
#include <asm/mmu_context.h>

/* Caches aren't brain-dead on the alpha. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)

/*
 * Force a context reload. This is needed when we
 * change the page table pointer or when we update
 * the ASN of the current process.
 */
static inline void reload_context(struct task_struct *task)
{
        __asm__ __volatile__(
                "bis %0,%0,$16\n\t"
                "call_pal %1"
                : /* no outputs */
                : "r" (&task->tss), "i" (PAL_swpctx)
                : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}
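
/*
 * For reference: $16 is the a0 argument register, so the "bis" above
 * hands &task->tss to the PALcode, and swpctx installs it as the new
 * privileged context, reloading the PAL-cached page table pointer and
 * ASN along the way.
 */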

/*
 * Use a few helper functions to hide the ugly broken ASN
 * numbers on the early Alphas (EV4 and EV45).
 */
#ifdef BROKEN_ASN

#define flush_tlb_current(x) tbiap()
#define flush_tlb_other(x) do { } while (0)

#else

extern void get_new_asn_and_reload(struct task_struct *, struct mm_struct *);

#define flush_tlb_current(mm) get_new_asn_and_reload(current, mm)
#define flush_tlb_other(mm) do { (mm)->context = 0; } while (0)

#endif
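
/*
 * ASNs (address space numbers) tag TLB entries with the address space
 * they belong to.  Where they work, flushing another process's mappings
 * is as cheap as zeroing its context so that it gets a fresh ASN on the
 * next switch; where they are broken we fall back to tbiap(), which
 * invalidates every non-ASM TLB entry.
 */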

/*
 * Flush just one page in the current TLB set.
 * We need to be very careful about the icache here: there
 * is no way to invalidate a specific icache page..
 */
static inline void flush_tlb_current_page(struct mm_struct * mm,
        struct vm_area_struct *vma,
        unsigned long addr)
{
#ifdef BROKEN_ASN
        tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
#else
        if (vma->vm_flags & VM_EXEC)
                flush_tlb_current(mm);
        else
                tbi(2, addr);
#endif
}

/*
 * Flush the current user mapping.
 */
static inline void flush_tlb(void)
{
        flush_tlb_current(current->mm);
}

/*
 * Flush everything (the kernel mapping may also have
 * changed due to vmalloc/vfree).
 */
static inline void flush_tlb_all(void)
{
        tbia();
}

/*
 * Flush a specified user mapping.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm != current->mm)
                flush_tlb_other(mm);
        else
                flush_tlb_current(mm);
}

/*
 * Page-granular tlb flush.
 *
 * Do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does an
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        struct mm_struct * mm = vma->vm_mm;

        if (mm != current->mm)
                flush_tlb_other(mm);
        else
                flush_tlb_current_page(mm, vma, addr);
}

/*
 * Flush a specified range of the user mapping: on the
 * alpha we flush the whole user tlb.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        flush_tlb_mm(mm);
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines the size of the area a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    ((1UL << (PAGE_SHIFT-3))-1)

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))
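
/*
 * Worked out for the 8kB page size (PAGE_SHIFT = 13): each table
 * level holds 2^10 = 1024 eight-byte entries, one pmd entry maps
 * 2^23 bytes = 8MB, and one pgd entry maps 2^33 bytes = 8GB.  With
 * 1023 usable pgd entries (the last one is the self-map) that is
 * just under 8TB of mappable address space per process.
 */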

#define VMALLOC_START           0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with handling both dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK       0xFFFFFFFF00000000

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c).
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)
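
/*
 * Reading the table: __P011 (private read-write) expands to
 * _PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW, since _PAGE_P()
 * forces FOW on - reads go through, writes fault so the copy-on-write
 * logic gets a chance to run, and instruction fetch faults.  The
 * shared __S011 variant keeps only _PAGE_FOE, as shared writable
 * mappings need no copy-on-write.
 */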

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE       0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
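
/*
 * The physical frame number lives in the upper 32 bits of the pte
 * (_PFN_MASK): mk_pte() converts the kernel-virtual page address to a
 * frame number and shifts it up past bit 32, and pte_page() below is
 * the exact inverse, so pte_page(mk_pte(page, prot)) == page.
 */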

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * To set the page-dir. Note the self-mapping in the last entry.
 *
 * Also note that if we update the current process's ptbr, we need to
 * update the PAL-cached ptbr value as well.. There doesn't seem to
 * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourselves
 * instead.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
        tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
        if (tsk == current)
                reload_context(tsk);
}
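
/*
 * The self-map: PTRS_PER_PGD is the index of the reserved last slot,
 * so the assignment above makes the page directory's final entry
 * point back at the directory itself, while tss.ptbr holds the
 * directory's physical frame number for the swpctx done by
 * reload_context().
 */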

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
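
/*
 * Putting the three together: a minimal sketch of resolving the pte
 * for a user address.  (va_to_pte is an illustrative name, not part
 * of the original interface; real callers check pgd_none()/pmd_none()
 * and allocate missing levels before dereferencing.)
 */
extern inline pte_t * va_to_pte(struct mm_struct * mm, unsigned long address)
{
        pgd_t * pgd = pgd_offset(mm, address);
        pmd_t * pmd = pmd_offset(pgd, address);
        return pte_offset(pmd, address);
}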

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}
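
/*
 * Note the repeated pmd_none() test above: get_free_page() can sleep,
 * so another context may have installed a page table in the meantime;
 * in that case the freshly allocated page is simply freed again.  The
 * same idiom recurs in all the allocators below.
 */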

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages: the high 24 bits are the offset, the next 8 bits
 * the type, and the low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
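
/*
 * Since _PAGE_VALID sits in the low 32 bits, a swap pte is never
 * "present".  For example, SWP_ENTRY(3, 0x1234) puts 3 in bits 32-39
 * and 0x1234 in bits 40-63, and SWP_TYPE()/SWP_OFFSET() shift and
 * mask them back out.
 */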

#endif /* _ALPHA_PGTABLE_H */