root/include/asm-alpha/pgtable.h


DEFINITIONS

This source file includes the following definitions:
  1. flush_tlb
  2. flush_tlb_all
  3. flush_tlb_mm
  4. flush_tlb_page
  5. flush_tlb_range
  6. mk_pte
  7. pte_modify
  8. pmd_set
  9. pgd_set
  10. pte_page
  11. pmd_page
  12. pgd_page
  13. pte_none
  14. pte_present
  15. pte_clear
  16. pmd_none
  17. pmd_bad
  18. pmd_present
  19. pmd_clear
  20. pgd_none
  21. pgd_bad
  22. pgd_present
  23. pgd_clear
  24. pte_read
  25. pte_write
  26. pte_exec
  27. pte_dirty
  28. pte_young
  29. pte_wrprotect
  30. pte_rdprotect
  31. pte_exprotect
  32. pte_mkclean
  33. pte_mkold
  34. pte_mkwrite
  35. pte_mkread
  36. pte_mkexec
  37. pte_mkdirty
  38. pte_mkyoung
  39. SET_PAGE_DIR
  40. pgd_offset
  41. pmd_offset
  42. pte_offset
  43. pte_free_kernel
  44. pte_alloc_kernel
  45. pmd_free_kernel
  46. pmd_alloc_kernel
  47. pte_free
  48. pte_alloc
  49. pmd_free
  50. pmd_alloc
  51. pgd_free
  52. pgd_alloc
  53. update_mmu_cache
  54. mk_swap_pte

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

#include <asm/system.h>

/* Caches aren't brain-dead on the alpha. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)

/*
 * Flush current user mapping.
 */
static inline void flush_tlb(void)
{
        tbiap();
}

/*
 * Flush everything (kernel mapping may also have
 * changed due to vmalloc/vfree)
 */
static inline void flush_tlb_all(void)
{
        tbia();
}

/*
 * Flush a specified user mapping
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm != current->mm)
                mm->context = 0;
        else
                tbiap();
}

/*
 * Page-granular tlb flush.
 *
 * do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does an
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        struct mm_struct * mm = vma->vm_mm;

        if (mm != current->mm)
                mm->context = 0;
        else
                tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}

/*
 * Flush a specified range of user mapping: on the
 * alpha we flush the whole user tlb
 */
static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm != current->mm)
                mm->context = 0;
        else
                tbiap();
}
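
/*
 * In the flush routines above, clearing mm->context for a non-current
 * mm invalidates its translations lazily: the mm gets a fresh
 * address-space number the next time it is switched in, so no
 * immediate tbiap() is needed on its behalf.
 */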

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    ((1UL << (PAGE_SHIFT-3))-1)

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))
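
/*
 * Worked example (illustrative): with the default PAGE_SHIFT of 13
 * (8192-byte pages, as noted at the top of this file), the constants
 * above come out as
 *
 *      PTRS_PER_PTE = 1 << 10 = 1024 entries per table page
 *      PMD_SHIFT    = 13 + 10 = 23   => one pmd entry maps 8MB
 *      PGDIR_SHIFT  = 13 + 20 = 33   => one pgd entry maps 8GB
 *
 * so the 1023 usable pgd entries cover just under 8TB.
 */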

#define VMALLOC_START           0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK       0xFFFFFFFF00000000

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)
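
/*
 * Expansion example (illustrative): the private write-only case maps to
 *
 *      __P010 = _PAGE_P(_PAGE_FOE)
 *             = __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW)
 *
 * _PAGE_P() ors in _PAGE_FOW whenever the caller didn't ask for it, so
 * the first write to a private page faults and can be handled as
 * copy-on-write, while the shared __S010 variant leaves the page
 * directly writable. This is the "-w- -> rw-" mapping described above.
 */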

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE       0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
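
/*
 * Illustrative arithmetic: with PAGE_SHIFT == 13, mk_pte() computes
 *
 *      (page - PAGE_OFFSET) << 19  ==  ((page - PAGE_OFFSET) >> 13) << 32
 *
 * i.e. the physical page frame number lands in the high 32 bits of the
 * pte, exactly the region covered by _PFN_MASK; pte_page() below
 * reverses the transformation.
 */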

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }
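
/*
 * Sketch of the software dirty-bit protocol implied by the helpers
 * above (illustrative, following the _PAGE_KWE/_PAGE_UWE comments):
 * pte_mkclean() drops the write-enable bits along with _PAGE_DIRTY, so
 * the next write to the page traps; the fault handler then applies
 * pte_mkdirty(), which records _PAGE_DIRTY and re-enables writes in
 * one go. pte_mkold()/pte_mkyoung() play the same game with the
 * read-enable bits to emulate the "accessed" bit.
 */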

/*
 * To set the page-dir. Note the self-mapping in the last entry
 *
 * Also note that if we update the current process ptbr, we need to
 * update the PAL-cached ptbr value as well.. There doesn't seem to
 * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
 * instead.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
        tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
        if (tsk == current)
                __asm__ __volatile__(
                        "bis %0,%0,$16\n\t"
                        "call_pal %1"
                        : /* no outputs */
                        : "r" (&tsk->tss), "i" (PAL_swpctx)
                        : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
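
/*
 * Illustrative composition (the name lookup_pte is hypothetical, not
 * part of this header): the three helpers above chain into a complete
 * software walk of the page-table tree. Real callers also check
 * pgd_none()/pmd_none() on the way down.
 *
 *      extern inline pte_t * lookup_pte(struct mm_struct * mm, unsigned long address)
 *      {
 *              pgd_t * pgd = pgd_offset(mm, address);
 *              pmd_t * pmd = pmd_offset(pgd, address);
 *              return pte_offset(pmd, address);
 *      }
 */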

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}
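
/*
 * The pmd_none() test in pte_alloc_kernel() above is deliberately
 * repeated after the allocation: get_free_page() may sleep, and
 * another context can install a page table in the meantime, in which
 * case the freshly allocated page is freed again rather than
 * clobbering the live entry. The same pattern recurs in the other
 * allocators below.
 */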

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero..
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
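
/*
 * Round-trip example (illustrative): SWP_ENTRY(5, 100) yields
 * (5UL << 32) | (100UL << 40); SWP_TYPE() then recovers 5 and
 * SWP_OFFSET() recovers 100. The low 32 bits, including _PAGE_VALID,
 * stay zero, so such a pte is never seen as present.
 */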
#endif /* _ALPHA_PGTABLE_H */
