root/include/asm-alpha/pgtable.h

DEFINITIONS

This source file includes the following definitions.
  1. invalidate
  2. invalidate_all
  3. invalidate_mm
  4. invalidate_page
  5. invalidate_range
  6. mk_pte
  7. pte_modify
  8. pmd_set
  9. pgd_set
  10. pte_page
  11. pmd_page
  12. pgd_page
  13. pte_none
  14. pte_present
  15. pte_inuse
  16. pte_clear
  17. pte_reuse
  18. pmd_none
  19. pmd_bad
  20. pmd_present
  21. pmd_inuse
  22. pmd_clear
  23. pmd_reuse
  24. pgd_none
  25. pgd_bad
  26. pgd_present
  27. pgd_inuse
  28. pgd_clear
  29. pte_read
  30. pte_write
  31. pte_exec
  32. pte_dirty
  33. pte_young
  34. pte_wrprotect
  35. pte_rdprotect
  36. pte_exprotect
  37. pte_mkclean
  38. pte_mkold
  39. pte_mkwrite
  40. pte_mkread
  41. pte_mkexec
  42. pte_mkdirty
  43. pte_mkyoung
  44. SET_PAGE_DIR
  45. pgd_offset
  46. pmd_offset
  47. pte_offset
  48. pte_free_kernel
  49. pte_alloc_kernel
  50. pmd_free_kernel
  51. pmd_alloc_kernel
  52. pte_free
  53. pte_alloc
  54. pmd_free
  55. pmd_alloc
  56. pgd_free
  57. pgd_alloc
  58. update_mmu_cache
  59. mk_swap_pte

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

#include <asm/system.h>

/*
 * Invalidate the current user mapping.
 */
static inline void invalidate(void)
{
        tbiap();
}

/*
 * Invalidate everything (the kernel mapping may also have
 * changed due to vmalloc/vfree).
 */
static inline void invalidate_all(void)
{
        tbia();
}

/*
 * Invalidate a specified user mapping.
 */
static inline void invalidate_mm(struct mm_struct *mm)
{
        if (mm != current->mm)
                mm->context = 0;
        else
                tbiap();
}
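
/*
 * Aside (illustrative): clearing mm->context forces a fresh
 * address-space number (ASN) to be handed out the next time this
 * mm is switched in, so TLB entries tagged with the old ASN can
 * never match again; only the currently resident mm needs an
 * explicit tbiap().
 */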

/*
 * Page-granular invalidate.
 *
 * Do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb invalidate, because that potentially also does an
 * icache invalidate.
 */
static inline void invalidate_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        struct mm_struct * mm = vma->vm_mm;

        if (mm != current->mm)
                mm->context = 0;
        else
                tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}
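
/*
 * Worked example (illustrative): for a data-only vma the call
 * evaluates to tbi(2, addr), a tbisd that drops just the DTB entry;
 * with VM_EXEC set it becomes tbi(3, addr), a tbis that drops both
 * the ITB and DTB entries for the page.
 */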

/*
 * Invalidate a specified range of a user mapping: on the
 * alpha we just invalidate the whole user TLB.
 */
static inline void invalidate_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm != current->mm)
                mm->context = 0;
        else
                tbiap();
}

/* Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    ((1UL << (PAGE_SHIFT-3))-1)

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))
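
/*
 * Worked numbers (illustrative), with the current PAGE_SHIFT of 13
 * (8 KB pages, 8-byte entries, so 2^10 = 1024 entries per table):
 *
 *      PMD_SHIFT   = 13 + 10  = 23  =>  PMD_SIZE   = 8 MB
 *      PGDIR_SHIFT = 13 + 20  = 33  =>  PGDIR_SIZE = 8 GB
 *
 * With 1023 usable PGD entries (the last is the self-map), one
 * process can map just under 2^43 bytes = 8 TB of virtual space.
 */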

#define VMALLOC_START           0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */
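
/*
 * Sketch of the mechanism (illustrative): pte_mkold() below clears
 * _PAGE_ACCESSED together with the KRE/URE read-enable bits, so the
 * next read through that pte faults; the fault handler can then use
 * pte_mkyoung() to set them all again.  A page that is never read
 * again stays "old" and becomes a page-out candidate.
 */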

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK       0xFFFFFFFF00000000

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)
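
/*
 * Worked example (illustrative): a nominally write-only shared
 * mapping gets __S010 = _PAGE_S(_PAGE_FOE), i.e. only fault-on-exec
 * set, so reads and writes both succeed: "-w-" has become "rw-".
 * _PAGE_P() additionally forces _PAGE_FOW into every private
 * mapping, so the first write always faults and the fault handler
 * can perform the copy-on-write first.
 */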

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE       0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
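
/*
 * Illustrative use of pte_modify(): since _PAGE_CHG_MASK keeps the
 * PFN plus the dirty and accessed bits,
 *
 *      pte = pte_modify(pte, PAGE_READONLY);
 *
 * swaps only the protection bits; the page the pte refers to and
 * its dirty/accessed state are preserved.
 */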

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
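
/*
 * Worked round trip (illustrative), with PAGE_SHIFT = 13: for a
 * page at kernel address p, mk_pte() stores (p - PAGE_OFFSET) << 19.
 * As p - PAGE_OFFSET == pfn << 13, the pfn lands at bit 32, exactly
 * where _PFN_MASK expects it; pte_page() shifts it back down and
 * re-adds PAGE_OFFSET, so pte_page(mk_pte(p, prot)) == p.
 */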

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline int pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
        if (!mem_map[MAP_NR(ptep)].reserved)
                mem_map[MAP_NR(ptep)].count++;
}

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline int pmd_inuse(pmd_t *pmdp)        { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)
{
        if (!mem_map[MAP_NR(pmdp)].reserved)
                mem_map[MAP_NR(pmdp)].count++;
}

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline int pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
extern inline int pte_read(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * To set the page-dir. Note the self-mapping in the last entry.
 *
 * Also note that if we update the current process's ptbr, we need to
 * update the PAL-cached ptbr value as well. There doesn't seem to
 * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
 * instead.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
        tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
        if (tsk == current)
                __asm__ __volatile__(
                        "bis %0,%0,$16\n\t"
                        "call_pal %1"
                        : /* no outputs */
                        : "r" (&tsk->tss), "i" (PAL_swpctx)
                        : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
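
/*
 * Illustrative only: a hypothetical helper (not part of this
 * header) showing the full three-level walk from a user address to
 * its pte.  It assumes all levels are already present and takes no
 * locks, so it is a sketch rather than something to use as-is.
 */
#if 0
static inline pte_t * lookup_pte_example(struct mm_struct * mm, unsigned long address)
{
        pgd_t * pgd = pgd_offset(mm, address);  /* first level */
        pmd_t * pmd = pmd_offset(pgd, address); /* second level */
        return pte_offset(pmd, address);        /* third level */
}
#endif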

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        mem_map[MAP_NR(pte)].reserved = 0;
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                mem_map[MAP_NR(page)].reserved = 1;
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}
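
/*
 * Aside (illustrative): get_free_page(GFP_KERNEL) may sleep, during
 * which another process can install a page table here.  That is why
 * pmd_none() is re-checked after the allocation: if we lost the
 * race, the freshly allocated page is released and the table that
 * won is used instead.  The same pattern recurs in all the
 * allocators below.
 */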

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        mem_map[MAP_NR(pmd)].reserved = 0;
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                mem_map[MAP_NR(page)].reserved = 1;
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}
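
/*
 * Illustrative only: a hypothetical helper (not part of this
 * header) showing the usual pattern for installing a pte, building
 * the intermediate levels on demand.  ENOMEM assumes
 * <linux/errno.h>; locking is the caller's problem.
 */
#if 0
static inline int install_pte_example(struct mm_struct * mm, unsigned long address, pte_t entry)
{
        pmd_t * pmd = pmd_alloc(pgd_offset(mm, address), address);
        pte_t * pte;

        if (!pmd)
                return -ENOMEM;         /* pmd level could not be built */
        pte = pte_alloc(pmd, address);
        if (!pte)
                return -ENOMEM;         /* pte level could not be built */
        set_pte(pte, entry);
        return 0;
}
#endif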

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages: the high 24 bits are the offset, the next 8
 * bits the type, and the low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
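
/*
 * Worked example (illustrative): SWP_ENTRY(5, 100) produces
 * (5UL << 32) | (100UL << 40); SWP_TYPE() then recovers 5 and
 * SWP_OFFSET() recovers 100.  Bit 0 (_PAGE_VALID) stays clear, so
 * pte_present() is false for every swap entry, as it must be.
 */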

#endif /* _ALPHA_PGTABLE_H */