root/include/asm-alpha/pgtable.h


DEFINITIONS

This source file includes the following definitions (a brief usage sketch follows the list).
  1. invalidate
  2. invalidate_all
  3. invalidate_mm
  4. invalidate_page
  5. invalidate_range
  6. mk_pte
  7. pte_modify
  8. pmd_set
  9. pgd_set
  10. pte_page
  11. pmd_page
  12. pgd_page
  13. pte_none
  14. pte_present
  15. pte_inuse
  16. pte_clear
  17. pte_reuse
  18. pmd_none
  19. pmd_bad
  20. pmd_present
  21. pmd_inuse
  22. pmd_clear
  23. pmd_reuse
  24. pgd_none
  25. pgd_bad
  26. pgd_present
  27. pgd_inuse
  28. pgd_clear
  29. pte_read
  30. pte_write
  31. pte_exec
  32. pte_dirty
  33. pte_young
  34. pte_wrprotect
  35. pte_rdprotect
  36. pte_exprotect
  37. pte_mkclean
  38. pte_mkold
  39. pte_mkwrite
  40. pte_mkread
  41. pte_mkexec
  42. pte_mkdirty
  43. pte_mkyoung
  44. SET_PAGE_DIR
  45. pgd_offset
  46. pmd_offset
  47. pte_offset
  48. pte_free_kernel
  49. pte_alloc_kernel
  50. pmd_free_kernel
  51. pmd_alloc_kernel
  52. pte_free
  53. pte_alloc
  54. pmd_free
  55. pmd_alloc
  56. pgd_free
  57. pgd_alloc
  58. update_mmu_cache
  59. mk_swap_pte
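
Taken together, these helpers cover TLB invalidation, pte bit manipulation, and walking and allocating the three-level alpha page table. As a rough illustration (a hypothetical lookup_pte() helper, not part of the header itself; the real lookups live in mm/memory.c and friends), the offset helpers combine like this:

        /* illustrative sketch only */
        static inline pte_t * lookup_pte(struct mm_struct * mm, unsigned long address)
        {
                pgd_t * pgd = pgd_offset(mm, address);          /* first level */
                pmd_t * pmd;

                if (pgd_none(*pgd) || pgd_bad(*pgd))
                        return NULL;
                pmd = pmd_offset(pgd, address);                 /* second level */
                if (pmd_none(*pmd) || pmd_bad(*pmd))
                        return NULL;
                return pte_offset(pmd, address);                /* third level */
        }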

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the alpha page table tree.
 *
 * This hopefully works with any standard alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */

extern void tbi(long type, ...);

#define tbisi(x)        tbi(1,(x))
#define tbisd(x)        tbi(2,(x))
#define tbis(x)         tbi(3,(x))
#define tbiap()         tbi(-1)
#define tbia()          tbi(-2)
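/*
 * The type argument selects the OSF/1 PALcode tbi operation: 1 and 2
 * invalidate a single I-stream or D-stream translation, 3 does both for
 * one address, -1 flushes all per-process entries, and -2 flushes the
 * whole TLB, hence the macro names above.
 */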

/*
 * Invalidate current user mapping.
 */
static inline void invalidate(void)
{
        tbiap();
}

/*
 * Invalidate everything (kernel mapping may also have
 * changed due to vmalloc/vfree)
 */
static inline void invalidate_all(void)
{
        tbia();
}

/*
 * Invalidate a specified user mapping
 */
static inline void invalidate_mm(struct mm_struct *mm)
{
        tbiap();
}

/*
 * Page-granular invalidate.
 *
 * Do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb invalidate, because that potentially also does an
 * icache invalidate.
 */
static inline void invalidate_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}

/*
 * Invalidate a specified range of user mapping: on the
 * alpha we invalidate the whole user tlb
 */
static inline void invalidate_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        tbiap();
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD    (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD    ((1UL << (PAGE_SHIFT-3))-1)

/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE   (1UL << (PAGE_SHIFT-3))
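/*
 * With the 8192-byte pages mentioned above (PAGE_SHIFT = 13), that works
 * out to 1024 eight-byte entries per level, PMD_SHIFT = 23 (one pmd entry
 * maps 8MB) and PGDIR_SHIFT = 33 (one pgd entry maps 8GB), i.e. about 8TB
 * of address space per process once the self-map slot is subtracted.
 */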

#define VMALLOC_START           0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)       ((unsigned long)(x))

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID     0x0001
#define _PAGE_FOR       0x0002  /* used for page protection (fault on read) */
#define _PAGE_FOW       0x0004  /* used for page protection (fault on write) */
#define _PAGE_FOE       0x0008  /* used for page protection (fault on exec) */
#define _PAGE_ASM       0x0010
#define _PAGE_KRE       0x0100  /* xxx - see below on the "accessed" bit */
#define _PAGE_URE       0x0200  /* xxx */
#define _PAGE_KWE       0x1000  /* used to do the dirty bit in software */
#define _PAGE_UWE       0x2000  /* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY     0x20000
#define _PAGE_ACCESSED  0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
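/*
 * Rough sketch of how the software dirty bit is meant to be used (the
 * real code lives in arch/alpha/mm/fault.c and mm/memory.c): a clean but
 * writable pte has KWE/UWE clear, so the first store traps, and the fault
 * handler can then do something like
 *
 *      set_pte(ptep, pte_mkdirty(pte_mkyoung(*ptep)));
 *
 * which sets _PAGE_DIRTY along with the write-enable bits so the store can
 * be replayed.  pte_mkclean() drops those bits again, forcing the next
 * store to fault.  Reads are tracked the same way through KRE/URE with
 * pte_mkyoung()/pte_mkold().
 */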

#define _PFN_MASK       0xFFFFFFFF00000000

#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK  (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED     __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY       __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY   __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL     __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
        /* xwr */
#define __P000  _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001  _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010  _PAGE_P(_PAGE_FOE)
#define __P011  _PAGE_P(_PAGE_FOE)
#define __P100  _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101  _PAGE_P(_PAGE_FOW)
#define __P110  _PAGE_P(0)
#define __P111  _PAGE_P(0)

#define __S000  _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001  _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010  _PAGE_S(_PAGE_FOE)
#define __S011  _PAGE_S(_PAGE_FOE)
#define __S100  _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101  _PAGE_S(_PAGE_FOW)
#define __S110  _PAGE_S(0)
#define __S111  _PAGE_S(0)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE   __bad_pagetable()
#define BAD_PAGE        __bad_page()
#define ZERO_PAGE       0xfffffc000030A000

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2                 3

/* to find an entry in a page-table */
#define PAGE_PTR(address)               \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

extern unsigned long high_memory;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
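/*
 * mk_pte() and the *_page() helpers above are inverses of each other: with
 * PAGE_SHIFT = 13 the physical address is shifted left by 19 bits, putting
 * the page frame number in bits 32-63 (exactly _PFN_MASK), and the *_page()
 * helpers shift it back down and re-add PAGE_OFFSET.  So for a page-aligned
 * address in the kernel direct mapping, pte_page(mk_pte(page, prot)) == page.
 */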

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_VALID; }
extern inline int pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
        if (!mem_map[MAP_NR(ptep)].reserved)
                mem_map[MAP_NR(ptep)].count++;
}

extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE || pmd_page(pmd) > high_memory; }
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_VALID; }
extern inline int pmd_inuse(pmd_t *pmdp)        { return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }
extern inline void pmd_reuse(pmd_t * pmdp)
{
        if (!mem_map[MAP_NR(pmdp)].reserved)
                mem_map[MAP_NR(pmdp)].count++;
}

extern inline int pgd_none(pgd_t pgd)           { return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)            { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE || pgd_page(pgd) > high_memory; }
extern inline int pgd_present(pgd_t pgd)        { return pgd_val(pgd) & _PAGE_VALID; }
extern inline int pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp)      { pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)          { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)           { return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * To set the page-dir. Note the self-mapping in the last entry
 *
 * Also note that if we update the current process ptbr, we need to
 * update the PAL-cached ptbr value as well.. There doesn't seem to
 * be any "wrptbr" PAL-insn, but we can do a dummy swpctx to ourself
 * instead.
 */
extern inline void SET_PAGE_DIR(struct task_struct * tsk, pgd_t * pgdir)
{
        pgd_val(pgdir[PTRS_PER_PGD]) = pte_val(mk_pte((unsigned long) pgdir, PAGE_KERNEL));
        tsk->tss.ptbr = ((unsigned long) pgdir - PAGE_OFFSET) >> PAGE_SHIFT;
        if (tsk == current)
                __asm__ __volatile__(
                        "bis %0,%0,$16\n\t"
                        "call_pal %1"
                        : /* no outputs */
                        : "r" (&tsk->tss), "i" (PAL_swpctx)
                        : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
}
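/*
 * The self-map set up above means that, for a direct-mapped page directory,
 * pgd_page(pgdir[PTRS_PER_PGD]) yields (unsigned long) pgdir again, which is
 * why PTRS_PER_PGD reserves that last slot.
 */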

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
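/*
 * Note the pattern in the allocators below: the entry is tested, a page is
 * allocated with get_free_page() (which may sleep), and the entry is then
 * tested again before being filled in.  If somebody installed a page table
 * in the meantime, the freshly allocated page is simply freed again; if the
 * allocation failed, the entry is pointed at BAD_PAGETABLE instead of being
 * left empty.
 */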
extern inline void pte_free_kernel(pte_t * pte)
{
        mem_map[MAP_NR(pte)].reserved = 0;
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                mem_map[MAP_NR(page)].reserved = 1;
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free_kernel(pmd_t * pmd)
{
        mem_map[MAP_NR(pmd)].reserved = 0;
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                mem_map[MAP_NR(page)].reserved = 1;
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pte_free(pte_t * pte)
{
        free_page((unsigned long) pte);
}

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                pmd_set(pmd, page);
                                return page + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

extern inline void pmd_free(pmd_t * pmd)
{
        free_page((unsigned long) pmd);
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
        address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        if (pgd_none(*pgd)) {
                pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
                if (pgd_none(*pgd)) {
                        if (page) {
                                pgd_set(pgd, page);
                                return page + address;
                        }
                        pgd_set(pgd, BAD_PAGETABLE);
                        return NULL;
                }
                free_page((unsigned long) page);
        }
        if (pgd_bad(*pgd)) {
                printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
                pgd_set(pgd, BAD_PAGETABLE);
                return NULL;
        }
        return (pmd_t *) pgd_page(*pgd) + address;
}

extern inline void pgd_free(pgd_t * pgd)
{
        free_page((unsigned long) pgd);
}

extern inline pgd_t * pgd_alloc(void)
{
        return (pgd_t *) get_free_page(GFP_KERNEL);
}

extern pgd_t swapper_pg_dir[1024];

/*
 * The alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero..
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
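/*
 * Because the low 32 bits (including _PAGE_VALID) stay zero, a swap entry
 * built this way always fails pte_present(), which is what distinguishes a
 * swapped-out page from a mapped one.
 */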

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))

#endif /* _ALPHA_PGTABLE_H */
