root/include/asm-ppc/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. pte_none
  2. pte_present
  3. pte_inuse
  4. pte_clear
  5. pte_reuse
  6. pmd_none
  7. pmd_bad
  8. pmd_present
  9. pmd_inuse
  10. pmd_clear
  11. pmd_reuse
  12. pgd_none
  13. pgd_bad
  14. pgd_present
  15. pgd_inuse
  16. pgd_clear
  17. pte_read
  18. pte_write
  19. pte_exec
  20. pte_dirty
  21. pte_young
  22. pte_cow
  23. pte_wrprotect
  24. pte_rdprotect
  25. pte_exprotect
  26. pte_mkclean
  27. pte_mkold
  28. pte_uncow
  29. pte_mkwrite
  30. pte_mkread
  31. pte_mkexec
  32. pte_mkdirty
  33. pte_mkyoung
  34. pte_mkcow
  35. mk_pte
  36. pte_modify
  37. pte_page
  38. pmd_page
  39. pgd_offset
  40. pmd_offset
  41. pte_offset
  42. pte_free_kernel
  43. pte_alloc_kernel
  44. pmd_free_kernel
  45. pmd_alloc_kernel
  46. pte_free
  47. pte_alloc
  48. pmd_free
  49. pmd_alloc
  50. pgd_free
  51. pgd_alloc
  52. update_mmu_cache

   1 /* Last edited: Nov  7 23:44 1995 (cort) */
   2 #ifndef _PPC_PGTABLE_H
   3 #define _PPC_PGTABLE_H
   4 
   5 #include <asm/page.h>
   6 #include <asm/mmu.h>
   7 
   8 /*
   9  * Memory management on the PowerPC is a software emulation of the i386
  10  * MMU folded onto the PowerPC hardware MMU.  The emulated version looks
  11  * and behaves like the two-level i386 MMU.  Entries from these tables
  12  * are merged into the PowerPC hashed MMU tables, on demand, treating the
  13  * hashed tables like a special cache.
  14  *
  15  * Since the PowerPC does not have separate kernel and user address spaces,
  16  * the user virtual address space must be a [proper] subset of the kernel
  17  * space.  Thus, all tasks will have a specific virtual mapping for the
  18  * user virtual space and a common mapping for the kernel space.  The
  19  * simplest way to split this was literally in half.  Also, life is so
  20  * much simpler for the kernel if the machine hardware resources are
  21  * always mapped in.  Thus, some additional space is given up to the
  22  * kernel space to accommodate this.
  23  *
  24  * CAUTION! Some of the trade-offs make sense for the PreP platform on
  25  * which this code was originally developed.  When it migrates to other
  26  * PowerPC environments, some of the assumptions may fail and the whole
  27  * setup may need to be reevaluated.
  28  *
  29  * On the PowerPC, page translations are kept in a hashed table.  There
  30  * is exactly one of these tables [although the architecture supports
  31  * an arbitrary number].  Page table entries move in/out of this hashed
  32  * structure on demand, with the kernel filling in entries as they are
  33  * needed.  Just where a page table entry hits in the hashed table is a
  34  * function of the hashing which is in turn based on the upper 4 bits
  35  * of the logical address.  These 4 bits address a "virtual segment id"
  36  * which is unique per task/page combination for user addresses and
  37  * fixed for the kernel addresses.  Thus, the kernel space can be simply
  38  * shared [indeed at low overhead] among all tasks.
  39  *
  40  * The basic virtual address space is thus:
  41  *
  42  * 0x0XXXXXXX  --+
  43  * 0x1XXXXXXX    |
  44  * 0x2XXXXXXX    |  User address space.
  45  * 0x3XXXXXXX    |
  46  * 0x4XXXXXXX    |
  47  * 0x5XXXXXXX    |
  48  * 0x6XXXXXXX    |
  49  * 0x7XXXXXXX  --+
  50  * 0x8XXXXXXX       PCI/ISA I/O space
  51  * 0x9XXXXXXX  --+
  52  * 0xAXXXXXXX    |  Kernel virtual memory
  53  * 0xBXXXXXXX  --+
  54  * 0xCXXXXXXX       PCI/ISA Memory space
  55  * 0xDXXXXXXX
  56  * 0xEXXXXXXX
  57  * 0xFXXXXXXX       Board I/O space
  58  *
  59  * CAUTION!  One of the real problems here is keeping the software
  60  * managed tables coherent with the hardware hashed tables.  When
  61  * the software decides to update the table, it's normally easy to
  62  * update the hardware table.  But when the hardware tables need to be
  63  * changed, e.g. as the result of a page fault, it's more difficult
  64  * to reflect those changes back into the software entries.  Currently,
  65  * this process is quite crude, with updates causing the entire set
  66  * of tables to become invalidated.  Some performance could certainly
  67  * be regained by improving this.
  68  *
  69  * The Linux memory management assumes a three-level page table setup. On
  70  * the i386, we use that, but "fold" the mid level into the top-level page
  71  * table, so that we physically have the same two-level page table as the
  72  * i386 mmu expects.
  73  *
  74  * This file contains the functions and defines necessary to modify and use
  75  * the i386-style page table tree emulated here.
  76  */
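     /*
      * A rough sketch of how the hardware side carves up an effective
      * address under this scheme (the variable names are illustrative only):
      *
      *      unsigned long ea   = 0x9001f000;           -- some kernel address
      *      unsigned int  sr   = ea >> 28;             -- segment register 0x9, supplies the VSID
      *      unsigned long pidx = (ea >> 12) & 0xffff;  -- 16-bit page index, hashed with the VSID
      *      unsigned long off  = ea & 0xfff;           -- byte offset within the 4kB page
      */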
  77 
  78 /* PMD_SHIFT determines the size of the area a second-level page table can map */
  79 #define PMD_SHIFT       22
  80 #define PMD_SIZE        (1UL << PMD_SHIFT)
  81 #define PMD_MASK        (~(PMD_SIZE-1))
  82 
  83 /* PGDIR_SHIFT determines what a third-level page table entry can map */
  84 #define PGDIR_SHIFT     22
  85 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
  86 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
  87 
  88 /*
  89  * entries per page directory level: the i386 is two-level, so
  90  * we don't really have any PMD directory physically.
  91  */
  92 #define PTRS_PER_PTE    1024
  93 #define PTRS_PER_PMD    1
  94 #define PTRS_PER_PGD    1024
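     /*
      * A worked example of the folded lookup (the address is arbitrary):
      * with PGDIR_SHIFT == 22 and the usual 4kB PAGE_SHIFT of 12, the
      * virtual address 0x00d2b123 decomposes as
      *
      *      pgd index : 0x00d2b123 >> 22                      =     3
      *      pte index : (0x00d2b123 >> 12) & (PTRS_PER_PTE-1) = 0x12b
      *      offset    : 0x00d2b123 & ~PAGE_MASK               = 0x123
      *
      * and the single-entry pmd level adds nothing to the walk.
      */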
  95 
  96 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  97  * current 8MB value just means that there will be an 8MB "hole" after the
  98  * physical memory until the kernel virtual memory starts.  That means that
  99  * any out-of-bounds memory accesses will hopefully be caught.
 100  * The vmalloc() routines leave a hole of 4kB between each vmalloced
 101  * area for the same reason. ;)
 102  */
 103 #define VMALLOC_OFFSET  (8*1024*1024)
 104 #define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 105 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
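     /*
      * For example, if high_memory were 0x01000000 (purely illustrative),
      * then
      *
      *      VMALLOC_START = (0x01000000 + 0x00800000) & ~(0x00800000 - 1)
      *                    = 0x01800000
      *
      * i.e. vmalloc space starts 8MB above the top of physical memory,
      * rounded down to an 8MB boundary.
      */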
 106 
 107 #define _PAGE_PRESENT   0x001
 108 #define _PAGE_RW        0x002
 109 #define _PAGE_USER      0x004
 110 #define _PAGE_PCD       0x010
 111 #define _PAGE_ACCESSED  0x020
 112 #define _PAGE_DIRTY     0x040
 113 #define _PAGE_COW       0x200   /* implemented in software (one of the AVL bits) */
 114 
 115 #define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
 116 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
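     /*
      * Spelled out (assuming the usual 4kB PAGE_MASK of 0xfffff000):
      *
      *      _PAGE_TABLE    == 0x001 | 0x002 | 0x004 | 0x020 | 0x040 == 0x067
      *      _PAGE_CHG_MASK == 0xfffff000 | 0x020 | 0x040            == 0xfffff060
      *
      * so pte_modify() below preserves only the page frame number and the
      * ACCESSED/DIRTY bits when a page's protection is changed.
      */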
 117 
 118 #define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
 119 #define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
 120 #define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_COW)
 121 #define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 122 #define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 123 
 124 /*
 125  * The i386 can't do page protection for execute, and considers it the same as read.
 126  * Also, write permissions imply read permissions. This is the closest we can get..
 127  */
 128 #define __P000  PAGE_NONE
 129 #define __P001  PAGE_READONLY
 130 #define __P010  PAGE_COPY
 131 #define __P011  PAGE_COPY
 132 #define __P100  PAGE_READONLY
 133 #define __P101  PAGE_READONLY
 134 #define __P110  PAGE_COPY
 135 #define __P111  PAGE_COPY
 136 
 137 #define __S000  PAGE_NONE
 138 #define __S001  PAGE_READONLY
 139 #define __S010  PAGE_SHARED
 140 #define __S011  PAGE_SHARED
 141 #define __S100  PAGE_READONLY
 142 #define __S101  PAGE_READONLY
 143 #define __S110  PAGE_SHARED
 144 #define __S111  PAGE_SHARED
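     /*
      * These sixteen macros are gathered by the generic mm code into a
      * table indexed by the read/write/exec/shared bits of a vma, roughly
      * (sketch only; the real table lives outside this file):
      *
      *      pgprot_t protection_map[16] = {
      *              __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
      *              __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
      *      };
      *
      * so a writable private mapping ends up with PAGE_COPY (copy-on-write)
      * while a writable shared mapping gets PAGE_SHARED.
      */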
 145 
 146 /*
 147  * Define this if things work differently on an i386 and an i486:
 148  * it will (on an i486) warn about kernel memory accesses that are
 149  * done without a 'verify_area(VERIFY_WRITE,..)'
 150  */
 151 #undef CONFIG_TEST_VERIFY_AREA
 152 
 153 /* page table for 0-4MB for everybody */
 154 extern unsigned long pg0[1024];
 155 
 156 /*
 157  * BAD_PAGETABLE is used when we need a bogus page-table, while
 158  * BAD_PAGE is used for a bogus page.
 159  *
 160  * ZERO_PAGE is a global shared page that is always zero: used
 161  * for zero-mapped memory areas etc..
 162  */
 163 extern pte_t __bad_page(void);
 164 extern pte_t * __bad_pagetable(void);
 165 
 166 extern unsigned long __zero_page(void);
 167 
 168 #define BAD_PAGETABLE __bad_pagetable()
 169 #define BAD_PAGE __bad_page()
 170 #define ZERO_PAGE __zero_page()
 171 
 172 /* number of bits that fit into a memory pointer */
 173 #define BITS_PER_PTR                    (8*sizeof(unsigned long))
 174 
 175 /* to align the pointer to a pointer address */
 176 #define PTR_MASK                        (~(sizeof(void*)-1))
 177 
 178 /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
 179 /* 64-bit machines, beware!  SRB. */
 180 #define SIZEOF_PTR_LOG2                 2
 181 
 182 /* to find an entry in a page-table */
 183 #define PAGE_PTR(address) \
 184 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
 185 
 186 /* to set the page-dir */
 187 /* tsk is a task_struct and pgdir is a pgd_t pointer */
 188 #define SET_PAGE_DIR(tsk,pgdir) \
 189 do { \
 190         (tsk)->tss.pg_tables = (unsigned long *)(pgdir); \
 191         if ((tsk) == current) \
 192         { \
 193 /*_printk("Change page tables = %x\n", pgdir);*/ \
 194         } \
 195 } while (0)
 196 
 197 extern unsigned long high_memory;
 198 
 199 extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
 200 extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
 201 extern inline int pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved; }
 202 /*extern inline int pte_inuse(pte_t *ptep)      { return mem_map[MAP_NR(ptep)] != 1; }*/
 203 extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
 204 extern inline void pte_reuse(pte_t * ptep)
 205 {
 206         if (!mem_map[MAP_NR(ptep)].reserved)
 207                 mem_map[MAP_NR(ptep)].count++;
 208 }
 209 /*
 210    extern inline void pte_reuse(pte_t * ptep)
 211 {
 212         if (!(mem_map[MAP_NR(ptep)] & MAP_PAGE_RESERVED))
 213                 mem_map[MAP_NR(ptep)]++;
 214 }
 215 */
 216 extern inline int pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
 217 extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE; }
 218 extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_PRESENT; }
 219 extern inline int pmd_inuse(pmd_t *pmdp)        { return 0; }
 220 extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = 0; }
 221 extern inline void pmd_reuse(pmd_t * pmdp)      { }
 222 
 223 /*
 224  * The "pgd_xxx()" functions here are trivial for a folded two-level
 225  * setup: the pgd is never bad, and a pmd always exists (as it's folded
 226  * into the pgd entry)
 227  */
 228 extern inline int pgd_none(pgd_t pgd)           { return 0; }
 229 extern inline int pgd_bad(pgd_t pgd)            { return 0; }
 230 extern inline int pgd_present(pgd_t pgd)        { return 1; }
 231 /*extern inline int pgd_inuse(pgd_t * pgdp)     { return mem_map[MAP_NR(pgdp)] != 1; }*/
 232 extern inline int pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved;  }
 233 extern inline void pgd_clear(pgd_t * pgdp)      { }
 234 
 235 /*
 236 extern inline void pgd_reuse(pgd_t * pgdp)
 237 {
 238         if (!mem_map[MAP_NR(pgdp)].reserved)
 239                 mem_map[MAP_NR(pgdp)].count++;
 240 }
 241 */
 242 
 243 /*
 244  * The following only work if pte_present() is true.
 245  * Undefined behaviour if not..
 246  */
 247 extern inline int pte_read(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
 248 extern inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_RW; }
 249 extern inline int pte_exec(pte_t pte)           { return pte_val(pte) & _PAGE_USER; }
 250 extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
 251 extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
 252 extern inline int pte_cow(pte_t pte)            { return pte_val(pte) & _PAGE_COW; }
 253 
 254 extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_RW; return pte; }
 255 extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_USER; return pte; }
 256 extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) &= ~_PAGE_USER; return pte; }
 257 extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 258 extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 259 extern inline pte_t pte_uncow(pte_t pte)        { pte_val(pte) &= ~_PAGE_COW; return pte; }
 260 extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) |= _PAGE_RW; return pte; }
 261 extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) |= _PAGE_USER; return pte; }
 262 extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) |= _PAGE_USER; return pte; }
 263 extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 264 extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 265 extern inline pte_t pte_mkcow(pte_t pte)        { pte_val(pte) |= _PAGE_COW; return pte; }
 266 
 267 /*
 268  * Conversion functions: convert a page and protection to a page entry,
 269  * and a page entry and page directory to the page they refer to.
 270  */
 271 extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
 272 { pte_t pte; pte_val(pte) = page | pgprot_val(pgprot); return pte; }
 273 
 274 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 275 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
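     /*
      * Typical use (sketch only; "page" stands for the physical address of
      * a page obtained elsewhere):
      *
      *      pte_t pte = mk_pte(page, PAGE_SHARED);     -- writable user mapping
      *      pte = pte_modify(pte, PAGE_READONLY);      -- e.g. for mprotect();
      *                                                    the frame and the
      *                                                    ACCESSED/DIRTY bits
      *                                                    survive via _PAGE_CHG_MASK
      */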
 276 
 277 /*extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 278 { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
 279 */
 280 extern inline unsigned long pte_page(pte_t pte)
 281 { return pte_val(pte) & PAGE_MASK; }
 282 
 283 extern inline unsigned long pmd_page(pmd_t pmd)
 284 { return pmd_val(pmd) & PAGE_MASK; }
 285 
 286 
 287 /* to find an entry in a page-table-directory */
 288 extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
 289 {
 290         return mm->pgd + (address >> PGDIR_SHIFT);
 291 }
 292 
 293 /* Find an entry in the second-level page table.. */
 294 extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 295 {
 296         return (pmd_t *) dir;
 297 }
 298 
 299 /* Find an entry in the third-level page table.. */ 
 300 extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
 301 {
 302         return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 303 }
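     /*
      * The three lookups above compose into a full software walk; a minimal
      * sketch (va_to_pte() is a hypothetical helper, not defined anywhere):
      *
      *      static inline pte_t * va_to_pte(struct mm_struct * mm, unsigned long va)
      *      {
      *              pgd_t * pgd = pgd_offset(mm, va);
      *              pmd_t * pmd = pmd_offset(pgd, va);      -- no-op: pmd is folded
      *
      *              if (pmd_none(*pmd) || pmd_bad(*pmd))
      *                      return NULL;
      *              return pte_offset(pmd, va);
      *      }
      */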
 304 
 305 
 306 /*
 307  * Allocate and free page tables. The xxx_kernel() versions are
 308  * used to allocate a kernel page table - this turns on ASN bits
 309  * if any, and marks the page tables reserved.
 310  */
 311 extern inline void pte_free_kernel(pte_t * pte)
 312 {
 313         mem_map[MAP_NR(pte)].reserved = 1;
 314         free_page((unsigned long) pte);
 315 }
 316 /*extern inline void pte_free_kernel(pte_t * pte)
 317 {
 318         mem_map[MAP_NR(pte)] = 1;
 319         free_page((unsigned long) pte);
 320 }
 321 */
 322 
 323 /*
 324 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 325 {
 326         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 327         if (pmd_none(*pmd)) {
 328                 pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
 329                 if (pmd_none(*pmd)) {
 330                         if (page) {
 331                                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
 332                                 mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
 333                                 return page + address;
 334                         }
 335                         pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 336                         return NULL;
 337                 }
 338                 free_page((unsigned long) page);
 339         }
 340         if (pmd_bad(*pmd)) {
 341                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 342                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 343                 return NULL;
 344         }
 345         return (pte_t *) pmd_page(*pmd) + address;
 346 }*/
 347 /*
 348 extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 349 {
 350 printk("pte_alloc_kernel pmd = %08X, address = %08X\n", pmd, address);
 351         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 352 printk("address now = %08X\n", address);
 353         if (pmd_none(*pmd)) {
 354                 pte_t *page;
 355 printk("pmd_none(*pmd) true\n");
 356                 page = (pte_t *) get_free_page(GFP_KERNEL);
 357 printk("page = %08X after get_free_page(%08X)\n",page,GFP_KERNEL);
 358                 if (pmd_none(*pmd)) {
 359 printk("pmd_none(*pmd=%08X) still\n",*pmd);               
 360                         if (page) {
 361 printk("page true = %08X\n",page);                        
 362                                 pmd_set(pmd, page);
 363 printk("pmd_set(%08X,%08X)\n",pmd,page);                          
 364                                 mem_map[MAP_NR(page)].reserved = 1;
 365 printk("did mem_map\n",pmd,page);                         
 366                                 return page + address;
 367                         }
 368 printk("did pmd_set(%08X, %08X\n",pmd,BAD_PAGETABLE);                     
 369                         pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 370                         return NULL;
 371                 }
 372 printk("did free_page(%08X)\n",page);                                   
 373                 free_page((unsigned long) page);
 374         }
 375         if (pmd_bad(*pmd)) {
 376                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 377                 pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
 378                 return NULL;
 379         }
 380 printk("returning pmd_page(%08X) + %08X\n",pmd_page(*pmd) , address);     
 381 
 382         return (pte_t *) pmd_page(*pmd) + address;
 383 }
 384 */
 385 extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
 386 {
 387         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 388         if (pmd_none(*pmd)) {
 389                 pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
 390                 if (pmd_none(*pmd)) {
 391                         if (page) {
 392 /*                                pmd_set(pmd,page);*/
 393                                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
 394                                 mem_map[MAP_NR(page)].reserved = 1;
 395                                 return page + address;
 396                         }
 397 /*                      pmd_set(pmd, BAD_PAGETABLE);*/
 398                         pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 399                         return NULL;
 400                 }
 401                 free_page((unsigned long) page);
 402         }
 403         if (pmd_bad(*pmd)) {
 404                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 405 /*              pmd_set(pmd, (pte_t *) BAD_PAGETABLE);          */
 406                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 407                 return NULL;
 408         }
 409         return (pte_t *) pmd_page(*pmd) + address;
 410 }
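     /*
      * Note that *pmd is tested a second time after get_free_page(), which
      * may sleep; if another context installed a page table in the meantime,
      * the freshly allocated page is simply freed again.  pte_alloc() below
      * follows the same pattern.
      */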
 411 
 412 /*
 413  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 414  * inside the pgd, so has no extra memory associated with it.
 415  */
 416 extern inline void pmd_free_kernel(pmd_t * pmd)
 417 {
 418 }
 419 
 420 extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
 421 {
 422         return (pmd_t *) pgd;
 423 }
 424 
 425 extern inline void pte_free(pte_t * pte)
 426 {
 427         free_page((unsigned long) pte);
 428 }
 429 
 430 extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
 431 {
 432         address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 433         if (pmd_none(*pmd)) {
 434                 pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
 435                 if (pmd_none(*pmd)) {
 436                         if (page) {
 437                                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) page;
 438                                 return page + address;
 439                         }
 440                         pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 441                         return NULL;
 442                 }
 443                 free_page((unsigned long) page);
 444         }
 445         if (pmd_bad(*pmd)) {
 446                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 447                 pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) BAD_PAGETABLE;
 448                 return NULL;
 449         }
 450         return (pte_t *) pmd_page(*pmd) + address;
 451 }
 452 
 453 /*
 454  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 455  * inside the pgd, so has no extra memory associated with it.
 456  */
 457 extern inline void pmd_free(pmd_t * pmd)
 458 {
 459 }
 460 
 461 extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
 462 {
 463         return (pmd_t *) pgd;
 464 }
 465 
 466 extern inline void pgd_free(pgd_t * pgd)
 467 {
 468         free_page((unsigned long) pgd);
 469 }
 470 
 471 extern inline pgd_t * pgd_alloc(void)
 472 {
 473         return (pgd_t *) get_free_page(GFP_KERNEL);
 474 }
 475 
 476 extern pgd_t swapper_pg_dir[1024*8];
 477 /*extern pgd_t *swapper_pg_dir;*/
 478 
 479 /*
 480  * Software maintained MMU tables may have changed -- update the
 481  * hardware [aka cache]
 482  */
 483 extern inline void update_mmu_cache(struct vm_area_struct * vma,
 484         unsigned long address, pte_t _pte)
 485 {
 486 #if 0
 487         printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
 488         _printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
 489 /*      MMU_hash_page(&(vma->vm_task)->tss, address & PAGE_MASK, (pte *)&_pte);*/
 490 #endif  
 491         MMU_hash_page(&(current)->tss, address & PAGE_MASK, (pte *)&_pte);
 492 
 493 }
 494 
 495 
 496 #ifdef _SCHED_INIT_
 497 #define INIT_MMAP { &init_task, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
 498 
 499 #endif  
 500 
 501 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
 502 #define SWP_OFFSET(entry) ((entry) >> 8)
 503 #define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
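     /*
      * A worked example: SWP_ENTRY(3, 0x10) == (3 << 1) | (0x10 << 8) == 0x1006,
      * from which SWP_TYPE() recovers 3 and SWP_OFFSET() recovers 0x10.  Bit 0
      * is left clear, so a swap entry can never look like a present pte
      * (_PAGE_PRESENT).
      */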
 504 
 505 #endif /* _PPC_PGTABLE_H */
