root/include/asm-mips/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. pte_page
  2. pmd_page
  3. pmd_set
  4. pte_none
  5. pte_present
  6. pte_inuse
  7. pte_clear
  8. pte_reuse
  9. pmd_none
  10. pmd_bad
  11. pmd_present
  12. pmd_inuse
  13. pmd_clear
  14. pmd_reuse
  15. pgd_none
  16. pgd_bad
  17. pgd_present
  18. pgd_inuse
  19. pgd_clear
  20. pte_read
  21. pte_write
  22. pte_exec
  23. pte_dirty
  24. pte_young
  25. pte_cow
  26. pte_wrprotect
  27. pte_rdprotect
  28. pte_exprotect
  29. pte_mkclean
  30. pte_mkold
  31. pte_uncow
  32. pte_mkwrite
  33. pte_mkread
  34. pte_mkexec
  35. pte_mkdirty
  36. pte_mkyoung
  37. pte_mkcow
  38. mk_pte
  39. pte_modify
  40. pgd_offset
  41. pmd_offset
  42. pte_offset
  43. pte_free_kernel
  44. pte_alloc_kernel
  45. pmd_free_kernel
  46. pmd_alloc_kernel
  47. pte_free
  48. pte_alloc
  49. pmd_free
  50. pmd_alloc
  51. pgd_free
  52. pgd_init
  53. pgd_alloc

#ifndef __ASM_MIPS_PGTABLE_H
#define __ASM_MIPS_PGTABLE_H

#ifndef __LANGUAGE_ASSEMBLY__

#include <linux/linkage.h>
#include <asm/cachectl.h>

/*
 * The Linux memory management assumes a three-level page table setup. In
 * 32 bit mode we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects. The 64 bit version uses a three level setup.
 *
 * This file contains the functions and defines necessary to modify and use
 * the MIPS page table tree.  Note the frequent conversion between addresses
 * in KSEG0 and KSEG1.
 *
 * This is required due to the cache aliasing problem of the R4xx0 series.
 * Sometimes uncached accesses are also done to improve cache performance
 * slightly.  The R10000 caching mode "uncached accelerated" will help
 * even further.
 */

/*
 * TLB invalidation:
 *
 *  - invalidate() invalidates the current mm struct's TLB entries
 *  - invalidate_all() invalidates all processes' TLB entries
 *  - invalidate_mm(mm) invalidates the specified mm context's TLB entries
 *  - invalidate_page(mm, vmaddr) invalidates one page
 *  - invalidate_range(mm, start, end) invalidates a range of pages
 *
 * FIXME: MIPS has full control of all TLB activity in the CPU.  For now,
 * though, we just stick with complete flushing of the TLB.
 */
extern asmlinkage void tlbflush(void);
#define invalidate()    ({sys_cacheflush(0, ~0, BCACHE);tlbflush();})

#define invalidate_all() invalidate()
#define invalidate_mm(mm_struct) \
do { if ((mm_struct) == current->mm) invalidate(); } while (0)
#define invalidate_page(mm_struct,addr) \
do { if ((mm_struct) == current->mm) invalidate(); } while (0)
#define invalidate_range(mm_struct,start,end) \
do { if ((mm_struct) == current->mm) invalidate(); } while (0)

/*
 * We need a special version of copy_page that can handle virtual caches.
 * While we're tweaking the caches anyway, we can use that to make it
 * faster.  The R10000's accelerated caching mode will speed it up further.
 */
extern void __copy_page(unsigned long from, unsigned long to);
#define copy_page(from,to) __copy_page((unsigned long)from, (unsigned long)to)

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
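
/*
 * Illustrative sketch (not part of the original header): how a pte
 * update pairs with the invalidation macros above.  The function name
 * and parameters are hypothetical.
 */
extern inline void example_update_pte(struct mm_struct *mm, pte_t *ptep,
        pte_t entry, unsigned long address)
{
        set_pte(ptep, entry);           /* write the new entry */
        invalidate_page(mm, address);   /* currently a full TLB flush */
}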

#endif /* !defined (__LANGUAGE_ASSEMBLY__) */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: we use a two-level setup, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    1024

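/*
 * Worked example (added for illustration): with PGDIR_SHIFT == 22 and
 * 4kB pages, the virtual address 0x1234abcd decomposes as
 *
 *      pgd index: 0x1234abcd >> 22            = 0x048
 *      pte index: (0x1234abcd >> 12) & 0x3ff  = 0x34a
 *      offset   : 0x1234abcd & ~PAGE_MASK     = 0xbcd
 *
 * The pmd level is folded away (PTRS_PER_PMD == 1).
 */
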
#define VMALLOC_START     KSEG2
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

/*
 * Note that we shift the lower 32bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 * Since MIPS has chosen some quite misleading names for the
 * valid and dirty bits, they're defined here but only their synonyms
 * will be used.
 */
#define _PAGE_PRESENT               (1<<0)  /* implemented in software */
#define _PAGE_COW                   (1<<1)  /* implemented in software */
#define _PAGE_READ                  (1<<2)  /* implemented in software */
#define _PAGE_WRITE                 (1<<3)  /* implemented in software */
#define _PAGE_ACCESSED              (1<<4)  /* implemented in software */
#define _PAGE_MODIFIED              (1<<5)  /* implemented in software */
#define _PAGE_GLOBAL                (1<<6)
#define _PAGE_VALID                 (1<<7)
#define _PAGE_SILENT_READ           (1<<7)  /* synonym                 */
#define _PAGE_DIRTY                 (1<<8)  /* The MIPS dirty bit      */
#define _PAGE_SILENT_WRITE          (1<<8)
#define _CACHE_CACHABLE_NO_WA       (0<<9)  /* R4600 only              */
#define _CACHE_CACHABLE_WA          (1<<9)  /* R4600 only              */
#define _CACHE_UNCACHED             (2<<9)  /* R4[0246]00              */
#define _CACHE_CACHABLE_NONCOHERENT (3<<9)  /* R4[0246]00              */
#define _CACHE_CACHABLE_CE          (4<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_COW         (5<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_CUW         (6<<9)  /* R4[04]00 only           */
#define _CACHE_CACHABLE_ACCELERATED (7<<9)  /* R10000 only             */
#define _CACHE_MASK                 (7<<9)

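/*
 * Layout sketch (added for illustration) of a 32-bit software pte as
 * the definitions above compose it:
 *
 *   31                    12 11  9  8 7 6 5 4 3 2 1 0
 *  +------------------------+-----+--+-+-+-+-+-+-+-+-+
 *  |   page frame number    |cache|D |V|G|M|A|W|R|C|P|
 *  +------------------------+-----+--+-+-+-+-+-+-+-+-+
 *
 * P (_PAGE_PRESENT), C (_PAGE_COW), R (_PAGE_READ), W (_PAGE_WRITE),
 * A (_PAGE_ACCESSED) and M (_PAGE_MODIFIED) are software bits; G, V
 * (_PAGE_VALID), D (_PAGE_DIRTY) and the cache mode are the fields the
 * hardware sees after the 6-bit shift mentioned above.
 */
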
#define __READABLE      (_PAGE_READ|_PAGE_SILENT_READ|_PAGE_ACCESSED)
#define __WRITEABLE     (_PAGE_WRITE|_PAGE_SILENT_WRITE|_PAGE_MODIFIED)

#define _PAGE_TABLE     (_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                        _PAGE_DIRTY | _CACHE_UNCACHED)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _CACHE_MASK)

#define PAGE_NONE       __pgprot(_PAGE_PRESENT | __READABLE | _CACHE_UNCACHED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | __READABLE | _PAGE_WRITE | \
                        _PAGE_ACCESSED | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | __READABLE | _PAGE_COW | \
                        _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | __READABLE | \
                        _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                        _CACHE_CACHABLE_NONCOHERENT)

/*
 * MIPS can't do page protection for execute, and considers it the same
 * as read.  Also, write permission implies read permission.  This is
 * the closest we can get by reasonable means..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

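/*
 * Added note (illustration): the generic mm code picks one of these
 * through its protection map, indexed by a vma's read/write/exec bits.
 * E.g. a private PROT_READ|PROT_WRITE mapping selects __P011 ==
 * PAGE_COPY, so the first write faults and triggers copy-on-write,
 * while the shared variant selects __S011 == PAGE_SHARED.
 */
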
#if !defined (__LANGUAGE_ASSEMBLY__)

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR                    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK                        (~(sizeof(void*)-1))

/*
 * sizeof(void*)==1<<SIZEOF_PTR_LOG2
 */
#if __mips == 3
#define SIZEOF_PTR_LOG2                 3
#else
#define SIZEOF_PTR_LOG2                 2
#endif

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
        (tsk)->tss.pg_dir = ((unsigned long) (pgdir)) - PT_OFFSET; \
        if ((tsk) == current) \
        { \
                void load_pgd(unsigned long pg_dir); \
 \
                load_pgd((tsk)->tss.pg_dir); \
        } \
} while (0)

extern unsigned long high_memory;
extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)];

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + (pmd_val(pmd) & PAGE_MASK); }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((unsigned long) ptep - PT_OFFSET); }

extern inline int pte_none(pte_t pte)           { return !pte_val(pte); }
extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
extern inline int pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
extern inline void pte_reuse(pte_t * ptep)
{
        if (!mem_map[MAP_NR(ptep)].reserved)
                mem_map[MAP_NR(ptep)].count++;
}

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
extern inline int pmd_none(pmd_t pmd)           { return (pmd_val(pmd) & PAGE_MASK) == ((unsigned long) invalid_pte_table - PAGE_OFFSET); }

extern inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & ~PAGE_MASK) != _PAGE_TABLE ||
                pmd_page(pmd) > high_memory ||
                pmd_page(pmd) < PAGE_OFFSET;
}
extern inline int pmd_present(pmd_t pmd)        { return pmd_val(pmd) & _PAGE_PRESENT; }
extern inline int pmd_inuse(pmd_t *pmdp)        { return 0; }
extern inline void pmd_clear(pmd_t * pmdp)      { pmd_val(*pmdp) = ((unsigned long) invalid_pte_table - PAGE_OFFSET); }
extern inline void pmd_reuse(pmd_t * pmdp)      { }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)           { return 0; }
extern inline int pgd_bad(pgd_t pgd)            { return 0; }
extern inline int pgd_present(pgd_t pgd)        { return 1; }
extern inline int pgd_inuse(pgd_t * pgdp)       { return mem_map[MAP_NR(pgdp)].reserved; }
extern inline void pgd_clear(pgd_t * pgdp)      { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)           { return pte_val(pte) & _PAGE_READ; }
extern inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_WRITE; }
extern inline int pte_exec(pte_t pte)           { return pte_val(pte) & _PAGE_READ; }
extern inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_MODIFIED; }
extern inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_cow(pte_t pte)            { return pte_val(pte) & _PAGE_COW; }

extern inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pte;
}
extern inline pte_t pte_rdprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); return pte;
}
extern inline pte_t pte_exprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ); return pte;
}
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ|_PAGE_SILENT_WRITE); return pte; }
extern inline pte_t pte_uncow(pte_t pte)        { pte_val(pte) &= ~_PAGE_COW; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}
extern inline pte_t pte_mkread(pte_t pte)
{
        pte_val(pte) |= _PAGE_READ;
        if (pte_val(pte) & _PAGE_ACCESSED)
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}
extern inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_READ;
        if (pte_val(pte) & _PAGE_ACCESSED)
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}
extern inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}
extern inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        if (pte_val(pte) & _PAGE_READ)
        {
                pte_val(pte) |= _PAGE_SILENT_READ;
                if ((pte_val(pte) & (_PAGE_WRITE|_PAGE_MODIFIED)) == (_PAGE_WRITE|_PAGE_MODIFIED))
                        pte_val(pte) |= _PAGE_SILENT_WRITE;
        }
        return pte;
}
extern inline pte_t pte_mkcow(pte_t pte)
{
        pte_val(pte) |= _PAGE_COW;
        return pte;
}

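/*
 * Added note (illustration): _PAGE_MODIFIED and _PAGE_ACCESSED are pure
 * software bits; the hardware-visible _PAGE_SILENT_WRITE/_PAGE_SILENT_READ
 * bits are only set once the software bits allow it.  A freshly touched
 * pte is thus built up as in this hedged sketch (the helper name is
 * hypothetical):
 */
extern inline pte_t example_touch_pte(pte_t pte)
{
        pte = pte_mkyoung(pte);  /* sets _PAGE_ACCESSED, plus            */
                                 /* _PAGE_SILENT_READ if readable        */
        pte = pte_mkdirty(pte);  /* sets _PAGE_MODIFIED, plus            */
                                 /* _PAGE_SILENT_WRITE if writable       */
        return pte;
}
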
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (page - PAGE_OFFSET) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

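/*
 * Illustrative sketch (not part of the original header): for a
 * page-aligned KSEG0 address, mk_pte and pte_page are inverses on the
 * page frame part.  The helper name is hypothetical.
 */
extern inline unsigned long example_pte_roundtrip(unsigned long page)
{
        pte_t pte = mk_pte(page, PAGE_READONLY);
        return pte_page(pte);   /* == page again; the protection bits
                                   live below PAGE_SHIFT and are masked
                                   off by PAGE_MASK */
}
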
/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) (pmd_page(*dir) + (PT_OFFSET - PAGE_OFFSET)) +
               ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

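/*
 * Illustrative sketch (not part of the original header): the canonical
 * three-step walk from an mm and a virtual address down to the pte,
 * exactly as the generic mm code strings the helpers above together.
 * The function name is hypothetical.
 */
extern inline pte_t * example_walk(struct mm_struct * mm, unsigned long address)
{
        pgd_t * pgd = pgd_offset(mm, address);
        pmd_t * pmd = pmd_offset(pgd, address);  /* folded: returns pgd */
        return pte_offset(pmd, address);
}
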
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any, and marks the page tables reserved.
 */
extern inline void pte_free_kernel(pte_t * pte)
{
        unsigned long page = (unsigned long) pte;

        mem_map[MAP_NR(pte)].reserved = 0;
        if(!page)
                return;
        page -= (PT_OFFSET - PAGE_OFFSET);
        free_page(page);
}

extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                unsigned long page = __get_free_page(GFP_KERNEL);
                /* __get_free_page() may sleep, so re-check that nobody
                   installed a page table for us in the meantime. */
                if (pmd_none(*pmd)) {
                        if (page) {
                                mem_map[MAP_NR(page)].reserved = 1;
                                memset((void *) page, 0, PAGE_SIZE);
                                sys_cacheflush((void *)page, PAGE_SIZE, DCACHE);
                                sync_mem();
                                page += (PT_OFFSET - PAGE_OFFSET);
                                pmd_set(pmd, (pte_t *)page);
                                return ((pte_t *)page) + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page(page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) (pmd_page(*pmd) + (PT_OFFSET - PAGE_OFFSET)) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free_kernel(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void pte_free(pte_t * pte)
{
        unsigned long page = (unsigned long) pte;

        if(!page)
                return;
        page -= (PT_OFFSET - PAGE_OFFSET);
        free_page(page);
}

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                unsigned long page = __get_free_page(GFP_KERNEL);
                /* Re-check after the possibly sleeping allocation. */
                if (pmd_none(*pmd)) {
                        if (page) {
                                memset((void *) page, 0, PAGE_SIZE);
                                sys_cacheflush((void *)page, PAGE_SIZE, DCACHE);
                                sync_mem();
                                page += (PT_OFFSET - PAGE_OFFSET);
                                pmd_set(pmd, (pte_t *)page);
                                return ((pte_t *)page) + address;
                        }
                        pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                        return NULL;
                }
                free_page(page);
        }
        if (pmd_bad(*pmd)) {
                printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
                pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
                return NULL;
        }
        return (pte_t *) (pmd_page(*pmd) + (PT_OFFSET - PAGE_OFFSET)) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
        return (pmd_t *) pgd;
}

extern inline void pgd_free(pgd_t * pgd)
{
        unsigned long page = (unsigned long) pgd;

        if(!page)
                return;
        page -= (PT_OFFSET - PAGE_OFFSET);
        free_page(page);
}

/*
 * Initialize new page directory with pointers to invalid ptes
 */
extern inline void pgd_init(unsigned long page)
{
        unsigned long dummy1, dummy2;

        page += (PT_OFFSET - PAGE_OFFSET);
#if __mips >= 3
        /*
         * I want fun - I'm stepping on the gas, stepping on the gas...
         * (Doubleword stores initialize two pmd entries per iteration.)
         */
        __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                ".set\tmips3\n\t"
                "dsll32\t$1,%2,0\n\t"
                "dsrl32\t%2,$1,0\n\t"
                "or\t%2,$1\n"
                "1:\tsd\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,8\n\t"
                ".set\tmips0\n\t"
                ".set\tat\n\t"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) |
                       _PAGE_TABLE),
                 "0" (page),
                 "1" (PAGE_SIZE/(sizeof(pmd_t)*2))
                :"$1");
#else
        __asm__ __volatile__(
                ".set\tnoreorder\n"
                "1:\tsw\t%2,(%0)\n\t"
                "subu\t%1,1\n\t"
                "bnez\t%1,1b\n\t"
                "addiu\t%0,4\n\t"
                ".set\treorder"
                :"=r" (dummy1),
                 "=r" (dummy2)
                :"r" (((unsigned long) invalid_pte_table - PAGE_OFFSET) |
                       _PAGE_TABLE),
                 "0" (page),
                 "1" (PAGE_SIZE/sizeof(pmd_t)));
#endif
}

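/*
 * Added for illustration: a plain C equivalent of the assembler loops
 * above (a hedged sketch; the real code uses hand-written asm for
 * speed).  Every pmd slot ends up pointing at invalid_pte_table, so
 * pmd_none() is true for the whole new page directory.  The function
 * name is hypothetical.
 */
extern inline void example_pgd_init_c(unsigned long page)
{
        pmd_t * p = (pmd_t *) (page + (PT_OFFSET - PAGE_OFFSET));
        int i;

        for (i = 0; i < PAGE_SIZE/sizeof(pmd_t); i++)
                pmd_val(p[i]) = ((unsigned long) invalid_pte_table -
                                 PAGE_OFFSET) | _PAGE_TABLE;
}
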
extern inline pgd_t * pgd_alloc(void)
{
        unsigned long page;

        if(!(page = __get_free_page(GFP_KERNEL)))
                return NULL;

        sys_cacheflush((void *)page, PAGE_SIZE, DCACHE);
        sync_mem();
        pgd_init(page);

        return (pgd_t *) (page + (PT_OFFSET - PAGE_OFFSET));
}

extern pgd_t swapper_pg_dir[1024];

/*
 * MIPS doesn't need any external MMU info: the kernel page tables contain
 * all the necessary information.  We use this hook, though, to load the
 * TLB as early as possible with up-to-date information, avoiding
 * unnecessary exceptions.
 */
extern void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte);

#if __mips >= 3

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))

#else

#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))

#endif

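/*
 * Added note (illustration): in the 32-bit case a swap entry packs the
 * swap type into bits 7..1 and the offset from bit 8 up, leaving bit 0
 * clear so the entry is never _PAGE_PRESENT.  For example:
 *
 *      SWP_ENTRY(3, 0x1234) == (3 << 1) | (0x1234 << 8) == 0x123406
 *      SWP_TYPE(0x123406)   == 3
 *      SWP_OFFSET(0x123406) == 0x1234
 */
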
#endif /* !defined (__LANGUAGE_ASSEMBLY__) */

#endif /* __ASM_MIPS_PGTABLE_H */
