root/include/asm-sparc/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. remove_from_ctx_list
  2. add_to_ctx_list

   1 /* $Id: pgtable.h,v 1.46 1996/04/21 11:01:53 davem Exp $ */
   2 #ifndef _SPARC_PGTABLE_H
   3 #define _SPARC_PGTABLE_H
   4 
   5 /*  asm-sparc/pgtable.h:  Defines and functions used to work
   6  *                        with Sparc page tables.
   7  *
   8  *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
   9  */
  10 
  11 #include <linux/mm.h>
  12 #include <asm/asi.h>
  13 #include <asm/pgtsun4c.h>
  14 #include <asm/pgtsrmmu.h>
  15 #include <asm/vac-ops.h>
  16 #include <asm/oplib.h>
  17 #include <asm/sbus.h>
  18 
  19 extern void load_mmu(void);
  20 extern int io_remap_page_range(unsigned long from, unsigned long to,
  21                                unsigned long size, pgprot_t prot, int space);
  22 
  23 extern void (*quick_kernel_fault)(unsigned long);
  24 
  25 /* mmu-specific process creation/cloning/etc hooks. */
  26 extern void (*mmu_exit_hook)(void);
  27 extern void (*mmu_flush_hook)(void);
  28 
  29 /* translate between physical and virtual addresses */
  30 extern unsigned long (*mmu_v2p)(unsigned long);
  31 extern unsigned long (*mmu_p2v)(unsigned long);
  32 
  33 /* Routines for data transfer buffers. */
  34 extern char *(*mmu_lockarea)(char *, unsigned long);
  35 extern void  (*mmu_unlockarea)(char *, unsigned long);
  36 
  37 /* Routines for getting a dvma scsi buffer. */
  38 struct mmu_sglist {
  39         /* ick, I know... */
  40         char *addr;
  41         char *alt_addr;
  42         unsigned int len;
  43 };
  44 extern char *(*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
  45 extern void  (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
  46 extern void  (*mmu_release_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
  47 extern void  (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
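
/* Illustrative sketch (editorial addition, not part of the original header):
 * a hypothetical helper showing how a driver might use the single-buffer
 * hooks above to obtain a DVMA-visible address before starting a transfer.
 * The names example_dvma_transfer, cpu_buf and len are placeholders.
 */
static inline void example_dvma_transfer(struct linux_sbus *sbus,
                                         char *cpu_buf, unsigned long len)
{
        /* Map the CPU buffer so the SBUS device can reach it. */
        char *dvma_addr = mmu_get_scsi_one(cpu_buf, len, sbus);

        /* ... program the device with dvma_addr and wait for the DMA ... */

        /* Tear the mapping down once the device is finished with it. */
        mmu_release_scsi_one(dvma_addr, len, sbus);
}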
  48 
  49 extern unsigned int pmd_shift;
  50 extern unsigned int pmd_size;
  51 extern unsigned int pmd_mask;
  52 extern unsigned int (*pmd_align)(unsigned int);
  53 
  54 extern unsigned int pgdir_shift;
  55 extern unsigned int pgdir_size;
  56 extern unsigned int pgdir_mask;
  57 extern unsigned int (*pgdir_align)(unsigned int);
  58 
  59 extern unsigned int ptrs_per_pte;
  60 extern unsigned int ptrs_per_pmd;
  61 extern unsigned int ptrs_per_pgd;
  62 
  63 extern unsigned int ptrs_per_page;
  64 
  65 extern unsigned long (*(vmalloc_start))(void);
  66 
  67 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
  68 #define VMALLOC_START vmalloc_start()
  69 
  70 extern pgprot_t page_none;
  71 extern pgprot_t page_shared;
  72 extern pgprot_t page_copy;
  73 extern pgprot_t page_readonly;
  74 extern pgprot_t page_kernel;
  75 
  76 #define PMD_SHIFT      (pmd_shift)
  77 #define PMD_SIZE       (pmd_size)
  78 #define PMD_MASK       (pmd_mask)
  79 #define PMD_ALIGN      (pmd_align)
  80 #define PGDIR_SHIFT    (pgdir_shift)
  81 #define PGDIR_SIZE     (pgdir_size)
  82 #define PGDIR_MASK     (pgdir_mask)
  83 #define PGDIR_ALIGN    (pgdir_align)
  84 #define PTRS_PER_PTE   (ptrs_per_pte)
  85 #define PTRS_PER_PMD   (ptrs_per_pmd)
  86 #define PTRS_PER_PGD   (ptrs_per_pgd)
  87 
  88 #define PAGE_NONE      (page_none)
  89 #define PAGE_SHARED    (page_shared)
  90 #define PAGE_COPY      (page_copy)
  91 #define PAGE_READONLY  (page_readonly)
  92 #define PAGE_KERNEL    (page_kernel)
  93 #define PAGE_INVALID   (page_invalid)
  94 
  95 /* Top-level page directory */
  96 extern pgd_t swapper_pg_dir[1024];
  97 
  98 /* Page table for 0-4MB for everybody; on the Sparc this
  99  * holds the same as on the i386.
 100  */
 101 extern pte_t pg0[1024];
 102 
 103 extern unsigned long ptr_in_current_pgd;
 104 
 105 /* the no. of pointers that fit on a page: this will go away */
 106 #define PTRS_PER_PAGE   (PAGE_SIZE/sizeof(void*))
 107 
 108 /* Here is a trick: since mmap.c needs the initializer elements for
 109  * protection_map[] to be constant at compile time, I set the following
 110  * to all zeros.  I set them to the real values after I link in the
 111  * appropriate MMU page table routines at boot time.
 112  */
 113 #define __P000  __pgprot(0)
 114 #define __P001  __pgprot(0)
 115 #define __P010  __pgprot(0)
 116 #define __P011  __pgprot(0)
 117 #define __P100  __pgprot(0)
 118 #define __P101  __pgprot(0)
 119 #define __P110  __pgprot(0)
 120 #define __P111  __pgprot(0)
 121 
 122 #define __S000  __pgprot(0)
 123 #define __S001  __pgprot(0)
 124 #define __S010  __pgprot(0)
 125 #define __S011  __pgprot(0)
 126 #define __S100  __pgprot(0)
 127 #define __S101  __pgprot(0)
 128 #define __S110  __pgprot(0)
 129 #define __S111  __pgprot(0)
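
/* Illustrative sketch (editorial addition, not part of the original header):
 * the trick described above means mm/mmap.c is compiled with an all-zero
 * protection_map[].  Boot code along these lines could patch in the real
 * values once the MMU-specific page_* protections are known; the function
 * name and the exact table entries are only an example following the usual
 * read/write/shared conventions, not the actual Sparc setup routine.
 */
extern pgprot_t protection_map[16];    /* defined in mm/mmap.c */

static inline void example_fixup_protection_map(void)
{
        /* Private mappings: writes must fault so the page can be copied. */
        protection_map[0x0] = page_none;
        protection_map[0x1] = page_readonly;
        protection_map[0x2] = page_copy;
        protection_map[0x3] = page_copy;

        /* Shared mappings: writes go straight to the shared page. */
        protection_map[0x8] = page_none;
        protection_map[0x9] = page_readonly;
        protection_map[0xa] = page_shared;
        protection_map[0xb] = page_shared;

        /* Entries 0x4-0x7 and 0xc-0xf (PROT_EXEC set) follow the same pattern. */
}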
 130 
 131 extern int num_contexts;
 132 
 133 /*
 134  * BAD_PAGETABLE is used when we need a bogus page-table, while
 135  * BAD_PAGE is used for a bogus page.
 136  *
 137  * ZERO_PAGE is a global shared page that is always zero: used
 138  * for zero-mapped memory areas etc..
 139  */
 140 extern pte_t __bad_page(void);
 141 extern pte_t * __bad_pagetable(void);
 142 
 143 extern unsigned long empty_zero_page;
 144 
 145 #define BAD_PAGETABLE __bad_pagetable()
 146 #define BAD_PAGE __bad_page()
 147 #define ZERO_PAGE ((unsigned long)(&(empty_zero_page)))
 148 
 149 /* number of bits that fit into a memory pointer */
 150 #define BITS_PER_PTR      (8*sizeof(unsigned long))
 151 
 152 /* to align the pointer to a pointer address */
 153 #define PTR_MASK          (~(sizeof(void*)-1))
 154 
 155 #define SIZEOF_PTR_LOG2   2
 156 
 157 extern unsigned long (*pte_page)(pte_t);
 158 extern unsigned long (*pmd_page)(pmd_t);
 159 extern unsigned long (*pgd_page)(pgd_t);
 160 
 161 extern void (*sparc_update_rootmmu_dir)(struct task_struct *, pgd_t *pgdir);
 162 
 163 #define SET_PAGE_DIR(tsk,pgdir) sparc_update_rootmmu_dir(tsk, pgdir)
 164        
 165 /* to find an entry in a page-table */
 166 #define PAGE_PTR(address) \
 167 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
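
/* Worked example (editorial addition): with the usual 4K pages
 * (PAGE_SHIFT = 12) and 4-byte pointers, PAGE_PTR(0x1234567) is
 * (0x1234567 >> 10) & ~3 & 0xfff = 0x8d0, i.e. the byte offset of
 * pointer slot 0x8d0 / 4 = 0x234 within its page-table page.
 */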
 168 
 169 extern unsigned long high_memory;
 170 
 171 extern int (*pte_none)(pte_t);
 172 extern int (*pte_present)(pte_t);
 173 extern void (*pte_clear)(pte_t *);
 174 
 175 extern int (*pmd_none)(pmd_t);
 176 extern int (*pmd_bad)(pmd_t);
 177 extern int (*pmd_present)(pmd_t);
 178 extern void (*pmd_clear)(pmd_t *);
 179 
 180 extern int (*pgd_none)(pgd_t);
 181 extern int (*pgd_bad)(pgd_t);
 182 extern int (*pgd_present)(pgd_t);
 183 extern void (*pgd_clear)(pgd_t *);
 184 
 185 /*
 186  * The following only work if pte_present() is true.
 187  * Undefined behaviour if not..
 188  */
 189 extern int (*pte_write)(pte_t);
 190 extern int (*pte_dirty)(pte_t);
 191 extern int (*pte_young)(pte_t);
 192 
 193 extern pte_t (*pte_wrprotect)(pte_t);
 194 extern pte_t (*pte_mkclean)(pte_t);
 195 extern pte_t (*pte_mkold)(pte_t);
 196 extern pte_t (*pte_mkwrite)(pte_t);
 197 extern pte_t (*pte_mkdirty)(pte_t);
 198 extern pte_t (*pte_mkyoung)(pte_t);
 199 
 200 /*
 201  * Conversion functions: convert a page and protection to a page entry,
 202  * and a page entry and page directory to the page they refer to.
 203  */
 204 extern pte_t (*mk_pte)(unsigned long, pgprot_t);
 205 extern pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
 206 
 207 extern void (*pgd_set)(pgd_t *, pmd_t *);
 208 
 209 extern pte_t (*pte_modify)(pte_t, pgprot_t);
 210 
 211 /* to find an entry in a page-table-directory */
 212 extern pgd_t * (*pgd_offset)(struct mm_struct *, unsigned long);
 213 
 214 /* Find an entry in the second-level page table.. */
 215 extern pmd_t * (*pmd_offset)(pgd_t *, unsigned long);
 216 
 217 /* Find an entry in the third-level page table.. */ 
 218 extern pte_t * (*pte_offset)(pmd_t *, unsigned long);
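
/* Illustrative sketch (editorial addition, not part of the original header):
 * the usual three-level walk built from the indirection helpers above.  It
 * returns the pte mapping `address' in `mm', or NULL when an intermediate
 * level has not been set up; real callers also have to handle pgd_bad()
 * and pmd_bad().  The helper name is hypothetical.
 */
static inline pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgdp = pgd_offset(mm, address);
        pmd_t *pmdp;

        if (pgd_none(*pgdp))
                return (pte_t *) 0;
        pmdp = pmd_offset(pgdp, address);
        if (pmd_none(*pmdp))
                return (pte_t *) 0;
        return pte_offset(pmdp, address);
}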
 219 
 220 /*
 221  * Allocate and free page tables. The xxx_kernel() versions are
 222  * used to allocate a kernel page table - this turns on ASN bits
 223  * if any, and marks the page tables reserved.
 224  */
 225 extern void (*pte_free_kernel)(pte_t *);
 226 
 227 extern pte_t * (*pte_alloc_kernel)(pmd_t *, unsigned long);
 228 
 229 /*
 230  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 231  * inside the pgd, so has no extra memory associated with it.
 232  */
 233 extern void (*pmd_free_kernel)(pmd_t *);
 234 
 235 extern pmd_t * (*pmd_alloc_kernel)(pgd_t *, unsigned long);
 236 
 237 extern void (*pte_free)(pte_t *);
 238 
 239 extern pte_t * (*pte_alloc)(pmd_t *, unsigned long);
 240 
 241 /*
 242  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 243  * inside the pgd, so has no extra memory associated with it.
 244  */
 245 extern void (*pmd_free)(pmd_t *);
 246 
 247 extern pmd_t * (*pmd_alloc)(pgd_t *, unsigned long);
 248 
 249 extern void (*pgd_free)(pgd_t *);
 250 
 251 extern pgd_t * (*pgd_alloc)(void);
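
/* Illustrative sketch (editorial addition, not part of the original header):
 * how the allocation hooks combine to guarantee a pte slot for an address,
 * allocating the middle and last level tables on demand.  Assumes, as the
 * generic mm code does, that pmd_alloc()/pte_alloc() return NULL on
 * failure; the helper name is hypothetical.
 */
static inline pte_t *example_alloc_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgdp = pgd_offset(mm, address);
        pmd_t *pmdp = pmd_alloc(pgdp, address);

        if (!pmdp)
                return (pte_t *) 0;
        return pte_alloc(pmdp, address);
}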
 252 
 253 /* Fine grained cache/tlb flushing. */
 254 
 255 #ifdef __SMP__
 256 extern void (*local_flush_cache_all)(void);
 257 extern void (*local_flush_cache_mm)(struct mm_struct *);
 258 extern void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
 259                                      unsigned long end);
 260 extern void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
 261 
 262 extern void (*local_flush_tlb_all)(void);
 263 extern void (*local_flush_tlb_mm)(struct mm_struct *);
 264 extern void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
 265                                      unsigned long end);
 266 extern void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
 267 
 268 extern void (*local_flush_page_to_ram)(unsigned long address);
 269 
 270 extern void smp_flush_cache_all(void);
 271 extern void smp_flush_cache_mm(struct mm_struct *mm);
 272 extern void smp_flush_cache_range(struct mm_struct *mm,
 273                                   unsigned long start,
 274                                   unsigned long end);
 275 extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 276 
 277 extern void smp_flush_tlb_all(void);
 278 extern void smp_flush_tlb_mm(struct mm_struct *mm);
 279 extern void smp_flush_tlb_range(struct mm_struct *mm,
 280                                   unsigned long start,
 281                                   unsigned long end);
 282 extern void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 283 extern void smp_flush_page_to_ram(unsigned long page);
 284 #endif
 285 
 286 extern void (*flush_cache_all)(void);
 287 extern void (*flush_cache_mm)(struct mm_struct *);
 288 extern void (*flush_cache_range)(struct mm_struct *, unsigned long start,
 289                                  unsigned long end);
 290 extern void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
 291 
 292 extern void (*flush_tlb_all)(void);
 293 extern void (*flush_tlb_mm)(struct mm_struct *);
 294 extern void (*flush_tlb_range)(struct mm_struct *, unsigned long start, unsigned long end);
 295 extern void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
 296 
 297 extern void (*flush_page_to_ram)(unsigned long page);
 298 
 299 /* The pgprot_val permission bits used to map a page into the obio space */
 300 extern unsigned int pg_iobits;
 301 
 302 /* MMU context switching. */
 303 extern void (*switch_to_context)(struct task_struct *tsk);
 304 
 305 /* Certain architectures need to do special things when pte's
 306  * within a page table are directly modified.  Thus, the following
 307  * hook is made available.
 308  */
 309 
 310 #if 0 /* XXX try this soon XXX */
 311 extern void (*set_pte)(struct vm_area_struct *vma, unsigned long address,
 312                        pte_t *pteptr, pte_t pteval);
 313 #else
 314 extern void (*set_pte)(pte_t *pteptr, pte_t pteval);
 315 #endif
 316 
 317 extern char *(*mmu_info)(void);
 318 
 319 /* Fault handler stuff... */
 320 #define FAULT_CODE_PROT     0x1
 321 #define FAULT_CODE_WRITE    0x2
 322 #define FAULT_CODE_USER     0x4
 323 extern void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
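
/* Illustrative sketch (editorial addition, not part of the original header):
 * a hypothetical helper showing the order in which the hooks above are
 * typically driven when a translation is replaced -- flush cached data for
 * the old mapping, install the new pte, drop the stale TLB entry, then let
 * the MMU layer preload the new translation.  The actual callers live in
 * the generic mm code.
 */
static inline void example_establish_pte(struct vm_area_struct *vma,
                                         unsigned long address,
                                         pte_t *ptep, pte_t entry)
{
        flush_cache_page(vma, address);
        set_pte(ptep, entry);
        flush_tlb_page(vma, address);
        update_mmu_cache(vma, address, entry);
}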
 324 
 325 extern int invalid_segment;
 326 
 327 #define SWP_TYPE(entry) (((entry)>>2) & 0x7f)
 328 #define SWP_OFFSET(entry) (((entry) >> 9) & 0x7ffff)
 329 #define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
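
/* Worked example (editorial addition): SWP_ENTRY(3, 0x123) packs the swap
 * type into bits 2-8 and the offset into bits 9-27, giving 0x2460c;
 * SWP_TYPE(0x2460c) recovers 3 and SWP_OFFSET(0x2460c) recovers 0x123.
 * The encoding leaves bits 0-1 clear.
 */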
 330 
 331 struct ctx_list {
 332         struct ctx_list *next;
 333         struct ctx_list *prev;
 334         unsigned int ctx_number;
 335         struct mm_struct *ctx_mm;
 336 };
 337 
 338 extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
 339 extern struct ctx_list ctx_free;        /* Head of free list */
 340 extern struct ctx_list ctx_used;        /* Head of used contexts list */
 341 
 342 #define NO_CONTEXT     -1
 343 
 344 extern inline void remove_from_ctx_list(struct ctx_list *entry)
 345 {
 346         entry->next->prev = entry->prev;
 347         entry->prev->next = entry->next;
 348 }
 349 
 350 extern inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
 351 {
 352         entry->next = head;
 353         (entry->prev = head->prev)->next = entry;
 354         head->prev = entry;
 355 }
 356 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
 357 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
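
/* Illustrative sketch (editorial addition, not part of the original header):
 * a hypothetical helper showing the list primitives working together --
 * take the first entry off the free list, move it to the used list and
 * bind it to an mm.  The real allocation (and context stealing) policy
 * lives in the MMU-specific code.
 */
static inline struct ctx_list *example_grab_context(struct mm_struct *mm)
{
        struct ctx_list *ctxp = ctx_free.next;  /* assumes the free list is non-empty */

        remove_from_ctx_list(ctxp);
        add_to_used_ctxlist(ctxp);
        ctxp->ctx_mm = mm;
        return ctxp;
}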
 358 
 359 #endif /* !(_SPARC_PGTABLE_H) */
