root/arch/sparc/mm/sun4c.c


DEFINITIONS

This source file includes the following definitions.
  1. sun4c_pmd_align
  2. sun4c_pgdir_align
  3. sun4c_vmalloc_start
  4. sun4c_update_rootmmu_dir
  5. sun4c_pte_none
  6. sun4c_pte_present
  7. sun4c_pte_inuse
  8. sun4c_pte_clear
  9. sun4c_pte_reuse
  10. sun4c_pmd_none
  11. sun4c_pmd_bad
  12. sun4c_pmd_present
  13. sun4c_pmd_inuse
  14. sun4c_pmd_clear
  15. sun4c_pmd_reuse
  16. sun4c_pgd_none
  17. sun4c_pgd_bad
  18. sun4c_pgd_present
  19. sun4c_pgd_inuse
  20. sun4c_pgd_clear
  21. sun4c_pte_read
  22. sun4c_pte_write
  23. sun4c_pte_exec
  24. sun4c_pte_dirty
  25. sun4c_pte_young
  26. sun4c_pte_cow
  27. sun4c_pte_wrprotect
  28. sun4c_pte_rdprotect
  29. sun4c_pte_exprotect
  30. sun4c_pte_mkclean
  31. sun4c_pte_mkold
  32. sun4c_pte_uncow
  33. sun4c_pte_mkwrite
  34. sun4c_pte_mkread
  35. sun4c_pte_mkexec
  36. sun4c_pte_mkdirty
  37. sun4c_pte_mkyoung
  38. sun4c_pte_mkcow
  39. sun4c_mk_pte
  40. sun4c_pte_modify
  41. sun4c_pte_page
  42. sun4c_pmd_page
  43. sun4c_pgd_offset
  44. sun4c_pmd_offset
  45. sun4c_pte_offset
  46. add_pseg_list
  47. remove_pseg_list
  48. add_pseg_ctxlist
  49. remove_pseg_ctxlist
  50. sun4c_init_pseg_lists
  51. sun4c_distribute_kernel_mapping
  52. sun4c_delete_kernel_mapping
  53. sun4c_lock_tlb_entry
  54. sun4c_unlock_tlb_entry
  55. sun4c_unload_context_from_tlb
  56. sun4c_unload_page_from_tlb
  57. sun4c_invalidate
  58. sun4c_set_pte
  59. sun4c_pte_free_kernel
  60. sun4c_pte_alloc_kernel
  61. sun4c_pmd_free_kernel
  62. sun4c_pmd_alloc_kernel
  63. sun4c_pte_free
  64. sun4c_pte_alloc
  65. sun4c_pmd_free
  66. sun4c_pmd_alloc
  67. sun4c_pgd_free
  68. sun4c_pgd_alloc
  69. sun4c_switch_to_context
  70. sun4c_mapioaddr
  71. sun4c_lockarea
  72. sun4c_unlockarea
  73. sun4c_get_scsi_buffer
  74. sun4c_release_scsi_buffer
  75. sun4c_get_fault_info
  76. sun4c_alloc_pseg
  77. sun4c_update_mmu_cache
  78. sun4c_free_all_nonlocked_psegs
  79. sun4c_alloc_pseg_from_free_list
  80. sun4c_init_lock_area
  81. sun4c_check_for_ss2_cache_bug
  82. sun4c_paging_init
  83. sun4c_test_wp
  84. sun4c_lock_entire_kernel
  85. sun4c_fork_hook
  86. sun4c_release_hook
  87. sun4c_flush_hook
  88. sun4c_task_cacheflush
  89. sun4c_exit_hook
  90. ld_mmu_sun4c

   1 /* $Id: sun4c.c,v 1.56 1995/11/25 00:59:39 davem Exp $
   2  * sun4c.c:  Sun4C specific mm routines.
   3  *
   4  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
   5  */
   6 
   7 /* The SUN4C has an MMU based upon a Translation Lookaside Buffer scheme
   8  * where only so many translations can be loaded at once.  As Linus said
   9  * in Boston, this is a broken way of doing things.
  10  */
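
/* A rough sketch of the address breakdown this file works with (illustrative
 * only; the constants and helpers named here come from the asm headers
 * included below, and the local variable names are placeholders):
 *
 *      unsigned long vaddr    = ...;
 *      unsigned long seg_base = vaddr & SUN4C_REAL_PGDIR_MASK;
 *      unsigned char pseg     = get_segmap(seg_base);
 *
 * seg_base is the base of the hardware segment covering vaddr and pseg is
 * the segmap entry currently loaded for it.  Each hardware segment maps 64
 * pages, and get_segmap()/put_segmap() together with get_pte()/put_pte()
 * are what load and unload these hardware entries by hand in this file.
 */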
  11 
  12 #include <linux/kernel.h>
  13 #include <linux/sched.h>
  14 
  15 #include <asm/processor.h>
  16 #include <asm/page.h>
  17 #include <asm/pgtable.h>
  18 #include <asm/vac-ops.h>
  19 #include <asm/vaddrs.h>
  20 #include <asm/asi.h>
  21 #include <asm/system.h>
  22 #include <asm/contregs.h>
  23 #include <asm/oplib.h>
  24 #include <asm/idprom.h>
  25 #include <asm/machines.h>
  26 #include <asm/memreg.h>
  27 #include <asm/kdebug.h>
  28 
  29 /* Pseg allocation structures. */
  30 static struct pseg_list s4cpseg_pool[256];
  31 
  32 struct pseg_list s4cpseg_free;
  33 struct pseg_list s4cpseg_used;
  34 static struct pseg_list s4cpseg_locked;
  35 static struct pseg_list s4cpseg_per_context[16];
  36 
  37 static unsigned char pseg_count_per_context[16];
  38 
  39 unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
  40 unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
  41 
  42 extern int num_segmaps, num_contexts;
  43 
  44 /* First the functions which the mid-level code uses to directly
  45  * manipulate the software page tables.  Some defines since we are
  46  * emulating the i386 page directory layout.
  47  */
  48 #define PGD_PRESENT  0x001
  49 #define PGD_RW       0x002
  50 #define PGD_USER     0x004
  51 #define PGD_ACCESSED 0x020
  52 #define PGD_DIRTY    0x040
  53 #define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
  54 
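/* A pgd/pmd entry in this emulated layout is simply the kernel address of a
 * pte page or'd with the status bits above, roughly (illustrative only,
 * pte_page_address is just a placeholder name):
 *
 *      pgd_val(*pgdp) = PGD_TABLE | (unsigned long) pte_page_address;
 *
 * This is what the pte_alloc routines below do, and sun4c_pmd_page() undoes
 * it by masking with PAGE_MASK.
 */
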
  55 unsigned long sun4c_vmalloc_start(void)
  56 {
  57         return SUN4C_VMALLOC_START;
  58 }
  59 
  60 /* Update the root mmu directory on the sun4c mmu. */
  61 void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
  62 {
  63         (tsk)->tss.pgd_ptr = (unsigned long) (pgdir);
  64 }
  65 
  66 int sun4c_pte_none(pte_t pte)           { return !pte_val(pte); }
  67 int sun4c_pte_present(pte_t pte)        { return pte_val(pte) & _SUN4C_PAGE_VALID; }
  68 int sun4c_pte_inuse(pte_t *ptep)        { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
  69 void sun4c_pte_clear(pte_t *ptep)       { pte_val(*ptep) = 0; }
  70 void sun4c_pte_reuse(pte_t *ptep)
  71 {
  72         if(!mem_map[MAP_NR(ptep)].reserved)
  73                 mem_map[MAP_NR(ptep)].count++;
  74 }
  75 
  76 int sun4c_pmd_none(pmd_t pmd)           { return !pmd_val(pmd); }
  77 int sun4c_pmd_bad(pmd_t pmd)
  78 {
  79         return (pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE || pmd_val(pmd) > high_memory;
  80 }
  81 
  82 int sun4c_pmd_present(pmd_t pmd)        { return pmd_val(pmd) & PGD_PRESENT; }
  83 int sun4c_pmd_inuse(pmd_t *pmdp)        { return 0; }
  84 void sun4c_pmd_clear(pmd_t *pmdp)       { pmd_val(*pmdp) = 0; }
  85 void sun4c_pmd_reuse(pmd_t * pmdp)      { }
  86 
  87 int sun4c_pgd_none(pgd_t pgd)           { return 0; }
  88 int sun4c_pgd_bad(pgd_t pgd)            { return 0; }
  89 int sun4c_pgd_present(pgd_t pgd)        { return 1; }
  90 int sun4c_pgd_inuse(pgd_t *pgdp)        { return mem_map[MAP_NR(pgdp)].reserved; }
  91 void sun4c_pgd_clear(pgd_t * pgdp)      { }
  92 
  93 /*
  94  * The following only work if pte_present() is true.
  95  * Undefined behaviour if not..
  96  */
  97 int sun4c_pte_read(pte_t pte)           { return !(pte_val(pte) & _SUN4C_PAGE_PRIV); }
  98 int sun4c_pte_write(pte_t pte)          { return pte_val(pte) & _SUN4C_PAGE_WRITE; }
  99 int sun4c_pte_exec(pte_t pte)           { return !(pte_val(pte) & _SUN4C_PAGE_PRIV); }
 100 int sun4c_pte_dirty(pte_t pte)          { return pte_val(pte) & _SUN4C_PAGE_DIRTY; }
 101 int sun4c_pte_young(pte_t pte)          { return pte_val(pte) & _SUN4C_PAGE_REF; }
 102 int sun4c_pte_cow(pte_t pte)            { return pte_val(pte) & _SUN4C_PAGE_COW; }
 103 
 104 pte_t sun4c_pte_wrprotect(pte_t pte)    { pte_val(pte) &= ~_SUN4C_PAGE_WRITE; return pte; }
 105 pte_t sun4c_pte_rdprotect(pte_t pte)    { pte_val(pte) |= _SUN4C_PAGE_PRIV; return pte; }
 106 pte_t sun4c_pte_exprotect(pte_t pte)    { pte_val(pte) |= _SUN4C_PAGE_PRIV; return pte; }
 107 pte_t sun4c_pte_mkclean(pte_t pte)      { pte_val(pte) &= ~_SUN4C_PAGE_DIRTY; return pte; }
 108 pte_t sun4c_pte_mkold(pte_t pte)        { pte_val(pte) &= ~_SUN4C_PAGE_REF; return pte; }
 109 pte_t sun4c_pte_uncow(pte_t pte)        { pte_val(pte) &= ~_SUN4C_PAGE_COW; return pte; }
 110 pte_t sun4c_pte_mkwrite(pte_t pte)      { pte_val(pte) |= _SUN4C_PAGE_WRITE; return pte; }
 111 pte_t sun4c_pte_mkread(pte_t pte)       { pte_val(pte) &= ~_SUN4C_PAGE_PRIV; return pte; }
 112 pte_t sun4c_pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_SUN4C_PAGE_PRIV; return pte; }
 113 pte_t sun4c_pte_mkdirty(pte_t pte)      { pte_val(pte) |= _SUN4C_PAGE_DIRTY; return pte; }
 114 pte_t sun4c_pte_mkyoung(pte_t pte)      { pte_val(pte) |= _SUN4C_PAGE_REF; return pte; }
 115 pte_t sun4c_pte_mkcow(pte_t pte)        { pte_val(pte) |= _SUN4C_PAGE_COW; return pte; }
 116 
 117 /*
 118  * Conversion functions: convert a page and protection to a page entry,
 119  * and a page entry and page directory to the page they refer to.
 120  */
 121 pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot)
 122 {
 123         return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
 124 }
 125 
 126 pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
 127 {
 128         return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) | pgprot_val(newprot));
 129 }
 130 
 131 unsigned long sun4c_pte_page(pte_t pte)
 132 {
 133         return (PAGE_OFFSET + ((pte_val(pte) & 0xffff) << (PAGE_SHIFT)));
 134 }
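
/* For reference, mk_pte() and pte_page() invert one another (an illustrative
 * sketch, 'page' being a page-aligned kernel address whose physical page
 * number fits in the low 16 bits):
 *
 *      pte_t pte = sun4c_mk_pte(page, SUN4C_PAGE_SHARED);
 *      unsigned long back = sun4c_pte_page(pte);
 *
 * back equals page again: mk_pte stores the physical page number
 * (page - PAGE_OFFSET) >> PAGE_SHIFT in the low 16 bits of the pte, and
 * pte_page shifts it back up and re-adds PAGE_OFFSET.
 */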
 135 
 136 unsigned long sun4c_pmd_page(pmd_t pmd)
 137 {
 138         return (pmd_val(pmd) & PAGE_MASK);
 139 }
 140 
 141 /* to find an entry in a page-table-directory */
 142 pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
 143 {
 144         return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
 145 }
 146 
 147 /* Find an entry in the second-level page table.. */
 148 pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
 149 {
 150         return (pmd_t *) dir;
 151 }
 152 
 153 /* Find an entry in the third-level page table.. */ 
 154 pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
 155 {
 156         return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
 157 }
 158 
 159 /* Here comes the sun4c mmu-tlb management engine.  It is here because
 160  * some of the mid-level mm support needs to be able to lock down
 161  * critical areas of kernel memory into the tlb.
 162  */
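/* The pseg_list nodes managed below are declared in the sparc mm headers,
 * not in this file.  Judging purely from how they are used here, each node
 * looks roughly like this (field types are inferred, only the names are
 * taken from the code):
 *
 *      struct pseg_list {
 *              struct pseg_list *next, *prev;          free/used/locked list
 *              struct pseg_list *ctx_next, *ctx_prev;  per-context list
 *              unsigned long vaddr;                    segment base mapped
 *              int context;                            owning context
 *              int ref_cnt;                            lock reference count
 *              int hardlock;                           never reclaimed if set
 *              unsigned char pseg;                     hardware segment number
 *      };
 */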
 163 static inline void add_pseg_list(struct pseg_list *head, struct pseg_list *entry)
 164 {
 165         entry->next = head;
 166         (entry->prev = head->prev)->next = entry;
 167         head->prev = entry;
 168 }
 169 #define add_to_used_pseg_list(entry) add_pseg_list(&s4cpseg_used, entry)
 170 #define add_to_free_pseg_list(entry) add_pseg_list(&s4cpseg_free, entry)
 171 #define add_to_locked_pseg_list(entry) add_pseg_list(&s4cpseg_locked, entry)
 172 
 173 static inline void remove_pseg_list(struct pseg_list *entry)
 174 {
 175         entry->next->prev = entry->prev;
 176         entry->prev->next = entry->next;
 177 }
 178 
 179 static inline void add_pseg_ctxlist(struct pseg_list *entry, int ctx)
 180 {
 181         struct pseg_list *head = &s4cpseg_per_context[ctx];
 182 
 183         entry->ctx_next = head;
 184         (entry->ctx_prev = head->ctx_prev)->ctx_next = entry;
 185         head->ctx_prev = entry;
 186         pseg_count_per_context[ctx]++;
 187 }
 188 
 189 static inline void remove_pseg_ctxlist(struct pseg_list *entry, int ctx)
 190 {
 191         entry->ctx_next->ctx_prev = entry->ctx_prev;
 192         entry->ctx_prev->ctx_next = entry->ctx_next;
 193         pseg_count_per_context[ctx]--;
 194 }
 195 
 196 static inline void sun4c_init_pseg_lists(void)
 197 {
 198         int i;
 199 
 200         s4cpseg_free.prev = s4cpseg_free.next = &s4cpseg_free;
 201         s4cpseg_used.prev = s4cpseg_used.next = &s4cpseg_used;
 202         s4cpseg_locked.prev = s4cpseg_locked.next = &s4cpseg_locked;
 203         for(i = 0; i < num_contexts; i++) {
 204                 s4cpseg_per_context[i].ctx_prev = s4cpseg_per_context[i].ctx_next =
 205                         &s4cpseg_per_context[i];
 206         }
 207         for(i = 0; i <= invalid_segment; i++) {
 208                 s4cpseg_pool[i].vaddr = 0;
 209                 s4cpseg_pool[i].context = 0;
 210                 s4cpseg_pool[i].ref_cnt = 0;
 211                 s4cpseg_pool[i].hardlock = 0;
 212                 s4cpseg_pool[i].pseg = i;
 213         }
 214         s4cpseg_pool[invalid_segment].hardlock = 1;
 215 }
 216 
 217 static inline void sun4c_distribute_kernel_mapping(unsigned long address,
 218                                                    unsigned char pseg)
 219 {
 220         unsigned int flags;
 221         int ctx, save_ctx;
 222 
 223         save_flags(flags); cli();
 224         save_ctx = get_context();
 225         flush_user_windows();
 226         for(ctx = 0; ctx < num_contexts; ctx++) {
 227                 set_context(ctx);
 228                 put_segmap(address, pseg);
 229         }
 230         set_context(save_ctx);
 231         restore_flags(flags);
 232 }
 233 
 234 static inline void sun4c_delete_kernel_mapping(unsigned long address)
 235 {
 236         unsigned int flags;
 237         int ctx, save_ctx;
 238 
 239         save_flags(flags); cli();
 240         save_ctx = get_context();
 241         flush_user_windows();
 242 
 243         /* Flush only needed in one context for kernel mappings. */
 244         sun4c_flush_segment(address);
 245         for(ctx = 0; ctx < num_contexts; ctx++) {
 246                 set_context(ctx);
 247                 put_segmap(address, invalid_segment);
 248         }
 249         set_context(save_ctx);
 250         restore_flags(flags);
 251 }
 252 
 253 /* NOTE: You can only lock kernel tlb entries; attempts to lock
 254  *       pages in user vm will bolix the entire system.
 255  */
 256 static inline void sun4c_lock_tlb_entry(unsigned long address)
 257 {
 258         unsigned long flags;
 259         unsigned char pseg;
 260 
 261         save_flags(flags); cli();
 262         /* Fault it in. */
 263         __asm__ __volatile__("ldub [%0], %%g0\n\t" : : "r" (address));
 264         address &= SUN4C_REAL_PGDIR_MASK;
 265         pseg = get_segmap(address);
 266         if(address < KERNBASE)
 267                 panic("locking user address space into tlb!");
 268         if(pseg == invalid_segment)
 269                 panic("cannot lock kernel tlb entry...");
 270         if(!s4cpseg_pool[pseg].ref_cnt++ && !s4cpseg_pool[pseg].hardlock) {
 271                 /* Move from used to locked list. */
 272                 remove_pseg_list(&s4cpseg_pool[pseg]);
 273                 add_to_locked_pseg_list(&s4cpseg_pool[pseg]);
 274         }
 275         restore_flags(flags);
 276 }
 277 
 278 static inline void sun4c_unlock_tlb_entry(unsigned long address)
 279 {
 280         unsigned long flags;
 281         struct pseg_list *psegp;
 282         unsigned char pseg;
 283 
 284         save_flags(flags); cli();
 285         address &= SUN4C_REAL_PGDIR_MASK;
 286         pseg = get_segmap(address);
 287         if(address < KERNBASE)
 288                 panic("unlocking user tlb entry!");
 289         if(pseg == invalid_segment)
 290                 panic("unlocking non-locked kernel tlb entry...");
 291         psegp = &s4cpseg_pool[pseg];
 292         if(!--psegp->ref_cnt && !psegp->hardlock) {
 293                 /* Move from locked list to used list. */
 294                 remove_pseg_list(psegp);
 295                 add_to_used_pseg_list(psegp);
 296         }
 297         restore_flags(flags);
 298 }
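
/* Typical pairing, much as the pgd allocator later in this file does it
 * (illustrative only):
 *
 *      unsigned long page = get_free_page(GFP_KERNEL);
 *      sun4c_lock_tlb_entry(page);
 *      ...
 *      sun4c_unlock_tlb_entry(page);
 *      free_page(page);
 *
 * The lock call pins the hardware segment holding 'page' on the locked pseg
 * list, the unlock call drops the reference and lets it be reclaimed again.
 */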
 299 
 300 /* Anyone who calls this must turn _all_ interrupts off and flush
 301  * any necessary user windows beforehand.
 302  */
 303 static inline void sun4c_unload_context_from_tlb(unsigned char ctx)
 304 {
 305         struct pseg_list *psegp, *pnextp;
 306 
 307         if(pseg_count_per_context[ctx]) {
 308                 sun4c_flush_context(); /* Most efficient */
 309                 psegp = s4cpseg_per_context[ctx].ctx_next;
 310                 while(psegp != &s4cpseg_per_context[ctx]) {
 311                         pnextp = psegp->ctx_next;
 312                         if(psegp->vaddr >= KERNBASE)
 313                                 panic("Unloading kernel from tlb, not good.");
 314                         put_segmap(psegp->vaddr, invalid_segment);
 315                         remove_pseg_ctxlist(psegp, ctx);
 316                         remove_pseg_list(psegp);
 317                         add_to_free_pseg_list(psegp);
 318                         psegp = pnextp;
 319                 }
 320                 if(pseg_count_per_context[ctx])
 321                         panic("pseg_count_per_context inconsistent after "
 322                               "invalidate.");
 323         }
 324 }
 325 
 326 /* This page must be a page in user vma... again all IRQ's gotta be off. */
 327 static inline void sun4c_unload_page_from_tlb(unsigned long addr,
 328                                               struct task_struct *tsk)
 329 {
 330         unsigned char save_ctx;
 331 
 332         if(tsk->tss.context != -1) {
 333                 save_ctx = get_context();
 334                 flush_user_windows();
 335                 set_context(tsk->tss.context);
 336                 sun4c_flush_page(addr);
 337                 put_pte(addr, 0);
 338                 set_context(save_ctx);
 339         }
 340 }
 341 
 342 /* NOTE: When we have finer grained invalidate()'s (RSN) this
 343  *       whole scheme will be much more efficient and will need to
 344  *       be rewritten.  Also note that this routine only
 345  *       unloads user page translations, this may need to
 346  *       be changed at some point.
 347  */
 348 void sun4c_invalidate(void)
 349 {
 350         int orig_ctx, cur_ctx, flags;
 351 
 352         save_flags(flags); cli();
 353         flush_user_windows();
 354         orig_ctx = get_context();
 355         for(cur_ctx = 0; cur_ctx < num_contexts; cur_ctx++) {
 356                 set_context(cur_ctx);
 357                 sun4c_unload_context_from_tlb(cur_ctx);
 358         }
 359         set_context(orig_ctx);
 360         restore_flags(flags);
 361 }
 362 
 363 /* We're only updating software tables on the sun4c. */
 364 void sun4c_set_pte(pte_t *ptep, pte_t pteval)
 365 {
 366         *ptep = pteval;
 367 }
 368 
 369 /* Now back to the mid-level interface code:
 370  *
 371  * Allocate and free page tables. The xxx_kernel() versions are
 372  * used to allocate a kernel page table - this turns on ASN bits
 373  * if any, and marks the page tables reserved.
 374  */
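/* For reference, the mid-level mm code ends up walking these software tables
 * roughly like this (an illustrative sketch, not a quote from the generic
 * code; mm, addr, page and prot are placeholders):
 *
 *      pgd_t *pgdp = sun4c_pgd_offset(mm, addr);
 *      pmd_t *pmdp = sun4c_pmd_alloc(pgdp, addr);
 *      pte_t *ptep = sun4c_pte_alloc(pmdp, addr);
 *      sun4c_set_pte(ptep, sun4c_mk_pte(page, prot));
 *
 * pmd_alloc simply hands back the pgd slot since there is no real mid level,
 * and the generic code reaches all of these through the function pointers
 * installed by ld_mmu_sun4c() at the bottom of this file.
 */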
 375 void sun4c_pte_free_kernel(pte_t *pte)
 376 {
 377         mem_map[MAP_NR(pte)].reserved = 0;
 378         free_page((unsigned long) pte);
 379 }
 380 
 381 pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 382 {
 383         address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
 384         if (sun4c_pmd_none(*pmd)) {
 385                 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
 386                 if (sun4c_pmd_none(*pmd)) {
 387                         if (page) {
 388                                 pmd_val(*pmd) = PGD_TABLE | (unsigned long) page;
 389                                 mem_map[MAP_NR(page)].reserved = 1;
 390                                 return page + address;
 391                         }
 392                         pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
 393                         return NULL;
 394                 }
 395                 free_page((unsigned long) page);
 396         }
 397         if (sun4c_pmd_bad(*pmd)) {
 398                 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
 399                 pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
 400                 return NULL;
 401         }
 402         return (pte_t *) sun4c_pmd_page(*pmd) + address;
 403 }
 404 
 405 /*
 406  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 407  * inside the pgd, so has no extra memory associated with it.
 408  */
 409 void sun4c_pmd_free_kernel(pmd_t *pmd)
 410 {
 411         pmd_val(*pmd) = 0;
 412 }
 413 
 414 pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 415 {
 416         return (pmd_t *) pgd;
 417 }
 418 
 419 void sun4c_pte_free(pte_t *pte)
 420 {
 421         free_page((unsigned long) pte);
 422 }
 423 
 424 pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
 425 {
 426         address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
 427         if (sun4c_pmd_none(*pmd)) {
 428                 pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
 429                 if (sun4c_pmd_none(*pmd)) {
 430                         if (page) {
 431                                 pmd_val(*pmd) = PGD_TABLE | (unsigned long) page;
 432                                 return page + address;
 433                         }
 434                         pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
 435                         return NULL;
 436                 }
 437                 free_page((unsigned long) page);
 438         }
 439         if (sun4c_pmd_bad(*pmd)) {
 440                 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
 441                 pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
 442                 return NULL;
 443         }
 444         return (pte_t *) sun4c_pmd_page(*pmd) + address;
 445 }
 446 
 447 /*
 448  * allocating and freeing a pmd is trivial: the 1-entry pmd is
 449  * inside the pgd, so has no extra memory associated with it.
 450  */
 451 void sun4c_pmd_free(pmd_t * pmd)
 452 {
 453         pmd_val(*pmd) = 0;
 454 }
 455 
 456 pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
 457 {
 458         return (pmd_t *) pgd;
 459 }
 460 
 461 void sun4c_pgd_free(pgd_t *pgd)
 462 {
 463         free_page((unsigned long) pgd);
 464         sun4c_unlock_tlb_entry((unsigned long) pgd);
 465 }
 466 
 467 pgd_t *sun4c_pgd_alloc(void)
 468 {
 469         unsigned long new_pgd = get_free_page(GFP_KERNEL);
 470         sun4c_lock_tlb_entry(new_pgd);
 471         return (pgd_t *) new_pgd;
 472 }
 473 
 474 /* Jumping to and from different contexts, the modification of the pseg lists
 475  * must be atomic during the switch, or else...
 476  */
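/* The ctx_list nodes (ctx_free, ctx_used and ctx_list_pool) likewise come
 * from the sparc mm headers; the fields touched in this file suggest roughly
 * (types inferred from usage):
 *
 *      struct ctx_list {
 *              struct ctx_list *next, *prev;
 *              unsigned int ctx_number;
 *              struct task_struct *ctx_task;
 *      };
 */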
 477 void sun4c_switch_to_context(void *new_task)
 478 {
 479         struct task_struct *tsk = (struct task_struct *) new_task;
 480         struct task_struct *old_tsk;
 481         struct ctx_list *ctxp;
 482         unsigned long flags;
 483         int ctx = tsk->tss.context;
 484 
 485         /* Swapper can execute in any context, or this task
 486          * has already been allocated a piece of the mmu real-
 487          * estate.
 488          */
 489         if(tsk->pid == 0 || ctx != -1)
 490                 return;
 491         ctxp = ctx_free.next;
 492         if(ctxp != &ctx_free) {
 493                 save_flags(flags); cli();
 494                 ctx = ctxp->ctx_number;
 495                 remove_from_ctx_list(ctxp);
 496                 add_to_used_ctxlist(ctxp);
 497                 tsk->tss.context = ctx;
 498                 ctxp->ctx_task = tsk;
 499                 restore_flags(flags);
 500                 return;
 501         }
 502         save_flags(flags); cli();
 503         ctxp = ctx_used.prev;
 504         /* Don't steal from current, thank you. */
 505         if(ctxp->ctx_task == current)
 506                 ctxp = ctxp->prev;
 507         if(ctxp == &ctx_used)
 508                 panic("out of contexts");
 509         remove_from_ctx_list(ctxp);
 510         old_tsk = ctxp->ctx_task;
 511         old_tsk->tss.context = -1;
 512         ctxp->ctx_task = tsk;
 513         tsk->tss.context = ctxp->ctx_number;
 514         add_to_used_ctxlist(ctxp);
 515         /* User windows flushed already by switch_to(p) macro. */
 516         set_context(ctxp->ctx_number);
 517         sun4c_unload_context_from_tlb(ctxp->ctx_number);
 518         restore_flags(flags);
 519 }
 520 
 521 /* Low level IO area allocation on the Sun4c MMU.  This function is called
 522  * for each page of IO area you need.  Kernel code should not call this
 523  * routine directly, use sparc_alloc_io() instead.
 524  */
 525 void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
 526                      int bus_type, int rdonly)
 527 {
 528         unsigned long page_entry;
 529 
 530         page_entry = ((physaddr >> PAGE_SHIFT) & 0xffff);
 531         page_entry |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_WRITE |
 532                        _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_IO);
 533         if(rdonly)
 534                 page_entry &= (~_SUN4C_PAGE_WRITE);
 535         sun4c_flush_page(virt_addr);
 536         put_pte(virt_addr, page_entry);
 537 }
 538 
 539 /* These routines are used to lock down and unlock data transfer
 540  * areas in the sun4c tlb.  If the pages need to be uncached the
 541  * caller must do that himself.
 542  */
 543 inline char *sun4c_lockarea(char *vaddr, unsigned long size)
 544 {
 545         unsigned long flags;
 546         unsigned long orig_addr = (unsigned long) vaddr;
 547         unsigned long first_seg = (orig_addr & SUN4C_REAL_PGDIR_MASK);
 548         unsigned long last_seg = ((orig_addr + size) & SUN4C_REAL_PGDIR_MASK);
 549 
 550         save_flags(flags); cli();
 551         for(; first_seg <= last_seg; first_seg += SUN4C_REAL_PGDIR_SIZE)
 552                 sun4c_lock_tlb_entry(first_seg);
 553 
 554         restore_flags(flags);
 555         return vaddr;
 556 }
 557 
 558 /* Note that when calling unlockarea you pass as 'vaddr' the address that
 559  * was returned to you by lockarea for this pool above.
 560  */
 561 inline void sun4c_unlockarea(char *vaddr, unsigned long size)
 562 {
 563         unsigned long flags;
 564         unsigned long orig_addr = (unsigned long) vaddr;
 565         unsigned long first_seg = (orig_addr & SUN4C_REAL_PGDIR_MASK);
 566         unsigned long last_seg = ((orig_addr + size) & SUN4C_REAL_PGDIR_MASK);
 567 
 568         save_flags(flags); cli();
 569         for(; first_seg <= last_seg; first_seg += SUN4C_REAL_PGDIR_SIZE)
 570                 sun4c_unlock_tlb_entry(first_seg);
 571 
 572         restore_flags(flags);
 573 }
 574 
 575 /* Getting and Releasing scsi dvma buffers. */
 576 char *sun4c_get_scsi_buffer(char *bufptr, unsigned long len)
 577 {
 578         unsigned long first_page = ((unsigned long) bufptr) & PAGE_MASK;
 579         unsigned long last_page = (((unsigned long) bufptr) + len) & PAGE_MASK;
 580 
 581         /* First lock down the area. */
 582         bufptr = sun4c_lockarea(bufptr, len);
 583 
 584         /* Uncache and flush all the pages. */
 585         for(; first_page <= last_page; first_page += PAGE_SIZE) {
 586                 sun4c_flush_page(first_page);
 587                 put_pte(first_page, get_pte(first_page) | PTE_NC);
 588         }
 589         return bufptr;
 590 }
 591 
 592 void sun4c_release_scsi_buffer(char *bufptr, unsigned long len)
 593 {
 594         unsigned long first_page = ((unsigned long) bufptr) & PAGE_MASK;
 595         unsigned long last_page = (((unsigned long) bufptr) + len) & PAGE_MASK;
 596 
 597 
 598         /* Recache all the pages. */
 599         for(; first_page <= last_page; first_page += PAGE_SIZE)
 600                 put_pte(first_page, get_pte(first_page) & ~PTE_NC);
 601 
 602         sun4c_unlockarea(bufptr, len);
 603 }
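
/* A driver-side pairing sketch (illustrative only; real callers go through
 * the mmu_get_scsi_buffer/mmu_release_scsi_buffer pointers installed by
 * ld_mmu_sun4c() below, and buf/len are placeholders):
 *
 *      char *dvma = mmu_get_scsi_buffer(buf, len);
 *      ... run the DMA transfer against dvma ...
 *      mmu_release_scsi_buffer(buf, len);
 *
 * On sun4c this just locks the covering segments and marks the pages
 * non-cacheable for the duration of the transfer.
 */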
 604 
 605 /* Code to fill the sun4c tlb during a fault.  Plus fault helper routine. */
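/* Rough flow of a fault on sun4c: the low-level trap code calls
 * get_fault_info() (patched to the routine below by ld_mmu_sun4c()) to pick
 * up the faulting address and error code, the generic fault handler runs
 * handle_mm_fault(), and handle_mm_fault() in turn calls back into
 * sun4c_update_mmu_cache() further down to (re)load the hardware pte.
 */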
 606 int sun4c_get_fault_info(unsigned long *address, unsigned long *error_code,
 607                          unsigned long from_user)
 608 {
 609         unsigned long faddr, fstatus, new_code;
 610 
 611         faddr = sun4c_get_synchronous_address();
 612         *address = faddr;
 613         if(faddr >= 0x20000000 && faddr < 0xe0000000) {
 614                 printk("SUN4C: Fault in vm hole at %08lx\n", faddr);
 615                 *error_code = from_user;
 616                 return 1;
 617         }
 618         fstatus = sun4c_get_synchronous_error();
 619         if(fstatus & SUN4C_SYNC_BOLIXED)
 620                 panic("SUN4C: Unrecoverable fault type.");
 621         new_code = 0;
 622         if(fstatus & SUN4C_SYNC_PROT)
 623                 new_code |= FAULT_CODE_PROT;
 624         if(fstatus & SUN4C_SYNC_BADWRITE)
 625                 new_code |= FAULT_CODE_WRITE;
 626         *error_code = (new_code | from_user);
 627         return 0;
 628 }
 629 
 630 static inline void sun4c_alloc_pseg(unsigned long address)
 631 {
 632         struct pseg_list *psegp;
 633         unsigned char cur_ctx = get_context();
 634         int kernel_address = (address >= KERNBASE);
 635         int user_address = !kernel_address;
 636 
 637         psegp = s4cpseg_free.next;
 638         if(psegp != &s4cpseg_free) {
 639                 remove_pseg_list(psegp);
 640                 add_to_used_pseg_list(psegp);
 641                 if(user_address)
 642                         add_pseg_ctxlist(psegp, cur_ctx);
 643                 psegp->vaddr = address;
 644                 psegp->context = cur_ctx;
 645                 /* No cache flush needed */
 646                 if(kernel_address)
 647                         sun4c_distribute_kernel_mapping(address, psegp->pseg);
 648                 else
 649                         put_segmap(address, psegp->pseg);
 650                 return;
 651         }
 652         psegp = s4cpseg_used.prev; /* Take last used list entry. */
 653         if(psegp == &s4cpseg_used)
 654                 panic("Sun4c psegs have disappeared...");
 655         if(psegp->vaddr >= KERNBASE) {
 656                 sun4c_delete_kernel_mapping(psegp->vaddr);
 657         } else {
 658                 flush_user_windows();
 659                 set_context(psegp->context);
 660                 sun4c_flush_segment(psegp->vaddr);
 661                 put_segmap(psegp->vaddr, invalid_segment);
 662                 set_context(cur_ctx);
 663         }
 664         remove_pseg_list(psegp);
 665         if(psegp->vaddr < KERNBASE)
 666                 remove_pseg_ctxlist(psegp, psegp->context);
 667         psegp->vaddr = address;
 668         psegp->context = cur_ctx;
 669         if(kernel_address)
 670                 sun4c_distribute_kernel_mapping(address, psegp->pseg);
 671         else
 672                 put_segmap(address, psegp->pseg);
 673         add_to_used_pseg_list(psegp);
 674         if(user_address)
 675                 add_pseg_ctxlist(psegp, cur_ctx);
 676 }
 677 
 678 /*
 679  * handle_mm_fault() gets here so that we can update our 'view'
 680  * of a new address translation.  A lot of the time, mappings
 681  * don't change and we are just 'working the tlb cache'.
 682  */
 683 void sun4c_update_mmu_cache(struct vm_area_struct * vma,
 684                             unsigned long address, pte_t pte)
 685 {
 686         unsigned long flags, segmap, segaddr, clean;
 687 
 688         save_flags(flags); cli();
 689         address &= PAGE_MASK;
 690         segaddr = address & SUN4C_REAL_PGDIR_MASK;
 691         segmap = get_segmap(segaddr);
 692         if(segmap == invalid_segment) {
 693                 sun4c_alloc_pseg(segaddr);
 694                 /* XXX make segmap freeing routines do this. XXX */
 695                 for(clean = segaddr; clean < (segaddr + SUN4C_REAL_PGDIR_SIZE);
 696                     clean += PAGE_SIZE)
 697                         put_pte(clean, 0);
 698         }
 699 
 700         /* If this is a user fault, only load the one pte so that
 701          * the kernel's ref/mod bits accurately reflect what is
 702          * in the tlb.  handle_pte_fault() causes this to work.
 703          */
 704         if(address < TASK_SIZE)
 705                 put_pte(address, pte_val(pte));
 706         else {
 707                 /* We have a kernel fault here, load entire segment. */
 708                 pgd_t *pgdp;
 709                 pte_t *ptable;
 710                 int pnum = 64;
 711 
 712                 pgdp = sun4c_pgd_offset(&init_mm, segaddr);
 713                 ptable = sun4c_pte_offset((pmd_t *)pgdp, segaddr);
 714                 while(pnum--) {
 715                         put_pte(segaddr, pte_val(*ptable++));
 716                         segaddr += PAGE_SIZE;
 717                 };
 718         }
 719         restore_flags(flags);
 720 }
 721 
 722 /* Paging initialization on the Sun4c. */
 723 static inline void sun4c_free_all_nonlocked_psegs(void)
 724 {
 725         struct pseg_list *plp;
 726         int i;
 727 
 728         for(i=0; i < invalid_segment; i++)
 729                 if(!s4cpseg_pool[i].hardlock)
 730                         add_to_free_pseg_list(&s4cpseg_pool[i]);
 731         /* Now for every free pseg, make all the ptes invalid. */
 732         plp = s4cpseg_free.next;
 733         while(plp != &s4cpseg_free) {
 734                 put_segmap(0x0, plp->pseg);
 735                 for(i=0; i<64; i++)
 736                         put_pte((i * PAGE_SIZE), 0x0);
 737                 plp = plp->next;
 738         }
 739         put_segmap(0x0, invalid_segment);
 740 }
 741 
 742 static inline struct pseg_list *sun4c_alloc_pseg_from_free_list(void)
 743 {
 744         struct pseg_list *psegp;
 745 
 746         psegp = s4cpseg_free.next;
 747         if(psegp != &s4cpseg_free) {
 748                 remove_pseg_list(psegp);
 749                 return psegp;
 750         }
 751         return 0;
 752 }
 753 
 754 static inline void sun4c_init_lock_area(unsigned long start_addr,
 755                                         unsigned long end_addr)
 756 {
 757         struct pseg_list *psegp;
 758         unsigned long a;
 759         int ctx;
 760 
 761         for(a = start_addr; a < end_addr; a += SUN4C_REAL_PGDIR_SIZE) {
 762                 psegp = sun4c_alloc_pseg_from_free_list();
 763                 if(!psegp) {
 764                         prom_printf("whoops...");
 765                         prom_halt();
 766                 }
 767                 for(ctx=0;ctx<num_contexts;ctx++)
 768                         prom_putsegment(ctx,a,psegp->pseg);
 769                 add_to_locked_pseg_list(psegp);
 770                 psegp->hardlock = 1;
 771         }
 772 }
 773 
 774 static inline void sun4c_check_for_ss2_cache_bug(void)
 775 {
 776         extern unsigned long start;
 777 
 778         /* Well we've now got a problem, on the SS2 a cache bug
 779          * causes line entries to get severely corrupted if the
 780          * trap table is able to be cached.  A sane and simple
 781          * workaround, at least for now, is to mark the trap
 782          * table page as uncacheable.
 783          *
 784          * XXX Investigate other possible workarounds and see
 785          * XXX if they help performance enough to warrant using
 786          * XXX them.                      -- 8/6/95 davem
 787          */
 788         if(idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) {
 789                 /* Whee.. */
 790                 printk("SS2 cache bug detected, uncaching trap table page\n");
 791                 sun4c_flush_page((unsigned int) &start);
 792                 put_pte(((unsigned long) &start),
 793                         (get_pte((unsigned long) &start) | PTE_NC));
 794         }
 795 }
 796 
 797 extern unsigned long free_area_init(unsigned long, unsigned long);
 798 
 799 /* Whee, this is now *ultra* clean and more manageable */
 800 extern unsigned long end;
 801 extern void probe_mmu(void);
 802 
 803 unsigned long sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
 804 {
 805         unsigned long addr, vaddr, kern_begin, kern_end;
 806         unsigned long prom_begin, prom_end, kadb_begin;
 807         pgd_t *pgdp;
 808         pte_t *pg_table;
 809         int phys_seg, i, ctx;
 810 
 811         start_mem = PAGE_ALIGN(start_mem);
 812 
 813         probe_mmu();
 814         invalid_segment = (num_segmaps - 1);
 815         sun4c_init_pseg_lists();
 816         for(kern_begin = KERNBASE;
 817             kern_begin < (unsigned long) &end;
 818             kern_begin += SUN4C_REAL_PGDIR_SIZE) {
 819                 unsigned char pseg = get_segmap(kern_begin);
 820 
 821                 s4cpseg_pool[pseg].hardlock=1;
 822                 for(ctx=0; ctx<num_contexts;ctx++)
 823                         prom_putsegment(ctx,kern_begin,pseg);
 824         }
 825         for(kern_begin = SUN4C_REAL_PGDIR_ALIGN((unsigned long) &end);
 826             kern_begin < KADB_DEBUGGER_BEGVM;
 827             kern_begin += SUN4C_REAL_PGDIR_SIZE)
 828                 for(ctx=0; ctx<num_contexts;ctx++)
 829                         prom_putsegment(ctx, kern_begin, invalid_segment);
 830         for(prom_begin = KADB_DEBUGGER_BEGVM;
 831             prom_begin < LINUX_OPPROM_ENDVM;
 832             prom_begin += SUN4C_REAL_PGDIR_SIZE) {
 833                 unsigned long pseg = get_segmap(prom_begin);
 834 
 835                 if(pseg != invalid_segment) {
 836                         s4cpseg_pool[pseg].hardlock=1;
 837                         for(ctx=0; ctx<num_contexts; ctx++)
 838                                 prom_putsegment(ctx,prom_begin,pseg);
 839                 }
 840         }
 841         /* Clean the MMU of excess garbage... */
 842         for(ctx=0; ctx<num_contexts;ctx++) {
 843                 set_context(ctx);
 844                 for(vaddr = 0; vaddr < 0x20000000;
 845                     vaddr += SUN4C_REAL_PGDIR_SIZE)
 846                         put_segmap(vaddr,invalid_segment);
 847                 for(vaddr = 0xe0000000; vaddr < KERNBASE;
 848                     vaddr += SUN4C_REAL_PGDIR_SIZE)
 849                         put_segmap(vaddr,invalid_segment);
 850                 for(vaddr = LINUX_OPPROM_ENDVM; vaddr != 0;
 851                     vaddr += SUN4C_REAL_PGDIR_SIZE)
 852                         put_segmap(vaddr,invalid_segment);
 853         }
 854         set_context(0);
 855         sun4c_free_all_nonlocked_psegs();
 856         /* Lock I/O and DVMA areas for the system. */
 857         sun4c_init_lock_area(IOBASE_VADDR, IOBASE_END);
 858         sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
 859         /* Zero out swapper_pg_dir and pg0 */
 860         memset(swapper_pg_dir, 0, PAGE_SIZE);
 861         memset(pg0, 0, PAGE_SIZE);
 862         /* This makes us Solaris boot-loader 'safe' */
 863         pgd_val(swapper_pg_dir[KERNBASE>>SUN4C_PGDIR_SHIFT]) =
 864                 PGD_TABLE | (unsigned long) pg0;
 865 
 866         /* Initialize swapper_pg_dir to map the kernel
 867          * addresses in high memory.  Note that as soon as we get past
 868          * the 4MB lower mapping and start using dynamic memory from
 869          * start_mem we can start faulting and this is ok since our
 870          * pseg free list and the lower 4MB of the kernel is mapped
 871          * properly in the software page tables.
 872          */
 873         pgdp = swapper_pg_dir;
 874         kern_end = PAGE_ALIGN(end_mem);
 875         kern_begin = KERNBASE;
 876         while(kern_begin < kern_end) {
 877                 unsigned long pte, tmp;
 878 
 879                 /* We only need _one_ mapping, the high address one. */
 880                 pg_table = (pte_t *) (PAGE_MASK & pgd_val(pgdp[KERNBASE>>SUN4C_PGDIR_SHIFT]));
 881                 if(!pg_table) {
 882                         pg_table = (pte_t *) start_mem;
 883                         start_mem += PAGE_SIZE;
 884                 }
 885                 pgd_val(pgdp[KERNBASE>>SUN4C_PGDIR_SHIFT]) =
 886                         PGD_TABLE | (unsigned long) pg_table;
 887                 pgdp++;
 888                 for(tmp = 0; tmp < SUN4C_PTRS_PER_PTE; tmp++, pg_table++) {
 889                         if(kern_begin < kern_end)
 890                                 sun4c_set_pte(pg_table,
 891                                               mk_pte(kern_begin,
 892                                                      SUN4C_PAGE_SHARED));
 893                         else
 894                                 sun4c_pte_clear(pg_table);
 895                         pte = get_pte(kern_begin);
 896                         if(pte & _SUN4C_PAGE_VALID) {
 897                                 pte &= ~(_SUN4C_PAGE_NOCACHE);
 898                                 pte |= (_SUN4C_PAGE_PRIV | _SUN4C_PAGE_WRITE |
 899                                         _SUN4C_PAGE_REF | _SUN4C_PAGE_DIRTY);
 900                                 put_pte(kern_begin, pte);
 901                         }
 902                         kern_begin += PAGE_SIZE;
 903                 }
 904         }
 905         sun4c_check_for_ss2_cache_bug();
 906         /* Fix kadb/prom permissions. */
 907         kadb_begin = KADB_DEBUGGER_BEGVM;
 908         prom_end = LINUX_OPPROM_ENDVM;
 909         for(; kadb_begin < prom_end; kadb_begin += PAGE_SIZE) {
 910                 unsigned long pte = get_pte(kadb_begin);
 911                 if(pte & _SUN4C_PAGE_VALID)
 912                         put_pte(kadb_begin, (pte | _SUN4C_PAGE_PRIV));
 913         }
 914         /* Allocate the DVMA pages */
 915         addr = DVMA_VADDR;
 916         start_mem = PAGE_ALIGN(start_mem);
 917         while(addr < DVMA_END) {
 918                 unsigned long dvmapte = start_mem - PAGE_OFFSET;
 919 
 920                 start_mem += PAGE_SIZE;
 921                 dvmapte = ((dvmapte>>PAGE_SHIFT) & 0xffff);
 922                 dvmapte |= (_SUN4C_PAGE_VALID |
 923                             _SUN4C_PAGE_WRITE |
 924                             _SUN4C_PAGE_NOCACHE);
 925                 put_pte(addr, dvmapte);
 926                 addr += PAGE_SIZE;
 927         }
 928         /* Tell the user our allocations */
 929         for(phys_seg=0, i=0; i<=invalid_segment; i++)
 930                 if(s4cpseg_pool[i].hardlock)
 931                         phys_seg++;
 932         printk("SUN4C: Hard locked %d boot-up psegs\n", phys_seg);
 933         /* Init the context pool and lists */
 934         ctx_list_pool = (struct ctx_list *) start_mem;
 935         start_mem += (num_contexts * sizeof(struct ctx_list));
 936         for(ctx = 0; ctx < num_contexts; ctx++) {
 937                 struct ctx_list *clist;
 938 
 939                 clist = (ctx_list_pool + ctx);
 940                 clist->ctx_number = ctx;
 941                 clist->ctx_task = 0;
 942         }
 943         ctx_free.next = ctx_free.prev = &ctx_free;
 944         ctx_used.next = ctx_used.prev = &ctx_used;
 945         for(ctx = 0; ctx < num_contexts; ctx++)
 946                 add_to_free_ctxlist(ctx_list_pool + ctx);
 947         start_mem = PAGE_ALIGN(start_mem);
 948         start_mem = free_area_init(start_mem, end_mem);
 949         start_mem = PAGE_ALIGN(start_mem);
 950         return start_mem;
 951 }
 952 
 953 /* Test the WP bit on the sun4c. */
 954 void sun4c_test_wp(void)
 955 {
 956         wp_works_ok = -1;
 957 
 958         /* Let it rip... */
 959         put_pte((unsigned long) 0x0, (PTE_V | PTE_P));
 960         __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
 961         put_pte((unsigned long) 0x0, 0x0);
 962         if (wp_works_ok < 0)
 963                 wp_works_ok = 0;
 964 }
 965 
 966 void sun4c_lock_entire_kernel(unsigned long start_mem)
 967 {
 968         unsigned long addr = (unsigned long) &end;
 969 
 970         addr = (addr & SUN4C_REAL_PGDIR_MASK);
 971         start_mem = SUN4C_REAL_PGDIR_ALIGN(start_mem);
 972         while(addr < start_mem) {
 973                 int pseg;
 974 
 975                 sun4c_lock_tlb_entry(addr);
 976                 pseg = get_segmap(addr);
 977                 if(!s4cpseg_pool[pseg].hardlock) {
 978                         s4cpseg_pool[pseg].hardlock = 1;
 979                         remove_pseg_list(&s4cpseg_pool[pseg]);
 980                 }
 981                 addr += SUN4C_REAL_PGDIR_SIZE;
 982         }
 983 }
 984 
 985 static void sun4c_fork_hook(void *vtask, unsigned long kthread_usp)
 986 {
 987         struct task_struct *new_task = vtask;
 988 
 989         /* These pages must not cause a fault when traps
 990          * are off (such as in a window spill/fill) so
 991          * lock them down for the life of the task.
 992          */
 993         sun4c_lock_tlb_entry((unsigned long) new_task);
 994         sun4c_lock_tlb_entry(new_task->kernel_stack_page);
 995         if(kthread_usp)
 996                 sun4c_lock_tlb_entry(kthread_usp);
 997 }
 998 
 999 static void sun4c_release_hook(void *vtask)
1000 {
1001         struct task_struct *old_task = vtask;
1002         struct ctx_list *ctx_old;
1003         struct pt_regs *regs;
1004         unsigned char this_ctx = get_context();
1005         unsigned long flags;
1006 
1007         save_flags(flags); cli();
1008         if(old_task == &init_task)
1009                 panic("AIEEE releasing swapper");
1010         if(old_task->tss.context != -1) {
1011 
1012                 /* Clear from the mmu, all notions of this dead task. */
1013                 flush_user_windows();
1014                 set_context(old_task->tss.context);
1015                 sun4c_unload_context_from_tlb(old_task->tss.context);
1016                 set_context(this_ctx);
1017 
1018                 ctx_old = ctx_list_pool + old_task->tss.context;
1019                 remove_from_ctx_list(ctx_old);
1020                 add_to_free_ctxlist(ctx_old);
1021                 old_task->tss.context = -1;
1022         }
1023         regs = (struct pt_regs *) 
1024                 (((old_task->tss.ksp & ~0xfff)) + (0x1000 - TRACEREG_SZ));
1025         if(regs->u_regs[UREG_FP] > KERNBASE)
1026                 sun4c_unlock_tlb_entry(regs->u_regs[UREG_FP] & PAGE_MASK);
1027         sun4c_unlock_tlb_entry(old_task->kernel_stack_page);
1028         sun4c_unlock_tlb_entry((unsigned long) old_task);
1029         restore_flags(flags);
1030         /* bye bye... */
1031 }
1032 
1033 static void sun4c_flush_hook(void *vtask)
1034 {
1035         struct task_struct *dead_task = vtask;
1036 
1037         if(dead_task->tss.context != -1)
1038                 sun4c_flush_context();
1039 }
1040 
1041 static void sun4c_task_cacheflush(void *vtask)
1042 {
1043         struct task_struct *flush_task = vtask;
1044 
1045         if(flush_task->tss.context != -1)
1046                 sun4c_flush_context();
1047 }
1048 
1049 static void sun4c_exit_hook(void *vtask)
1050 {
1051 }
1052 
1053 /* Load up routines and constants for sun4c mmu */
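/* This is expected to be called once during early boot by the generic sparc
 * mm setup when it detects a sun4c (only the patching itself is shown here).
 * After it runs, the generic hooks resolve to the sun4c versions, e.g.
 * (illustrative only):
 *
 *      ld_mmu_sun4c();
 *      set_pte(ptep, mk_pte(page, page_shared));
 *
 * which lands in sun4c_set_pte() and sun4c_mk_pte() below.
 */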
1054 void ld_mmu_sun4c(void)
1055 {
1056         printk("Loading sun4c MMU routines\n");
1057 
1058         /* First the constants */
1059         pmd_shift = SUN4C_PMD_SHIFT;
1060         pmd_size = SUN4C_PMD_SIZE;
1061         pmd_mask = SUN4C_PMD_MASK;
1062         pgdir_shift = SUN4C_PGDIR_SHIFT;
1063         pgdir_size = SUN4C_PGDIR_SIZE;
1064         pgdir_mask = SUN4C_PGDIR_MASK;
1065 
1066         ptrs_per_pte = SUN4C_PTRS_PER_PTE;
1067         ptrs_per_pmd = SUN4C_PTRS_PER_PMD;
1068         ptrs_per_pgd = SUN4C_PTRS_PER_PGD;
1069 
1070         page_none = SUN4C_PAGE_NONE;
1071         page_shared = SUN4C_PAGE_SHARED;
1072         page_copy = SUN4C_PAGE_COPY;
1073         page_readonly = SUN4C_PAGE_READONLY;
1074         page_kernel = SUN4C_PAGE_KERNEL;
1075         page_invalid = SUN4C_PAGE_INVALID;
1076         
1077         /* Functions */
1078         invalidate = sun4c_invalidate;
1079         set_pte = sun4c_set_pte;
1080         switch_to_context = sun4c_switch_to_context;
1081         pmd_align = sun4c_pmd_align;
1082         pgdir_align = sun4c_pgdir_align;
1083         vmalloc_start = sun4c_vmalloc_start;
1084 
1085         pte_page = sun4c_pte_page;
1086         pmd_page = sun4c_pmd_page;
1087 
1088         sparc_update_rootmmu_dir = sun4c_update_rootmmu_dir;
1089 
1090         pte_none = sun4c_pte_none;
1091         pte_present = sun4c_pte_present;
1092         pte_inuse = sun4c_pte_inuse;
1093         pte_clear = sun4c_pte_clear;
1094         pte_reuse = sun4c_pte_reuse;
1095 
1096         pmd_none = sun4c_pmd_none;
1097         pmd_bad = sun4c_pmd_bad;
1098         pmd_present = sun4c_pmd_present;
1099         pmd_inuse = sun4c_pmd_inuse;
1100         pmd_clear = sun4c_pmd_clear;
1101         pmd_reuse = sun4c_pmd_reuse;
1102 
1103         pgd_none = sun4c_pgd_none;
1104         pgd_bad = sun4c_pgd_bad;
1105         pgd_present = sun4c_pgd_present;
1106         pgd_inuse = sun4c_pgd_inuse;
1107         pgd_clear = sun4c_pgd_clear;
1108 
1109         mk_pte = sun4c_mk_pte;
1110         pte_modify = sun4c_pte_modify;
1111         pgd_offset = sun4c_pgd_offset;
1112         pmd_offset = sun4c_pmd_offset;
1113         pte_offset = sun4c_pte_offset;
1114         pte_free_kernel = sun4c_pte_free_kernel;
1115         pmd_free_kernel = sun4c_pmd_free_kernel;
1116         pte_alloc_kernel = sun4c_pte_alloc_kernel;
1117         pmd_alloc_kernel = sun4c_pmd_alloc_kernel;
1118         pte_free = sun4c_pte_free;
1119         pte_alloc = sun4c_pte_alloc;
1120         pmd_free = sun4c_pmd_free;
1121         pmd_alloc = sun4c_pmd_alloc;
1122         pgd_free = sun4c_pgd_free;
1123         pgd_alloc = sun4c_pgd_alloc;
1124 
1125         pte_read = sun4c_pte_read;
1126         pte_write = sun4c_pte_write;
1127         pte_exec = sun4c_pte_exec;
1128         pte_dirty = sun4c_pte_dirty;
1129         pte_young = sun4c_pte_young;
1130         pte_cow = sun4c_pte_cow;
1131         pte_wrprotect = sun4c_pte_wrprotect;
1132         pte_rdprotect = sun4c_pte_rdprotect;
1133         pte_exprotect = sun4c_pte_exprotect;
1134         pte_mkclean = sun4c_pte_mkclean;
1135         pte_mkold = sun4c_pte_mkold;
1136         pte_uncow = sun4c_pte_uncow;
1137         pte_mkwrite = sun4c_pte_mkwrite;
1138         pte_mkread = sun4c_pte_mkread;
1139         pte_mkexec = sun4c_pte_mkexec;
1140         pte_mkdirty = sun4c_pte_mkdirty;
1141         pte_mkyoung = sun4c_pte_mkyoung;
1142         pte_mkcow = sun4c_pte_mkcow;
1143         get_fault_info = sun4c_get_fault_info;
1144         update_mmu_cache = sun4c_update_mmu_cache;
1145         mmu_exit_hook = sun4c_exit_hook;
1146         mmu_fork_hook = sun4c_fork_hook;
1147         mmu_release_hook = sun4c_release_hook;
1148         mmu_flush_hook = sun4c_flush_hook;
1149         mmu_task_cacheflush = sun4c_task_cacheflush;
1150         mmu_lockarea = sun4c_lockarea;
1151         mmu_unlockarea = sun4c_unlockarea;
1152         mmu_get_scsi_buffer = sun4c_get_scsi_buffer;
1153         mmu_release_scsi_buffer = sun4c_release_scsi_buffer;
1154 
1155         /* These should _never_ get called with two level tables. */
1156         pgd_set = 0;
1157         pgd_reuse = 0;
1158         pgd_page = 0;
1159 }
