arch/sparc/kernel/etrap.S

/* $Id: etrap.S,v 1.16 1996/02/20 07:45:01 davem Exp $
 * etrap.S: Sparc trap window preparation for entry into the
 *          Linux kernel.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/cprefix.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>

/* Registers to not touch at all. */
#define t_psr        l0 /* Set by caller */
#define t_pc         l1 /* Set by caller */
#define t_npc        l2 /* Set by caller */
#define t_wim        l3 /* Set by caller */
#define t_twinmask   l4 /* Set at beginning of this entry routine. */
#define t_kstack     l5 /* Set right before pt_regs frame is built */
#define t_retpc      l6 /* If you change this, change winmacro.h header file */
#define t_systable   l7 /* Never touch this, could be the syscall table ptr. */
#define curptr       g4 /* Set after pt_regs frame is built */

        .text
        .align 4

        /* SEVEN WINDOW PATCH INSTRUCTIONS */
        .globl  tsetup_7win_patch1, tsetup_7win_patch2
        .globl  tsetup_7win_patch3, tsetup_7win_patch4
        .globl  tsetup_7win_patch5, tsetup_7win_patch6
tsetup_7win_patch1:     sll     %t_wim, 0x6, %t_wim
tsetup_7win_patch2:     and     %g2, 0x7f, %g2
tsetup_7win_patch3:     and     %g2, 0x7f, %g2
tsetup_7win_patch4:     and     %g1, 0x7f, %g1
tsetup_7win_patch5:     sll     %t_wim, 0x6, %t_wim
tsetup_7win_patch6:     and     %g2, 0x7f, %g2
        /* END OF PATCH INSTRUCTIONS */

        /* At trap time, interrupts and all generic traps do the
         * following:
         *
         * rd   %psr, %l0
         * b    some_handler
         * rd   %wim, %l3
         * nop
         *
         * Then, if 'some_handler' needs a trap frame (i.e. it has to
         * call C code and the trap cannot be handled in-window), it
         * invokes the SAVE_ALL macro in entry.S, which does
         *
         * sethi        %hi(trap_setup), %l4
         * jmpl         %l4 + %lo(trap_setup), %l6
         * mov          1, %l4
         */
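
        /* Note: "jmpl %l4 + %lo(trap_setup), %l6" leaves the address of the
         * jmpl itself in %l6 (t_retpc).  That is why every exit below uses
         * "jmpl %t_retpc + 0x8" -- it resumes just past the jmpl and its
         * delay slot in SAVE_ALL.
         */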

        /* 2 3 4  window number
         * -----
         * O T S  mnemonic
         *
         * O == Current window before trap
         * T == Window entered when trap occurred
         * S == Window we will need to save if (1<<T) == %wim
         *
         * Before execution gets here, it must be guaranteed that
         * %l0 contains the trap time %psr, %l1 and %l2 contain the
         * trap pc and npc, and %l3 contains the trap time %wim.
         */

        .globl  trap_setup, tsetup_patch1, tsetup_patch2
        .globl  tsetup_patch3, tsetup_patch4
        .globl  tsetup_patch5, tsetup_patch6
trap_setup:
        /* Calculate the mask of the trap window.  See whether we came
         * from user or kernel mode and branch accordingly.
         */
        mov     1, %t_twinmask
        sll     %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << CWP)
        andcc   %t_psr, PSR_PS, %g0              ! fromsupv_p = (psr & PSR_PS)
        be      trap_setup_from_user             ! nope, from user mode
         nop
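
        /* For example, if the trap left us with CWP = 3, %t_twinmask is now
         * 0x08.  Shifting by the whole %psr works because sll only uses the
         * low five bits of the shift count, and those bits of %psr are the
         * CWP field.
         */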

        /* From kernel, allocate more kernel stack and
         * build a pt_regs trap frame.
         */
        sub     %fp, (REGWIN_SZ + TRACEREG_SZ), %t_kstack
        STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)

        /* See if we are in the trap window. */
        andcc   %t_twinmask, %t_wim, %g0
        be      1f
         nop

        b       trap_setup_kernel_spill         ! in trap window, clean up
         nop

        /* Trap from kernel with a window available.
         * Just do it...
         */
1:
        jmpl    %t_retpc + 0x8, %g0     ! return to caller
         mov    %t_kstack, %sp          ! jump onto new stack

trap_setup_kernel_spill:
        LOAD_CURRENT(curptr, g1)
        ld      [%curptr + THREAD_UMASK], %g1
        orcc    %g0, %g1, %g0
        bne     trap_setup_user_spill   ! there are some user windows, yuck
         nop

                /* Spill from kernel, but only kernel windows, adjust
                 * %wim and go.
                 */
                srl     %t_wim, 0x1, %g2        ! begin computation of new %wim
tsetup_patch1:  sll     %t_wim, 0x7, %t_wim     ! patched on 7 window Sparcs
                or      %t_wim, %g2, %g2
tsetup_patch2:  and     %g2, 0xff, %g2          ! patched on 7 window Sparcs
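
                /* A worked example, assuming eight windows: if the trap
                 * window is window 3 (%t_wim == 0x08), the srl/sll/or/and
                 * sequence above rotates the mask right by one, giving
                 * %g2 = 0x04.  The save/wr/STORE_WINDOW/restore below then
                 * flushes window 2 (the oldest resident kernel window) to
                 * its own kernel stack and marks it as the new invalid
                 * window, freeing the trap window for use.
                 */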

        save    %g0, %g0, %g0

        /* Set new %wim value */
        wr      %g2, 0x0, %wim
        WRITE_PAUSE

        /* Save the kernel window onto the corresponding stack. */
        STORE_WINDOW(sp)

        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0     ! return to caller
         mov    %t_kstack, %sp          ! and onto new kernel stack

trap_setup_from_user:
        /* We can't use %curptr yet. */
        LOAD_CURRENT(t_kstack, t_twinmask)
        mov     1, %t_twinmask
        ld      [%t_kstack + TASK_SAVED_KSTACK], %t_kstack
        sll     %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << CWP)

        /* Build pt_regs frame. */
        STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)

        /* Clear current->tss.w_saved */
        LOAD_CURRENT(curptr, g1)
        st      %g0, [%curptr + THREAD_W_SAVED]

        /* See if we are in the trap window. */
        andcc   %t_twinmask, %t_wim, %g0
        bne     trap_setup_user_spill           ! yep we are
         orn    %g0, %t_twinmask, %g1           ! negate trap win mask into %g1

                /* Trap from user, but not into the invalid window.
                 * Calculate the new umask.  The way this works is:
                 * every window from the one just below the trap time
                 * %wim down to the one just above the window we are in
                 * now is a user window.  A diagram:
                 *
                 *      7 6 5 4 3 2 1 0    window number
                 *      ---------------
                 *        I     L T        mnemonic
                 *
                 * Window 'I' is the invalid window in our example,
                 * window 'L' is the window the user was in when
                 * the trap occurred, window T is the trap window
                 * we are in now.  Therefore windows 5, 4 and 3 are
                 * user windows.  The following sequence computes
                 * the user winmask to represent this.
                 */
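                /* Worked example for the diagram above, assuming eight
                 * windows: %t_wim = 0x40 (window 6 invalid) and
                 * %t_twinmask = 0x04 (we are in window 2).  The subcc
                 * gives 0x3c, the andn strips the trap window bit, and
                 * the result 0x38 marks windows 5, 4 and 3 as user
                 * windows.  When the trap window number is above the
                 * invalid window number the subtraction goes negative;
                 * the conditionally executed "sub %g2, 0x1, %g2" in the
                 * delay slot plus the 0xff mask make the bits wrap
                 * around correctly in that case.
                 */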
                subcc   %t_wim, %t_twinmask, %g2
                bneg,a  1f
                 sub    %g2, 0x1, %g2
1:
                andn    %g2, %t_twinmask, %g2
tsetup_patch3:  and     %g2, 0xff, %g2                  ! patched on 7win Sparcs
                st      %g2, [%curptr + THREAD_UMASK]   ! store new umask

                jmpl    %t_retpc + 0x8, %g0             ! return to caller
                 mov    %t_kstack, %sp                  ! and onto kernel stack

trap_setup_user_spill:
                /* A spill occurred from either kernel or user mode
                 * and there exist some user windows to deal with.
                 * A mask of the currently valid user windows
                 * is in %g1 upon entry to here.
                 */

tsetup_patch4:  and     %g1, 0xff, %g1          ! patched on 7win Sparcs, mask
                srl     %t_wim, 0x1, %g2        ! compute new %wim
tsetup_patch5:  sll     %t_wim, 0x7, %t_wim     ! patched on 7win Sparcs
                or      %t_wim, %g2, %g2        ! %g2 is new %wim
tsetup_patch6:  and     %g2, 0xff, %g2          ! patched on 7win Sparcs
                andn    %g1, %g2, %g1           ! clear this bit in %g1
                st      %g1, [%curptr + THREAD_UMASK]
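
                /* E.g. with eight windows and the trap window (window 3,
                 * %t_wim == 0x08) invalid: the new %wim in %g2 is 0x04, and
                 * window 2 is dropped from the user window mask (coming
                 * from user mode, %g1 goes from 0xf7 to 0xf3) because the
                 * code below is about to write it out to the user stack,
                 * or stash it via SAVE_BOLIXED_USER_STACK if that stack
                 * turns out to be bad.
                 */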

        save    %g0, %g0, %g0

        wr      %g2, 0x0, %wim
        WRITE_PAUSE

        /* Call the MMU-architecture dependent stack checking
         * routine.
         */
        .globl  C_LABEL(tsetup_mmu_patchme)
C_LABEL(tsetup_mmu_patchme):    b       C_LABEL(tsetup_sun4c_stackchk)
                                 andcc  %sp, 0x7, %g0
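
        /* tsetup_mmu_patchme is presumably repointed at boot to the stack
         * checker matching the MMU: the branch as written goes to
         * tsetup_sun4c_stackchk, and SRMMU kernels would patch it to
         * tsetup_srmmu_stackchk.  The andcc in the delay slot performs the
         * 8-byte alignment test that both checkers consume as their first
         * branch condition.
         */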

trap_setup_user_stack_is_bolixed:
        /* From user/kernel into the invalid window with a bad user
         * stack.  Save the window destined for the bad user stack
         * away, and return to the caller.
         */
        SAVE_BOLIXED_USER_STACK(curptr, g3)
        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0
         mov    %t_kstack, %sp

trap_setup_good_ustack:
        STORE_WINDOW(sp)

trap_setup_finish_up:
        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0
         mov    %t_kstack, %sp

        /* Architecture specific stack checking routines.  When either
         * of these routines is called, the globals are free to use
         * as they have been safely stashed on the new kernel stack.
         * Thus the definition below, for simplicity.
         */
#define glob_tmp     g1

        .globl  C_LABEL(tsetup_sun4c_stackchk)
C_LABEL(tsetup_sun4c_stackchk):
        /* Done by caller: andcc %sp, 0x7, %g0 */
        be      1f
         sra    %sp, 29, %glob_tmp

        b       trap_setup_user_stack_is_bolixed
         nop

1:
        add     %glob_tmp, 0x1, %glob_tmp
        andncc  %glob_tmp, 0x1, %g0
        be      1f
         and    %sp, 0xfff, %glob_tmp           ! delay slot

        b       trap_setup_user_stack_is_bolixed
         nop
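
        /* The two tests above reject a %sp that is not 8-byte aligned or
         * that falls in what the later comment calls the vma hole:
         * "sra %sp, 29" leaves 0 or -1 only when the top three address
         * bits are all equal, and the add/andncc pair turns anything
         * else into a non-zero result.
         */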

        /* See if our dump area will be on more than one
         * page.
         */
1:
        add     %glob_tmp, 0x38, %glob_tmp
        andncc  %glob_tmp, 0xff8, %g0
        be      tsetup_sun4c_onepage            ! only one page to check
         lda    [%sp] ASI_PTE, %glob_tmp        ! have to check first page anyway
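
        /* The lda above fetches the sun4c PTE for the page holding %sp.
         * The srl/cmp 0x6 tests below assume the usual sun4c layout of
         * the top PTE bits (valid, writable, system), so 0x6 means a
         * valid page that the user is allowed to write.
         */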

tsetup_sun4c_twopages:
        /* Is first page ok permission wise? */
        srl     %glob_tmp, 29, %glob_tmp
        cmp     %glob_tmp, 0x6
        be      1f
         add    %sp, 0x38, %glob_tmp            /* Is second page in vma hole? */

        b       trap_setup_user_stack_is_bolixed
         nop

1:
        sra     %glob_tmp, 29, %glob_tmp
        add     %glob_tmp, 0x1, %glob_tmp
        andncc  %glob_tmp, 0x1, %g0
        be      1f
         add    %sp, 0x38, %glob_tmp

        b       trap_setup_user_stack_is_bolixed
         nop

1:
        lda     [%glob_tmp] ASI_PTE, %glob_tmp

tsetup_sun4c_onepage:
        srl     %glob_tmp, 29, %glob_tmp
        cmp     %glob_tmp, 0x6                          ! can user write to it?
        be      trap_setup_good_ustack                  ! success
         nop

        b       trap_setup_user_stack_is_bolixed
         nop

        .globl  C_LABEL(tsetup_srmmu_stackchk)
C_LABEL(tsetup_srmmu_stackchk):
        /* Check the results of the caller's andcc %sp, 0x7, %g0 */
        bne     trap_setup_user_stack_is_bolixed
         sethi  %hi(KERNBASE), %glob_tmp
        cmp     %glob_tmp, %sp
        bleu    trap_setup_user_stack_is_bolixed
         nop
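
        /* A user stack must lie below KERNBASE; anything at or above it
         * is rejected outright.
         */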

        /* Clear the fault status and turn on the no_fault bit. */
        lda     [%g0] ASI_M_MMUREGS, %glob_tmp          ! read MMU control
        or      %glob_tmp, 0x2, %glob_tmp               ! or in no_fault bit
        sta     %glob_tmp, [%g0] ASI_M_MMUREGS          ! set it
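
        /* With no_fault set, a bad user stack makes the STORE_WINDOW below
         * record a fault in the fault status register instead of trapping,
         * so it can be detected by the SFSR check that follows.
         */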

        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)

        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
        sta     %glob_tmp, [%g0] ASI_M_MMUREGS
        mov     AC_M_SFAR, %glob_tmp
        lda     [%glob_tmp] ASI_M_MMUREGS, %g0
        mov     AC_M_SFSR, %glob_tmp
        lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp    ! save away status of winstore
        andcc   %glob_tmp, 0x2, %g0                     ! did we fault?
        be      trap_setup_finish_up                    ! cool beans, success
         nop

        b       trap_setup_user_stack_is_bolixed        ! we faulted, ugh
         nop

