arch/sparc/lib/mul.S

/* mul.S:       This routine was taken from glibc-1.09 and is covered
 *              by the GNU Library General Public License Version 2.
 */

/*
 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
 */

/*
 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.
 */
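
/*
 * A rough C model of the contract described above (a sketch for
 * reference only, not part of the build; the struct and function
 * names below are illustrative, not taken from this file):
 *
 *      #include <stdint.h>
 *
 *      struct mul64 {
 *              uint32_t lo;    // low 32 bits, returned in %o0
 *              int32_t  hi;    // high 32 bits, returned in %o1
 *      };
 *
 *      static struct mul64 ref_mul(int32_t a, int32_t b)
 *      {
 *              int64_t p = (int64_t)a * (int64_t)b;
 *              struct mul64 r = { (uint32_t)p, (int32_t)(p >> 32) };
 *              return r;
 *      }
 *
 * The short path below is taken when bits 12..31 of %o0 are all zero,
 * i.e. when the multiplier already fits in 12 bits.
 */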

        .globl .mul
.mul:
        mov     %o0, %y         ! multiplier -> Y
        andncc  %o0, 0xfff, %g0 ! test bits 12..31
        be      Lmul_shortway   ! if zero, can do it the short way
        andcc   %g0, %g0, %o4   ! zero the partial product and clear N and V

        /*
         * Long multiply.  32 steps, followed by a final shift step.
         */
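        /*
         * Each mulscc performs one V8 multiply step: the partial product
         * in %o4 shifts right one bit with N xor V entering at the top,
         * %o1 is added in when the low bit of %y is set, and %y shifts
         * right with the old low bit of %o4 entering its top.  One step
         * handles one multiplier bit, hence 32 steps plus a final shift.
         */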
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %o1, %o4   ! 13
        mulscc  %o4, %o1, %o4   ! 14
        mulscc  %o4, %o1, %o4   ! 15
        mulscc  %o4, %o1, %o4   ! 16
        mulscc  %o4, %o1, %o4   ! 17
        mulscc  %o4, %o1, %o4   ! 18
        mulscc  %o4, %o1, %o4   ! 19
        mulscc  %o4, %o1, %o4   ! 20
        mulscc  %o4, %o1, %o4   ! 21
        mulscc  %o4, %o1, %o4   ! 22
        mulscc  %o4, %o1, %o4   ! 23
        mulscc  %o4, %o1, %o4   ! 24
        mulscc  %o4, %o1, %o4   ! 25
        mulscc  %o4, %o1, %o4   ! 26
        mulscc  %o4, %o1, %o4   ! 27
        mulscc  %o4, %o1, %o4   ! 28
        mulscc  %o4, %o1, %o4   ! 29
        mulscc  %o4, %o1, %o4   ! 30
        mulscc  %o4, %o1, %o4   ! 31
        mulscc  %o4, %o1, %o4   ! 32
        mulscc  %o4, %g0, %o4   ! final shift

        ! If %o0 was negative, the result is
        !       (%o0 * %o1) + (%o1 << 32)
        ! We fix that here.

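        ! For example, with %o0 = -1 (0xffffffff) and %o1 = 2 the steps
        ! above leave 0x00000001fffffffe, i.e. %o4 = 1; subtracting %o1
        ! gives the correct upper word 0xffffffff for the true product -2.
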
#if 0
        tst     %o0
        bge     1f
        rd      %y, %o0

        ! %o0 was indeed negative; fix upper 32 bits of result by subtracting
        ! %o1 (i.e., return %o4 - %o1 in %o1).
        retl
        sub     %o4, %o1, %o1

1:
        retl
        mov     %o4, %o1
#else
        /* Faster code adapted from tege@sics.se's code for umul.S.  */
        sra     %o0, 31, %o2    ! make mask from sign bit
        and     %o1, %o2, %o2   ! %o2 = 0 or %o1, depending on sign of %o0
        rd      %y, %o0         ! get lower half of product
        retl
        sub     %o4, %o2, %o1   ! subtract compensation
                                !  and put upper half in place
#endif
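
/*
 * In C terms the compensation above is roughly (a sketch; the variable
 * names are illustrative, with a standing for %o0, b for %o1 and hi for
 * the uncorrected upper word in %o4):
 *
 *      int32_t mask = a >> 31;         // arithmetic shift: 0 or ~0
 *      hi -= b & mask;                 // subtract b only when a < 0
 *
 * which avoids the conditional branch used by the #if 0 variant.
 */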

Lmul_shortway:
        /*
         * Short multiply.  12 steps, followed by a final shift step.
         * The resulting bits are off by 12 and (32-12) = 20 bit positions,
         * but there is no problem with %o0 being negative (unlike above).
         */
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %g0, %o4   ! final shift

        /*
         *  %o4 has 20 of the bits that should be in the low part of the
         * result; %y has the bottom 12 (as %y's top 12).  That is:
         *
         *        %o4               %y
         * +----------------+----------------+
         * | -12- |   -20-  | -12- |   -20-  |
         * +------(---------+------)---------+
         *  --hi-- ----low-part----
         *
         * The upper 12 bits of %o4 should be sign-extended to form the
         * high part of the product (i.e., highpart = %o4 >> 20).
         */
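        /*
         * A rough C rendering of the reassembly below (a sketch; o4 and
         * y stand for the unsigned 32-bit register values):
         *
         *      uint32_t lo = (o4 << 12) | (y >> 20);  // returned in %o0
         *      int32_t  hi = (int32_t)o4 >> 20;       // returned in %o1
         */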

        rd      %y, %o5
        sll     %o4, 12, %o0    ! shift middle bits left 12
        srl     %o5, 20, %o5    ! shift low bits right 20, zero fill at left
        or      %o5, %o0, %o0   ! construct low part of result
        retl
        sra     %o4, 20, %o1    ! ... and extract high part of result
