/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 | Multiply a 12 byte fixed point number by another fixed point number.      |
 |                                                                           |
 | Copyright (C) 1992,1994                                                   |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@vaxc.cc.monash.edu.au    |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, const Xsig *b)                              |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 | be wrong.                                                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
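
/*
 * Layout note: an Xsig is the emulator's 96-bit extended significand,
 * three 32-bit words from least to most significant.  The typedef is
 * quoted here for reference from the emulator's fpu_emu.h header; it
 * is not part of this file:
 *
 *     typedef struct {
 *         unsigned long lsw;
 *         unsigned long midw;
 *         unsigned long msw;
 *     } Xsig;
 *
 * The C sketches in the comments after each routine below assume this
 * layout.
 */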
        .file   "mul_Xsig.S"


#include "fpu_asm.h"

.text
        .align 2,144            /* pad with 0x90 (nop) */

/*
 * mul32_Xsig: multiply the 96-bit significand at *x by the 32-bit
 * scalar b, keeping the most significant 96 bits of the 128-bit
 * product, i.e. floor(x * b / 2^32).  The words at -12, -8 and
 * -4(%ebp) accumulate the result, least significant first.
 */
.globl _mul32_Xsig
_mul32_Xsig:
        pushl %ebp
        movl %esp,%ebp
        subl $16,%esp
        pushl %esi

        movl PARAM1,%esi
        movl PARAM2,%ecx

        xor %eax,%eax
        movl %eax,-4(%ebp)
        movl %eax,-8(%ebp)

        movl (%esi),%eax        /* lsl of Xsig */
        mull %ecx               /* b */
        movl %edx,-12(%ebp)     /* low word of the product falls below the result */

        movl 4(%esi),%eax       /* midl of Xsig */
        mull %ecx               /* b */
        addl %eax,-12(%ebp)
        adcl %edx,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull %ecx               /* b */
        addl %eax,-8(%ebp)
        adcl %edx,-4(%ebp)

        movl -12(%ebp),%eax
        movl %eax,(%esi)
        movl -8(%ebp),%eax
        movl %eax,4(%esi)
        movl -4(%ebp),%eax
        movl %eax,8(%esi)

        popl %esi
        leave
        ret
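
/*
 * For reference, a rough C equivalent of mul32_Xsig.  This is a sketch
 * of the semantics under the Xsig layout quoted above, not the code
 * the kernel builds; mul32_Xsig_ref is a hypothetical name:
 *
 *     void mul32_Xsig_ref(Xsig *x, unsigned int b)
 *     {
 *         unsigned long long p0 = (unsigned long long)x->lsw  * b;
 *         unsigned long long p1 = (unsigned long long)x->midw * b;
 *         unsigned long long p2 = (unsigned long long)x->msw  * b;
 *         unsigned long long acc;
 *
 *         acc = (p0 >> 32) + (unsigned int)p1;  // bits 32..63 of x*b
 *         x->lsw  = (unsigned int)acc;
 *         acc = (acc >> 32) + (p1 >> 32) + (unsigned int)p2;
 *         x->midw = (unsigned int)acc;
 *         acc = (acc >> 32) + (p2 >> 32);
 *         x->msw  = (unsigned int)acc;
 *     }
 *
 * Like the assembly, this keeps bits 32..127 of the 128-bit product;
 * for this routine the result is exact.
 */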


        .align 2,144

/*
 * mul64_Xsig: multiply the 96-bit significand at *x by the 64-bit
 * value at *b, keeping the most significant 96 bits of the 160-bit
 * product.  The lsl(x)*lsl(b) partial product and the low halves of
 * the two cross products fall below bit 64 and are dropped, so the
 * ls bit of the result may be off, as the header warns.
 */
.globl _mul64_Xsig
_mul64_Xsig:
        pushl %ebp
        movl %esp,%ebp
        subl $16,%esp
        pushl %esi

        movl PARAM1,%esi
        movl PARAM2,%ecx

        xor %eax,%eax
        movl %eax,-4(%ebp)
        movl %eax,-8(%ebp)

        movl (%esi),%eax        /* lsl of Xsig */
        mull 4(%ecx)            /* msl of b */
        movl %edx,-12(%ebp)

        movl 4(%esi),%eax       /* midl of Xsig */
        mull (%ecx)             /* lsl of b */
        addl %edx,-12(%ebp)
        adcl $0,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 4(%esi),%eax       /* midl of Xsig */
        mull 4(%ecx)            /* msl of b */
        addl %eax,-12(%ebp)
        adcl %edx,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull (%ecx)             /* lsl of b */
        addl %eax,-12(%ebp)
        adcl %edx,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull 4(%ecx)            /* msl of b */
        addl %eax,-8(%ebp)
        adcl %edx,-4(%ebp)

        movl -12(%ebp),%eax
        movl %eax,(%esi)
        movl -8(%ebp),%eax
        movl %eax,4(%esi)
        movl -4(%ebp),%eax
        movl %eax,8(%esi)

        popl %esi
        leave
        ret
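
/*
 * For reference, the value mul64_Xsig computes is (up to the dropped
 * low-order carries) floor(X * b / 2^64), where X is the 96-bit
 * significand.  A sketch using the unsigned __int128 GCC/Clang
 * extension; mul64_Xsig_ref is a hypothetical name, not the kernel's
 * code:
 *
 *     void mul64_Xsig_ref(Xsig *x, const unsigned long long *b)
 *     {
 *         unsigned long long lo64 =
 *             ((unsigned long long)x->midw << 32) | x->lsw;
 *         unsigned __int128 r =
 *             (unsigned __int128)x->msw * *b            // weight 2^64
 *             + (((unsigned __int128)lo64 * *b) >> 64); // carry from below
 *         x->lsw  = (unsigned int)r;
 *         x->midw = (unsigned int)(r >> 32);
 *         x->msw  = (unsigned int)(r >> 64);
 *     }
 *
 * This form is exact, so its last bit can differ from the assembly,
 * which drops some of the low-order partial products.
 */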



        .align 2,144

/*
 * mul_Xsig_Xsig: multiply the 96-bit significand at *x by the 96-bit
 * significand at *b, keeping the most significant 96 bits of the
 * 192-bit product.  Partial products lying entirely below bit 96 are
 * skipped and only the high halves of those straddling it are added,
 * so the ls bit of the result may be off, as the header warns.
 */
.globl _mul_Xsig_Xsig
_mul_Xsig_Xsig:
        pushl %ebp
        movl %esp,%ebp
        subl $16,%esp
        pushl %esi

        movl PARAM1,%esi
        movl PARAM2,%ecx

        xor %eax,%eax
        movl %eax,-4(%ebp)
        movl %eax,-8(%ebp)

        movl (%esi),%eax        /* lsl of Xsig */
        mull 8(%ecx)            /* msl of b */
        movl %edx,-12(%ebp)

        movl 4(%esi),%eax       /* midl of Xsig */
        mull 4(%ecx)            /* midl of b */
        addl %edx,-12(%ebp)
        adcl $0,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull (%ecx)             /* lsl of b */
        addl %edx,-12(%ebp)
        adcl $0,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 4(%esi),%eax       /* midl of Xsig */
        mull 8(%ecx)            /* msl of b */
        addl %eax,-12(%ebp)
        adcl %edx,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull 4(%ecx)            /* midl of b */
        addl %eax,-12(%ebp)
        adcl %edx,-8(%ebp)
        adcl $0,-4(%ebp)

        movl 8(%esi),%eax       /* msl of Xsig */
        mull 8(%ecx)            /* msl of b */
        addl %eax,-8(%ebp)
        adcl %edx,-4(%ebp)

        movl -12(%ebp),%edx
        movl %edx,(%esi)
        movl -8(%ebp),%edx
        movl %edx,4(%esi)
        movl -4(%ebp),%edx
        movl %edx,8(%esi)

        popl %esi
        leave
        ret
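
/*
 * For reference, mul_Xsig_Xsig computes approximately
 * floor(X * B / 2^96) for two 96-bit significands.  A sketch with the
 * unsigned __int128 GCC/Clang extension; mul_Xsig_Xsig_ref is a
 * hypothetical name, and like the assembly it folds the lowest-order
 * partial products in only approximately:
 *
 *     void mul_Xsig_Xsig_ref(Xsig *x, const Xsig *b)
 *     {
 *         unsigned long long xlo =
 *             ((unsigned long long)x->midw << 32) | x->lsw;
 *         unsigned long long blo =
 *             ((unsigned long long)b->midw << 32) | b->lsw;
 *         unsigned __int128 mid =
 *             (unsigned __int128)x->msw * blo
 *             + (unsigned __int128)b->msw * xlo
 *             + (((unsigned __int128)xlo * blo) >> 64);
 *         unsigned __int128 r =
 *             ((unsigned __int128)x->msw * b->msw << 32) + (mid >> 32);
 *         x->lsw  = (unsigned int)r;
 *         x->midw = (unsigned int)(r >> 32);
 *         x->msw  = (unsigned int)(r >> 64);
 *     }
 */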
