1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/euser/epoc/x86/uc_realx.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,3348 @@
1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\euser\epoc\x86\uc_realx.cia
1.18 +//
1.19 +//
1.20 +
1.21 +
1.22 +#include "u32std.h"
1.23 +#include <e32math.h>
1.24 +
1.25 +
1.26 +void TRealXPanic(TInt aErr);
1.27 +
LOCAL_C __NAKED__ void TRealXPanicEax(void)
	{
	// Panic the current thread with the TRealX error code passed in eax.
	// Naked: no prologue/epilogue; TRealXPanic() does not return.
	asm("push eax");						// push error code as the C argument
	asm("call %a0": : "i"(&TRealXPanic));
	}
1.33 +
LOCAL_C __NAKED__ void TRealXRealIndefinite(void)
	{
	// Return the 'real indefinite' quiet NaN in the register triple used for
	// TRealX results throughout this file: sign/exponent in ecx, 64-bit
	// mantissa in edx:ebx. Used for invalid operations (e.g. inf - inf, 0/0).
	asm("mov ecx, 0xFFFF0001");	// exponent=FFFF, sign negative
	asm("mov edx, 0xC0000000");	// mantissa=C0000000 00000000 (quiet NaN pattern)
	asm("xor ebx, ebx");
	asm("mov eax, -6");			// return KErrArgument
	asm("ret");
	}
1.43 +
LOCAL_C __NAKED__ void TRealXBinOpNaN(void)
	{
	// generic routine to process NaN's in binary operations
	// destination operand in ecx,edx:ebx (sign/exponent in ecx, mantissa in edx:ebx)
	// source operand at [esi]
	// Result: the NaN with the larger significand, quietened, is returned in
	// ecx,edx:ebx with eax=KErrArgument. An infinity operand (mantissa
	// 80000000 00000000) is not a NaN and is ignored in favour of the other.

	asm("mov eax, [esi+8]");		// source operand into eax,edi:ebp
	asm("mov edi, [esi+4]");
	asm("mov ebp, [esi]");
	asm("cmp ecx, 0xFFFF0000");		// check if dest is a NaN (exponent FFFF...)
	asm("jb short TRealXBinOpNaN1");	// if not, swap them
	asm("cmp edx, 0x80000000");		// ...and mantissa != 80000000 00000000 (infinity)
	asm("jne short TRealXBinOpNaN2");
	asm("test ebx, ebx");
	asm("jne short TRealXBinOpNaN2");
	asm("TRealXBinOpNaN1:");		// swap the operands so a NaN (if any) is in ecx,edx:ebx
	asm("xchg ecx, eax");
	asm("xchg edx, edi");
	asm("xchg ebx, ebp");
	asm("TRealXBinOpNaN2:");
	asm("cmp eax, 0xFFFF0000");		// check if both operands are NaNs
	asm("jb short TRealXBinOpNaN4");	// if not, ignore non-NaN operand
	asm("cmp edi, 0x80000000");		// source with exp FFFF: infinity or NaN?
	asm("jne short TRealXBinOpNaN3");
	asm("test ebp, ebp");
	asm("je short TRealXBinOpNaN4");	// source is infinity - ignore it
	asm("TRealXBinOpNaN3:");		// if both operands are NaN's, compare significands
	asm("cmp edx, edi");
	asm("ja short TRealXBinOpNaN4");
	asm("jb short TRealXBinOpNaN5");
	asm("cmp ebx, ebp");
	asm("jae short TRealXBinOpNaN4");
	asm("TRealXBinOpNaN5:");		// come here if dest is smaller - copy source to dest
	asm("mov ecx, eax");
	asm("mov edx, edi");
	asm("mov ebx, ebp");
	asm("TRealXBinOpNaN4:");		// NaN with larger significand is in ecx,edx:ebx
	asm("or edx, 0x40000000");		// convert an SNaN to a QNaN (set top mantissa bit below MSB)
	asm("mov eax, -6");				// return KErrArgument
	asm("ret");
	}
1.85 +
// Add TRealX at [esi] + ecx,edx:ebx
// Result in ecx,edx:ebx (sign/exponent in ecx with rounding flags in ch,
// 64-bit mantissa in edx:ebx)
// Error code in eax (KErrNone, KErrArgument, KErrOverflow or KErrUnderflow)
// Note: +0 + +0 = +0, -0 + -0 = -0, +0 + -0 = -0 + +0 = +0,
// +/-0 + X = X + +/-0 = X, X + -X = -X + X = +0
// Rounding is to nearest, ties to even; ch bit 0 = rounded down, bit 1 = rounded up.
LOCAL_C __NAKED__ void TRealXAdd()
	{
	asm("xor ch, ch");				// clear rounding flags
	asm("cmp ecx, 0xFFFF0000");		// check if dest=NaN or infinity
	asm("jnc addfpsd");				// branch if it is
	asm("mov eax, [esi+8]");		// fetch sign/exponent of source
	asm("cmp eax, 0xFFFF0000");		// check if source=NaN or infinity
	asm("jnc addfpss");				// branch if it is
	asm("cmp eax, 0x10000");		// check if source=0 (exponent zero)
	asm("jc addfp0s");				// branch if it is
	asm("cmp ecx, 0x10000");		// check if dest=0
	asm("jc addfp0d");				// branch if it is
	asm("and cl, 1");				// clear bits 1-7 of cl, keeping sign bit
	asm("and al, 1");				// clear bits 1-7 of al, keeping sign bit
	asm("mov ch, cl");
	asm("xor ch, al");				// xor of signs into ch bit 0
	asm("add ch, ch");				// shift it up to bit 1
	asm("or cl, ch");				// and into cl bit 1 (set => effective subtraction)
	asm("or al, ch");				// and into al bit 1
	asm("xor ch, ch");				// clear rounding flags
	asm("mov ebp, [esi]");			// fetch source mantissa 0-31
	asm("mov edi, [esi+4]");		// fetch source mantissa 32-63
	asm("ror ecx, 16");				// dest exponent into cx
	asm("ror eax, 16");				// source exponent into ax
	asm("push ecx");				// push dest exponent/sign
	asm("sub cx, ax");				// cx = dest exponent - source exponent
	asm("je short addfp3b");		// if equal, no shifting required
	asm("ja short addfp1");			// branch if dest exponent >= source exponent
	asm("xchg ebx, ebp");			// make sure edi:ebp contains the mantissa to be shifted
	asm("xchg edx, edi");
	asm("xchg eax, [esp]");			// and larger exponent and corresponding sign is on the stack
	asm("neg cx");					// make cx positive = number of right shifts needed
	asm("addfp1:");
	asm("cmp cx, 64");				// if more than 64 shifts needed
	asm("ja addfp2");				// branch to output larger number
	asm("jb addfp3");				// branch if <64 shifts
	asm("mov eax, edi");			// exactly 64 shifts needed - rounding word=mant high
	asm("test ebp, ebp");			// check bits lost
	asm("jz short addfp3a");
	asm("or ch, 1");				// if not all zero, set rounded-down flag
	asm("addfp3a:");
	asm("xor edi, edi");			// clear shifted mantissa edi:ebp
	asm("xor ebp, ebp");
	asm("jmp short addfp5");		// finished shifting
	asm("addfp3b:");				// exponents equal
	asm("xor eax, eax");			// set rounding word=0
	asm("jmp short addfp5");
	asm("addfp3:");
	asm("cmp cl, 32");				// 32 or more shifts needed ?
	asm("jb short addfp4");			// skip if <32
	asm("mov eax, ebp");			// rounding word=mant low
	asm("mov ebp, edi");			// mant low=mant high
	asm("xor edi, edi");			// mant high=0
	asm("sub cl, 32");				// reduce count by 32
	asm("jz short addfp5");			// if now zero, finished shifting
	asm("shrd edi, eax, cl");		// shift ebp:eax:edi right by cl bits
	asm("shrd eax, ebp, cl");		//
	asm("shr ebp, cl");				//
	asm("test edi, edi");			// check bits lost in shift (caught in edi)
	asm("jz short addfp5");			// if all zero, finished
	asm("or ch, 1");				// else set rounded-down flag
	asm("xor edi, edi");			// clear edi again
	asm("jmp short addfp5");		// finished shifting
	asm("addfp4:");					// <32 shifts needed now
	asm("xor eax, eax");			// clear rounding word initially
	asm("shrd eax, ebp, cl");		// shift edi:ebp:eax right by cl bits
	asm("shrd ebp, edi, cl");		//
	asm("shr edi, cl");				//

	asm("addfp5:");
	asm("mov [esp+3], ch");			// rounding flag into ch image on stack
	asm("pop ecx");					// recover sign and exponent into ecx, with rounding flag
	asm("ror ecx, 16");				// into normal position
	asm("test cl, 2");				// addition or subtraction needed ?
	asm("jnz short subfp1");		// branch if subtraction
	asm("add ebx,ebp");				// addition required - add mantissas
	asm("adc edx,edi");				//
	asm("jnc short roundfp");		// branch if no carry
	asm("rcr edx,1");				// shift carry right into mantissa
	asm("rcr ebx,1");				//
	asm("rcr eax,1");				// and into rounding word
	asm("jnc short addfp5a");
	asm("or ch, 1");				// if 1 shifted out, set rounded-down flag
	asm("addfp5a:");
	asm("add ecx, 0x10000");		// and increment exponent

	// perform rounding based on rounding word in eax and rounding flag in ch
	// (round to nearest, ties to even)
	asm("roundfp:");
	asm("cmp eax, 0x80000000");
	asm("jc roundfp0");				// if rounding word<80000000, round down
	asm("ja roundfp1");				// if >80000000, round up
	asm("test ch, 1");
	asm("jnz short roundfp1");		// if rounded-down flag set, round up
	asm("test ch, 2");
	asm("jnz short roundfp0");		// if rounded-up flag set, round down
	asm("test bl, 1");				// else test mantissa lsb
	asm("jz short roundfp0");		// round down if 0, up if 1 [round to even]
	asm("roundfp1:");				// Come here to round up
	asm("add ebx, 1");				// increment mantissa
	asm("adc edx,0");				//
	asm("jnc roundfp1a");			// if no carry OK
	asm("rcr edx,1");				// else shift carry into mantissa [edx:ebx=0 here]
	asm("add ecx, 0x10000");		// and increment exponent
	asm("roundfp1a:");
	asm("cmp ecx, 0xFFFF0000");		// check for overflow
	asm("jae short addfpovfw");		// jump if overflow
	asm("mov ch, 2");				// else set rounded-up flag
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	asm("roundfp0:");				// Come here to round down
	asm("cmp ecx, 0xFFFF0000");		// check for overflow
	asm("jae short addfpovfw");		// jump if overflow
	asm("test eax, eax");			// else check if rounding word zero
	asm("jz short roundfp0a");		// if so, leave rounding flags as they are
	asm("mov ch, 1");				// else set rounded-down flag
	asm("roundfp0a:");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	asm("addfpovfw:");				// Come here if overflow occurs
	asm("xor ch, ch");				// clear rounding flags, exponent=FFFF
	asm("xor ebx, ebx");
	asm("mov edx, 0x80000000");		// mantissa=80000000 00000000 for infinity
	asm("mov eax, -9");				// return KErrOverflow
	asm("ret");

	// exponents differ by more than 64 - output larger number
	asm("addfp2:");
	asm("pop ecx");					// recover exponent and sign
	asm("ror ecx, 16");				// into normal position
	asm("or ch, 1");				// set rounded-down flag
	asm("test cl, 2");				// check if signs the same
	asm("jz addfp2a");
	asm("xor ch, 3");				// if not, set rounded-up flag
	asm("addfp2a:");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// signs differ, so must subtract mantissas
	asm("subfp1:");
	asm("add ch, ch");				// if rounded-down flag set, change it to rounded-up
	asm("neg eax");					// subtract rounding word from 0
	asm("sbb ebx, ebp");			// and subtract mantissas with borrow
	asm("sbb edx, edi");			//
	asm("jnc short subfp2");		// if no borrow, sign is correct
	asm("xor cl, 1");				// else change sign of result
	asm("shr ch, 1");				// change rounding back to rounded-down
	asm("not eax");					// negate rounding word
	asm("not ebx");					// and mantissa
	asm("not edx");					//
	asm("add eax,1");				// two's complement negation of eax:edx:ebx
	asm("adc ebx,0");				//
	asm("adc edx,0");				//
	asm("subfp2:");
	asm("jnz short subfp3");		// branch if edx non-zero at this point
	asm("mov edx, ebx");			// else shift ebx into edx
	asm("or edx, edx");				//
	asm("jz short subfp4");			// if still zero, branch
	asm("mov ebx, eax");			// else shift rounding word into ebx
	asm("xor eax, eax");			// and zero rounding word
	asm("sub ecx, 0x200000");		// decrease exponent by 32 due to shift
	asm("jnc short subfp3");		// if no borrow, carry on
	asm("jmp short subfpundflw");	// if borrow here, underflow
	asm("subfp4:");
	asm("mov edx, eax");			// move rounding word into edx
	asm("or edx, edx");				// is edx still zero ?
	asm("jz short subfp0");			// if so, result is precisely zero
	asm("xor ebx, ebx");			// else zero ebx and rounding word
	asm("xor eax, eax");			//
	asm("sub ecx, 0x400000");		// and decrease exponent by 64 due to shift
	asm("jc short subfpundflw");	// if borrow, underflow
	asm("subfp3:");
	asm("mov edi, ecx");			// preserve sign and exponent
	asm("bsr ecx, edx");			// position of most significant 1 into ecx
	asm("neg ecx");					//
	asm("add ecx, 31");				// cl = 31-position of MS 1 = number of shifts to normalise
	asm("shld edx, ebx, cl");		// shift edx:ebx:eax left by cl bits
	asm("shld ebx, eax, cl");		//
	asm("shl eax, cl");				//
	asm("mov ebp, ecx");			// bit count into ebp for subtraction
	asm("shl ebp, 16");				// shift left by 16 to align with exponent
	asm("mov ecx, edi");			// exponent, sign, rounding flags back into ecx
	asm("sub ecx, ebp");			// subtract shift count from exponent
	asm("jc short subfpundflw");	// if borrow, underflow
	asm("cmp ecx, 0x10000");		// check if exponent 0
	asm("jnc roundfp");				// if not, jump to round result, else underflow

	// come here if underflow
	asm("subfpundflw:");
	asm("and ecx, 1");				// set exponent to zero, leave sign
	asm("xor edx, edx");
	asm("xor ebx, ebx");
	asm("mov eax, -10");			// return KErrUnderflow
	asm("ret");

	// come here to return zero result
	asm("subfp0:");
	asm("xor ecx, ecx");			// set exponent to zero, positive sign
	asm("xor edx, edx");
	asm("xor ebx, ebx");
	asm("addfp0snzd:");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// come here if source=0 - eax=source exponent/sign
	asm("addfp0s:");
	asm("cmp ecx, 0x10000");		// check if dest=0
	asm("jnc addfp0snzd");			// if not, return dest unaltered
	asm("and ecx, eax");			// else both zero, result negative iff both zeros negative
	asm("and ecx, 1");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// come here if dest=0, source nonzero
	asm("addfp0d:");
	asm("mov ebx, [esi]");			// return source unaltered
	asm("mov edx, [esi+4]");
	asm("mov ecx, [esi+8]");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// come here if dest=NaN or infinity
	asm("addfpsd:");
	asm("cmp edx, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebx, ebx");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("mov eax, [esi+8]");		// eax=second operand exponent
	asm("cmp eax, 0xFFFF0000");		// check second operand for NaN or infinity
	asm("jae short addfpsd1");		// branch if NaN or infinity
	asm("addfpsd2:");
	asm("mov eax, -9");				// else return dest unaltered [infinity] and KErrOverflow
	asm("ret");
	asm("addfpsd1:");
	asm("mov ebp, [esi]");			// source mantissa into edi:ebp
	asm("mov edi, [esi+4]");
	asm("cmp edi, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebp, ebp");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("xor al, cl");				// both operands are infinity - check signs
	asm("test al, 1");
	asm("jz short addfpsd2");		// if both the same, return KErrOverflow
	asm("jmp %a0": : "i"(&TRealXRealIndefinite));	// else inf + -inf: return 'real indefinite'

	// come here if source=NaN or infinity, dest finite
	asm("addfpss:");
	asm("mov ebp, [esi]");			// source mantissa into edi:ebp
	asm("mov edi, [esi+4]");
	asm("cmp edi, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebp, ebp");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("mov ecx, eax");			// if source=infinity, return source unaltered
	asm("mov edx, edi");
	asm("mov ebx, ebp");
	asm("mov eax, -9");				// return KErrOverflow
	asm("ret");
	}
1.351 +
// Subtract TRealX at [esi] - ecx,edx:ebx
// Result in ecx,edx:ebx
// Error code in eax
// Implemented by flipping the sign of the subtrahend (ecx bit 0) and
// tail-calling TRealXAdd, which handles all special cases and rounding.
LOCAL_C __NAKED__ void TRealXSubtract()
	{
	asm("xor cl, 1");				// negate subtrahend
	asm("jmp %a0": :"i"(&TRealXAdd));	// tail-call: TRealXAdd's ret returns to our caller
	}
1.360 +
// Multiply TRealX at [esi] * ecx,edx:ebx
// Result in ecx,edx:ebx (sign/exponent in ecx with rounding flags in ch,
// 64-bit mantissa in edx:ebx)
// Error code in eax (KErrNone, KErrArgument, KErrOverflow or KErrUnderflow)
// Forms the full 128-bit mantissa product via four 32x32->64 multiplies,
// then rounds to nearest (ties to even) on the low 64 bits.
LOCAL_C __NAKED__ void TRealXMultiply()
	{
	asm("xor ch, ch");				// clear rounding flags
	asm("mov eax, [esi+8]");		// fetch sign/exponent of source
	asm("xor cl, al");				// xor signs
	asm("cmp ecx, 0xFFFF0000");		// check if dest=NaN or infinity
	asm("jnc mulfpsd");				// branch if it is
	asm("cmp eax, 0xFFFF0000");		// check if source=NaN or infinity
	asm("jnc mulfpss");				// branch if it is
	asm("cmp eax, 0x10000");		// check if source=0
	asm("jc mulfp0");				// branch if it is
	asm("cmp ecx, 0x10000");		// check if dest=0
	asm("jc mulfp0");				// branch if it is
	asm("push ecx");				// save result sign
	asm("shr ecx, 16");				// dest exponent into cx
	asm("shr eax, 16");				// source exponent into ax
	asm("add eax, ecx");			// add exponents
	asm("sub eax, 0x7FFE");			// remove extra bias; eax now contains result exponent
	asm("push eax");				// save it
	asm("mov edi, edx");			// save dest mantissa high
	asm("mov eax, ebx");			// dest mantissa low -> eax
	asm("mul dword ptr [esi]");		// dest mantissa low * source mantissa low -> edx:eax
	asm("xchg ebx, eax");			// result dword 0 -> ebx, dest mant low -> eax
	asm("mov ebp, edx");			// result dword 1 -> ebp
	asm("mul dword ptr [esi+4]");	// dest mant low * src mant high -> edx:eax
	asm("add ebp, eax");			// add in partial product to dwords 1 and 2
	asm("adc edx, 0");				//
	asm("mov ecx, edx");			// result dword 2 -> ecx
	asm("mov eax, edi");			// dest mant high -> eax
	asm("mul dword ptr [esi+4]");	// dest mant high * src mant high -> edx:eax
	asm("add ecx, eax");			// add in partial product to dwords 2, 3
	asm("adc edx, 0");				//
	asm("mov eax, edi");			// dest mant high -> eax
	asm("mov edi, edx");			// result dword 3 -> edi
	asm("mul dword ptr [esi]");		// dest mant high * src mant low -> edx:eax
	asm("add ebp, eax");			// add in partial product to dwords 1, 2
	asm("adc ecx, edx");			//
	asm("adc edi, 0");				// 128-bit mantissa product is now in edi:ecx:ebp:ebx
	asm("mov edx, edi");			// top 64 bits into edx:ebx
	asm("mov edi, ebx");
	asm("mov ebx, ecx");			// bottom 64 bits now in ebp:edi
	asm("pop ecx");					// recover exponent (pop/mov don't change flags)
	asm("js short mulfp1");			// SF still from 'adc edi,0' = product bit 127; skip if normalised
	asm("add edi, edi");			// else shift left [only one shift will be needed]
	asm("adc ebp, ebp");
	asm("adc ebx, ebx");
	asm("adc edx, edx");
	asm("dec ecx");					// and decrement exponent
	asm("mulfp1:");
	asm("cmp ebp, 0x80000000");		// compare bottom 64 bits with 80000000 00000000 for rounding
	asm("ja short mulfp2");			// branch to round up
	asm("jb short mulfp3");			// branch to round down
	asm("test edi, edi");
	asm("jnz short mulfp2");		// branch to round up
	asm("test bl, 1");				// if exactly half-way, test LSB of result mantissa
	asm("jz short mulfp4");			// if LSB=0, round down [round to even]
	asm("mulfp2:");
	asm("add ebx, 1");				// round up - increment mantissa
	asm("adc edx, 0");
	asm("jnc short mulfp2a");
	asm("rcr edx, 1");				// carry out: shift 1 back into mantissa MSB
	asm("inc ecx");					// and increment exponent
	asm("mulfp2a:");
	asm("mov al, 2");				// set rounded-up flag
	asm("jmp short mulfp5");
	asm("mulfp3:");					// round down
	asm("xor al, al");				// clear rounding flags
	asm("or ebp, edi");				// check for exact result
	asm("jz short mulfp5");			// skip if exact
	asm("mulfp4:");					// come here to round down when we know result inexact
	asm("mov al, 1");				// else set rounded-down flag
	asm("mulfp5:");					// final mantissa now in edx:ebx, exponent in ecx
	asm("cmp ecx, 0xFFFF");			// check for overflow
	asm("jge short mulfp6");		// branch if overflow
	asm("cmp ecx, 0");				// check for underflow
	asm("jle short mulfp7");		// branch if underflow
	asm("shl ecx, 16");				// else exponent up to top end of ecx
	asm("mov ch, al");				// rounding flags into ch
	asm("pop eax");					// recover result sign
	asm("mov cl, al");				// into cl
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// come here if overflow
	asm("mulfp6:");
	asm("pop eax");					// recover result sign
	asm("mov ecx, 0xFFFF0000");		// exponent=FFFF
	asm("mov cl, al");				// sign into cl
	asm("mov edx, 0x80000000");		// set mantissa to 80000000 00000000 for infinity
	asm("xor ebx, ebx");
	asm("mov eax, -9");				// return KErrOverflow
	asm("ret");

	// come here if underflow
	asm("mulfp7:");
	asm("pop eax");					// recover result sign
	asm("xor ecx, ecx");			// exponent=0
	asm("mov cl, al");				// sign into cl
	asm("xor edx, edx");
	asm("xor ebx, ebx");
	asm("mov eax, -10");			// return KErrUnderflow
	asm("ret");

	// come here if either operand zero
	asm("mulfp0:");
	asm("and ecx, 1");				// set exponent=0, keep sign
	asm("xor edx, edx");
	asm("xor ebx, ebx");
	asm("xor eax, eax");			// return KErrNone
	asm("ret");

	// come here if destination operand NaN or infinity
	asm("mulfpsd:");
	asm("cmp edx, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebx, ebx");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("cmp eax, 0xFFFF0000");		// check second operand for NaN or infinity
	asm("jae short mulfpsd1");		// branch if NaN or infinity
	asm("cmp eax, 0x10000");		// check if second operand zero
	_ASM_j(c,TRealXRealIndefinite)	// inf * 0: return 'real indefinite'
	asm("mov eax, -9");				// else return dest [infinity] with xor sign and KErrOverflow
	asm("ret");
	asm("mulfpsd1:");
	asm("mov ebp, [esi]");			// source mantissa into edi:ebp
	asm("mov edi, [esi+4]");
	asm("cmp edi, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebp, ebp");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("mov eax, -9");				// both operands infinity - return infinity with xor sign
	asm("ret");						// and KErrOverflow

	// come here if source operand NaN or infinity, destination finite
	asm("mulfpss:");
	asm("mov ebp, [esi]");			// source mantissa into edi:ebp
	asm("mov edi, [esi+4]");
	asm("cmp edi, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebp, ebp");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("cmp ecx, 0x10000");		// source=infinity, check if dest=0
	_ASM_j(c,TRealXRealIndefinite)	// 0 * inf: return 'real indefinite'
	asm("or ecx, 0xFFFF0000");		// set exp=FFFF, leave xor sign in cl
	asm("mov edx, edi");			// set mantissa for infinity
	asm("mov ebx, ebp");
	asm("mov eax, -9");				// return KErrOverflow
	asm("ret");
	}
1.513 +
// Divide 96-bit unsigned dividend EDX:EAX:0 by 64-bit unsigned divisor ECX:EBX
// Assume ECX bit 31 = 1, ie 2^63 <= divisor < 2^64
// Assume the quotient fits in 32 bits
// Return 32 bit quotient in EDI
// Return 64 bit remainder in EBP:ESI
// Preserves ECX:EBX (divisor); clobbers EAX, EDX and flags.
// Method: approximate quotient from a 64/32 DIV using the divisor's top
// dword, then correct downwards (at most a few iterations) until the
// remainder is non-negative.
LOCAL_C __NAKED__ void LongDivide(void)
	{
	asm("push edx");				// save dividend
	asm("push eax");				//
	asm("cmp edx, ecx");			// check if truncation of divisor will overflow DIV instruction
	asm("jb short longdiv1");		// skip if not
	asm("xor eax, eax");			// else return quotient of 0xFFFFFFFF
	asm("dec eax");					//
	asm("jmp short longdiv2");		//
	asm("longdiv1:");
	asm("div ecx");					// divide EDX:EAX by ECX to give approximate quotient in EAX
	asm("longdiv2:");
	asm("mov edi, eax");			// save approx quotient
	asm("mul ebx");					// multiply approx quotient by full divisor ECX:EBX
	asm("mov esi, eax");			// first partial product into EBP:ESI
	asm("mov ebp, edx");			//
	asm("mov eax, edi");			// approx quotient back into eax
	asm("mul ecx");					// upper partial product now in EDX:EAX
	asm("add eax, ebp");			// add to form 96-bit product in EDX:EAX:ESI
	asm("adc edx, 0");				//
	asm("neg esi");					// remainder = dividend - approx quotient * divisor
	asm("mov ebp, [esp]");			// fetch dividend bits 32-63
	asm("sbb ebp, eax");			//
	asm("mov eax, [esp+4]");		// fetch dividend bits 64-95
	asm("sbb eax, edx");			// remainder is now in EAX:EBP:ESI
	asm("jns short longdiv4");		// if remainder positive, quotient is correct, so exit
	asm("longdiv3:");
	asm("dec edi");					// else quotient is too big, so decrement it
	asm("add esi, ebx");			// and add divisor to remainder
	asm("adc ebp, ecx");			//
	asm("adc eax, 0");				//
	asm("js short longdiv3");		// if still negative, repeat [requires <4 iterations]
	asm("longdiv4:");
	asm("add esp, 8");				// remove dividend from stack
	asm("ret");						// return with quotient in EDI, remainder in EBP:ESI
	}
1.555 +
1.556 +// Divide TRealX at [esi] / ecx,edx:ebx
1.557 +// Result in ecx,edx:ebx
1.558 +// Error code in eax
1.559 +LOCAL_C __NAKED__ void TRealXDivide(void)
1.560 + {
1.561 + asm("xor ch, ch"); // clear rounding flags
1.562 + asm("mov eax, [esi+8]"); // fetch sign/exponent of dividend
1.563 + asm("xor cl, al"); // xor signs
1.564 + asm("cmp eax, 0xFFFF0000"); // check if dividend=NaN or infinity
1.565 + asm("jnc divfpss"); // branch if it is
1.566 + asm("cmp ecx, 0xFFFF0000"); // check if divisor=NaN or infinity
1.567 + asm("jnc divfpsd"); // branch if it is
1.568 + asm("cmp ecx, 0x10000"); // check if divisor=0
1.569 + asm("jc divfpdv0"); // branch if it is
1.570 + asm("cmp eax, 0x10000"); // check if dividend=0
1.571 + asm("jc divfpdd0"); // branch if it is
1.572 + asm("push esi"); // save pointer to dividend
1.573 + asm("push ecx"); // save result sign
1.574 + asm("shr ecx, 16"); // divisor exponent into cx
1.575 + asm("shr eax, 16"); // dividend exponent into ax
1.576 + asm("sub eax, ecx"); // subtract exponents
1.577 + asm("add eax, 0x7FFE"); // eax now contains result exponent
1.578 + asm("push eax"); // save it
1.579 + asm("mov ecx, edx"); // divisor mantissa into ecx:ebx
1.580 + asm("mov edx, [esi+4]"); // dividend mantissa into edx:eax
1.581 + asm("mov eax, [esi]");
1.582 + asm("xor edi, edi"); // clear edi initially
1.583 + asm("cmp edx, ecx"); // compare EDX:EAX with ECX:EBX
1.584 + asm("jb short divfp1"); // if EDX:EAX < ECX:EBX, leave everything as is
1.585 + asm("ja short divfp2"); //
1.586 + asm("cmp eax, ebx"); // if EDX=ECX, then compare ls dwords
1.587 + asm("jb short divfp1"); // if dividend mant < divisor mant, leave everything as is
1.588 + asm("divfp2:");
1.589 + asm("sub eax, ebx"); // else dividend mant -= divisor mant
1.590 + asm("sbb edx, ecx"); //
1.591 + asm("inc edi"); // and EDI=1 [bit 0 of EDI is the integer part of the result]
1.592 + asm("inc dword ptr [esp]"); // also increment result exponent
1.593 + asm("divfp1:");
1.594 + asm("push edi"); // save top bit of result
1.595 + asm("call %a0": : "i"(&LongDivide)); // divide EDX:EAX:0 by ECX:EBX to give next 32 bits of result in EDI
1.596 + asm("push edi"); // save next 32 bits of result
1.597 + asm("mov edx, ebp"); // remainder from EBP:ESI into EDX:EAX
1.598 + asm("mov eax, esi"); //
1.599 + asm("call %a0": : "i"(&LongDivide)); // divide EDX:EAX:0 by ECX:EBX to give next 32 bits of result in EDI
1.600 + asm("test byte ptr [esp+4], 1"); // test integer bit of result
1.601 + asm("jnz short divfp4"); // if set, no need to calculate another bit
1.602 + asm("xor eax, eax"); //
1.603 + asm("add esi, esi"); // 2*remainder into EAX:EBP:ESI
1.604 + asm("adc ebp, ebp"); //
1.605 + asm("adc eax, eax"); //
1.606 + asm("sub esi, ebx"); // subtract divisor to generate final quotient bit
1.607 + asm("sbb ebp, ecx"); //
1.608 + asm("sbb eax, 0"); //
1.609 + asm("jnc short divfp3"); // skip if no borrow - in this case eax=0
1.610 + asm("add esi, ebx"); // if borrow add back - final remainder now in EBP:ESI
1.611 + asm("adc ebp, ecx"); //
1.612 + asm("adc eax, 0"); // eax will be zero after this and carry will be set
1.613 + asm("divfp3:");
1.614 + asm("cmc"); // final bit = 1-C
1.615 + asm("rcr eax, 1"); // shift it into eax bit 31
1.616 + asm("mov ebx, edi"); // result into EDX:EBX:EAX, remainder in EBP:ESI
1.617 + asm("pop edx");
1.618 + asm("add esp, 4"); // discard integer bit [zero]
1.619 + asm("jmp short divfp5"); // branch to round
1.620 +
1.621 + asm("divfp4:"); // integer bit was set
1.622 + asm("mov ebx, edi"); // result into EDX:EBX:EAX
1.623 + asm("pop edx"); //
1.624 + asm("pop eax"); // integer part of result into eax [=1]
1.625 + asm("stc"); // shift a 1 into top end of mantissa
1.626 + asm("rcr edx,1"); //
1.627 + asm("rcr ebx,1"); //
1.628 + asm("rcr eax,1"); // bottom bit into eax bit 31
1.629 +
1.630 + // when we get to here we have 65 bits of quotient mantissa in
1.631 + // EDX:EBX:EAX (bottom bit in eax bit 31)
1.632 + // and the remainder is in EBP:ESI
1.633 + asm("divfp5:");
1.634 + asm("pop ecx"); // recover result exponent
1.635 + asm("add eax, eax"); // test rounding bit
1.636 + asm("jnc short divfp6"); // branch to round down
1.637 + asm("or ebp, esi"); // test remainder to see if we are exactly half-way
1.638 + asm("jnz short divfp7"); // if not, round up
1.639 + asm("test bl, 1"); // exactly halfway - test LSB of mantissa
1.640 + asm("jz short divfp8"); // round down if LSB=0 [round to even]
1.641 + asm("divfp7:");
1.642 + asm("add ebx, 1"); // round up - increment mantissa
1.643 + asm("adc edx, 0");
1.644 + asm("jnc short divfp7a");
1.645 + asm("rcr edx, 1"); // if carry, shift 1 into mantissa MSB
1.646 + asm("inc ecx"); // and increment exponent
1.647 + asm("divfp7a:");
1.648 + asm("mov al, 2"); // set rounded-up flag
1.649 + asm("jmp short divfp9");
1.650 + asm("divfp6:");
1.651 + asm("xor al, al"); // round down - first clear rounding flags
1.652 + asm("or ebp, esi"); // test if result exact
1.653 + asm("jz short divfp9"); // skip if exact
1.654 + asm("divfp8:"); // come here to round down when we know result is inexact
1.655 + asm("mov al, 1"); // set rounded-down flag
1.656 + asm("divfp9:"); // final mantissa now in edx:ebx, exponent in ecx
1.657 + asm("cmp ecx, 0xFFFF"); // check for overflow
1.658 + asm("jge short divfp10"); // branch if overflow
1.659 + asm("cmp ecx, 0"); // check for underflow
1.660 + asm("jle short divfp11"); // branch if underflow
1.661 + asm("shl ecx, 16"); // else exponent up to top end of ecx
1.662 + asm("mov ch, al"); // rounding flags into ch
1.663 + asm("pop eax"); // recover result sign
1.664 + asm("mov cl, al"); // into cl
1.665 + asm("pop esi"); // recover dividend pointer
1.666 + asm("xor eax, eax"); // return KErrNone
1.667 + asm("ret");
1.668 +
1.669 + // come here if overflow
1.670 + asm("divfp10:");
1.671 + asm("pop eax"); // recover result sign
1.672 + asm("mov ecx, 0xFFFF0000"); // exponent=FFFF
1.673 + asm("mov cl, al"); // sign into cl
1.674 + asm("mov edx, 0x80000000"); // set mantissa to 80000000 00000000 for infinity
1.675 + asm("xor ebx, ebx");
1.676 + asm("mov eax, -9"); // return KErrOverflow
1.677 + asm("pop esi"); // recover dividend pointer
1.678 + asm("ret");
1.679 +
1.680 + // come here if underflow
1.681 + asm("divfp11:");
1.682 + asm("pop eax"); // recover result sign
1.683 + asm("xor ecx, ecx"); // exponent=0
1.684 + asm("mov cl, al"); // sign into cl
1.685 + asm("xor edx, edx");
1.686 + asm("xor ebx, ebx");
1.687 + asm("mov eax, -10"); // return KErrUnderflow
1.688 + asm("pop esi"); // recover dividend pointer
1.689 + asm("ret");
1.690 +
1.691 +
1.692 + // come here if divisor=0, dividend finite
1.693 + asm("divfpdv0:");
1.694 + asm("cmp eax, 0x10000"); // check if dividend also zero
1.695 + _ASM_j(c,TRealXRealIndefinite) // if so, return 'real indefinite'
1.696 + asm("or ecx, 0xFFFF0000"); // else set exponent=FFFF, leave xor sign in cl
1.697 + asm("mov edx, 0x80000000"); // set mantissa for infinity
1.698 + asm("xor ebx, ebx");
1.699 + asm("mov eax, -41"); // return KErrDivideByZero
1.700 + asm("ret");
1.701 +
1.702 + // come here if dividend=0, divisor finite and nonzero
1.703 + asm("divfpdd0:");
1.704 + asm("and ecx, 1"); // exponent=0, leave xor sign in cl
1.705 + asm("xor eax, eax"); // return KErrNone
1.706 + asm("ret");
1.707 +
1.708 + // come here if dividend is a NaN or infinity
1.709 + asm("divfpss:");
1.710 + asm("mov ebp, [esi]"); // dividend mantissa into edi:ebp
1.711 + asm("mov edi, [esi+4]");
1.712 + asm("cmp edi, 0x80000000"); // check for infinity
1.713 + _ASM_jn(e,TRealXBinOpNaN) // branch if NaN
1.714 + asm("test ebp, ebp");
1.715 + _ASM_jn(e,TRealXBinOpNaN)
1.716 + asm("cmp ecx, 0xFFFF0000"); // check divisor for NaN or infinity
1.717 + asm("jae short divfpss1"); // branch if NaN or infinity
1.718 + asm("or ecx, 0xFFFF0000"); // infinity/finite - return infinity with xor sign
1.719 + asm("mov edx, 0x80000000");
1.720 + asm("xor ebx, ebx");
1.721 + asm("mov eax, -9"); // return KErrOverflow
1.722 + asm("ret");
1.723 + asm("divfpss1:");
1.724 + asm("cmp edx, 0x80000000"); // check for infinity
1.725 + _ASM_jn(e,TRealXBinOpNaN) // branch if NaN
1.726 + asm("test ebx, ebx");
1.727 + _ASM_jn(e,TRealXBinOpNaN)
1.728 + asm("jmp %a0": : "i"(&TRealXRealIndefinite)); // if both operands infinite, return 'real indefinite'
1.729 +
1.730 + // come here if divisor is a NaN or infinity, dividend finite
1.731 + asm("divfpsd:");
1.732 + asm("cmp edx, 0x80000000"); // check for infinity
1.733 + _ASM_jn(e,TRealXBinOpNaN) // branch if NaN
1.734 + asm("test ebx, ebx");
1.735 + _ASM_jn(e,TRealXBinOpNaN)
1.736 + asm("and ecx, 1"); // dividend is finite, divisor=infinity, so return 0 with xor sign
1.737 + asm("xor edx, edx");
1.738 + asm("xor ebx, ebx");
1.739 + asm("xor eax, eax"); // return KErrNone
1.740 + asm("ret");
1.741 + }
1.742 +
1.743 +// TRealX modulo - dividend at [esi], divisor in ecx,edx:ebx
1.744 +// Result in ecx,edx:ebx
1.745 +// Error code in eax
LOCAL_C __NAKED__ void TRealXModulo(void)
	{
	// Compute dividend mod divisor by restoring long division on the 64-bit
	// mantissas: one shift-and-conditional-subtract step per bit of exponent
	// difference (ah bit 0 serves as a 65th accumulator bit).
	// The remainder keeps the dividend's sign; its magnitude is accumulated at
	// the divisor's alignment, so the result exponent is derived from the
	// divisor's exponent (cx after the ror below).
	asm("mov eax, [esi+8]");		// fetch sign/exponent of dividend
	asm("mov cl, al");				// result sign=dividend sign
	asm("xor ch, ch");				// clear rounding flags
	asm("cmp eax, 0xFFFF0000");		// check if dividend=NaN or infinity
	asm("jnc short modfpss");		// branch if it is
	asm("cmp ecx, 0xFFFF0000");		// check if divisor=NaN or infinity
	asm("jnc short modfpsd");		// branch if it is
	asm("cmp ecx, 0x10000");		// check if divisor=0
	_ASM_j(c,TRealXRealIndefinite)	// if so, return 'real indefinite'
	asm("shr eax, 16");				// ax=dividend exponent
	asm("ror ecx, 16");				// cx=divisor exponent
	asm("sub ax, cx");				// ax=dividend exponent-divisor exponent
	asm("jc short modfpdd0");		// if dividend exponent is smaller, return dividend
	asm("cmp ax, 64");				// check if exponents differ by >= 64 bits
	asm("jnc short modfplp");		// if so, result is total loss of precision, not representable
	asm("mov ah, 0");				// ah bit 0 acts as 65th accumulator bit
	asm("mov ebp, [esi]");			// edi:ebp=dividend mantissa
	asm("mov edi, [esi+4]");		//
	asm("jmp short modfp2");		// skip left shift on first iteration
	asm("modfp1:");
	asm("add ebp, ebp");			// shift accumulator left [65 bits]
	asm("adc edi, edi");
	asm("adc ah, ah");
	asm("modfp2:");
	asm("sub ebp, ebx");			// subtract divisor from dividend
	asm("sbb edi, edx");
	asm("sbb ah, 0");
	asm("jnc short modfp3");		// skip if no borrow
	asm("add ebp, ebx");			// else add back (restoring division step)
	asm("adc edi, edx");
	asm("adc ah, 0");
	asm("modfp3:");
	asm("dec al");					// any more bits to do? (al counts the exponent difference down)
	asm("jns short modfp1");		// loop if there are
	asm("mov edx, edi");			// result mantissa [not yet normalised] into edx:ebx
	asm("mov ebx, ebp");
	asm("or edi, ebx");				// check for zero
	asm("jz short modfp0");			// jump if result zero
	asm("or edx, edx");				// check if ms dword zero
	asm("jnz short modfp4");
	asm("mov edx, ebx");			// if so, shift left by 32
	asm("xor ebx, ebx");
	asm("sub cx, 32");				// and decrement exponent by 32
	asm("jbe short modfpund");		// if borrow or exponent zero, underflow
	asm("modfp4:");
	asm("mov edi, ecx");			// preserve sign and exponent
	asm("bsr ecx, edx");			// position of most significant 1 into ecx
	asm("neg ecx");					//
	asm("add ecx, 31");				// cl = 31-position of MS 1 = number of shifts to normalise
	asm("shld edx, ebx, cl");		// shift edx:ebx left by cl bits
	asm("shl ebx, cl");				//
	asm("mov ebp, ecx");			// bit count into ebp for subtraction
	asm("mov ecx, edi");			// exponent & sign back into ecx
	asm("sub cx, bp");				// subtract shift count from exponent
	asm("jbe short modfpund");		// if borrow or exponent 0, underflow
	asm("rol ecx, 16");				// else ecx=exponent:sign
	asm("xor eax, eax");			// normal exit, result in ecx,edx:ebx
	asm("ret");

	// dividend=NaN or infinity
	asm("modfpss:");
	asm("mov ebp, [esi]");			// dividend mantissa into edi:ebp
	asm("mov edi, [esi+4]");
	asm("cmp edi, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebp, ebp");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("cmp ecx, 0xFFFF0000");		// check divisor for NaN or infinity
	_ASM_j(b,TRealXRealIndefinite)	// infinity%finite - return 'real indefinite'
	asm("cmp edx, 0x80000000");		// check for divisor=infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebx, ebx");
	_ASM_jn(e,TRealXBinOpNaN)
	asm("jmp %a0": : "i"(&TRealXRealIndefinite));	// if both operands infinite, return 'real indefinite'

	// divisor=NaN or infinity, dividend finite
	asm("modfpsd:");
	asm("cmp edx, 0x80000000");		// check for infinity
	_ASM_jn(e,TRealXBinOpNaN)		// branch if NaN
	asm("test ebx, ebx");
	_ASM_jn(e,TRealXBinOpNaN)
	// finite%infinity - return dividend unaltered

	asm("modfpdd0:");
	asm("mov ebx, [esi]");			// normal exit, return dividend unaltered
	asm("mov edx, [esi+4]");
	asm("mov ecx, [esi+8]");
	asm("xor eax, eax");
	asm("ret");

	asm("modfp0:");
	asm("shr ecx, 16");				// normal exit, result 0 with dividend's sign in cl
	asm("xor eax, eax");
	asm("ret");

	asm("modfpund:");
	asm("shr ecx, 16");				// underflow, result 0
	asm("mov eax, -10");			// return KErrUnderflow
	asm("ret");

	asm("modfplp:");
	asm("shr ecx, 16");				// loss of precision, result 0
	asm("mov eax, -7");				// return KErrTotalLossOfPrecision
	asm("ret");
	}
1.853 +
1.854 +
1.855 +
1.856 +
__NAKED__ EXPORT_C TRealX::TRealX()
/**
Constructs a default extended precision object.

This sets the value to zero.
*/
	{
	// on entry ecx=this; on exit eax=this
	THISCALL_PROLOG0()
	asm("xor eax, eax");
	asm("mov [ecx], eax");			// set value to zero: mantissa low word
	asm("mov [ecx+4], eax");		// mantissa high word
	asm("mov [ecx+8], eax");		// sign/exponent word
	asm("mov eax, ecx");			// must return this
	THISCALL_EPILOG0()
	}
1.872 +
1.873 +
1.874 +
1.875 +
__NAKED__ EXPORT_C TRealX::TRealX(TUint /*aExp*/, TUint /*aMantHi*/, TUint /*aMantLo*/)
/**
Constructs an extended precision object from an explicit exponent and
a 64 bit mantissa.

@param aExp		The exponent
@param aMantHi	The high order 32 bits of the 64 bit mantissa
@param aMantLo	The low order 32 bits of the 64 bit mantissa
*/
	{
	// on entry ecx=this, [esp+4]=aExp, [esp+8]=aMantHi, [esp+12]=aMantLo
	// the three words are stored unchecked, straight into the object layout
	// [mantLo, mantHi, sign/exponent]
	THISCALL_PROLOG3()
	asm("mov eax, [esp+4]");		// eax=aExp
	asm("mov [ecx+8], eax");
	asm("mov eax, [esp+8]");		// eax=aMantHi
	asm("mov [ecx+4], eax");
	asm("mov eax, [esp+12]");		// eax=aMantLo
	asm("mov [ecx], eax");
	asm("mov eax, ecx");			// must return this
	THISCALL_EPILOG3()
	}
1.896 +
1.897 +
__NAKED__ EXPORT_C TInt TRealX::Set(TInt /*aInt*/)
/**
Gives this extended precision object a new value taken
from a signed integer.

@param aInt The signed integer value.

@return KErrNone, always.
*/
	{
	THISCALL_PROLOG1()
	// on entry ecx=this, [esp+4]=aInt, return code in eax
	// (labels carry a _2 suffix to avoid clashing with the identical
	// sequence in __6TRealXi)
	asm("mov edx, [esp+4]");		// edx=aInt
	asm("or edx, edx");				// test sign/zero
	asm("mov eax, 0x7FFF");			// eax=exponent bias for a value with MSB at bit 0
	asm("jz short trealxfromint0_2");	// branch if 0
	asm("jns short trealxfromint1_2");	// skip if positive
	asm("neg edx");					// take absolute value
	asm("add eax, 0x10000");		// sign bit in eax bit 16
	asm("trealxfromint1_2:");
	asm("push ecx");				// save this
	asm("bsr ecx, edx");			// bit number of edx MSB into ecx
	asm("add eax, ecx");			// add to eax to form result exponent
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of shifts to normalise edx
	asm("shl edx, cl");				// normalise edx
	asm("pop ecx");					// this back into ecx
	asm("ror eax, 16");				// sign/exponent into normal positions
	asm("mov [ecx+4], edx");		// store mantissa high word
	asm("mov [ecx+8], eax");		// store sign/exponent
	asm("xor eax, eax");
	asm("mov [ecx], eax");			// zero mantissa low word
	THISCALL_EPILOG1()				// return KErrNone
	asm("trealxfromint0_2:");
	asm("mov [ecx], edx");			// store mantissa low word=0
	asm("mov [ecx+4], edx");		// store mantissa high word=0
	asm("mov [ecx+8], edx");		// store sign/exponent=0
	asm("xor eax, eax");			// return KErrNone
	THISCALL_EPILOG1()
	}
1.938 +
1.939 +
1.940 +
__NAKED__ EXPORT_C TInt TRealX::Set(TUint /*aInt*/)
/**
Gives this extended precision object a new value taken from
an unsigned integer.

@param aInt The unsigned integer value.

@return KErrNone, always.
*/
	{
	// on entry ecx=this, [esp+4]=aInt, return code in eax
	// same as Set(TInt) but with no sign handling
	THISCALL_PROLOG1()
	asm("mov edx, [esp+4]");		// edx=aInt
	asm("mov eax, 0x7FFF");			// eax=exponent bias for a value with MSB at bit 0
	asm("or edx, edx");				// test for 0
	asm("jz short trealxfromuint0_");	// branch if 0
	asm("push ecx");				// save this
	asm("bsr ecx, edx");			// bit number of edx MSB into ecx
	asm("add eax, ecx");			// add to eax to form result exponent
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of shifts to normalise edx
	asm("shl edx, cl");				// normalise edx
	asm("pop ecx");					// this back into ecx
	asm("shl eax, 16");				// exponent into normal position (sign always 0)
	asm("mov [ecx+4], edx");		// store mantissa high word
	asm("mov [ecx+8], eax");		// store exponent
	asm("xor eax, eax");
	asm("mov [ecx], eax");			// zero mantissa low word
	THISCALL_EPILOG1()				// return KErrNone
	asm("trealxfromuint0_:");
	asm("mov [ecx], edx");			// store mantissa low word=0
	asm("mov [ecx+4], edx");		// store mantissa high word=0
	asm("mov [ecx+8], edx");		// store sign/exponent=0
	asm("xor eax, eax");			// return KErrNone
	THISCALL_EPILOG1()
	}
1.976 +
1.977 +
1.978 +
1.979 +
LOCAL_C __NAKED__ void TRealXFromTInt64(void)
	{
	// Convert TInt64 in edx:ebx to TRealX in ecx,edx:ebx
	// (takes absolute value, normalises the 64-bit mantissa, and builds
	// the biased sign/exponent word in ecx; clobbers eax)
	asm("mov eax, 0x7FFF");			// exponent bias for a value with MSB at bit 0
	asm("or edx, edx");				// test sign/zero
	asm("jz short trealxfromtint64a");	// branch if top word zero
	asm("jns short trealxfromtint64b");
	asm("add eax, 0x10000");		// sign bit into eax bit 16
	asm("neg edx");					// take absolute value: 64-bit negate of edx:ebx
	asm("neg ebx");
	asm("sbb edx, 0");				// propagate borrow from low word
	asm("jz short trealxfromtint64d");	// branch if top word zero
	asm("trealxfromtint64b:");
	asm("bsr ecx, edx");			// ecx=bit number of edx MSB
	asm("add eax, ecx");			// add to exponent in eax
	asm("add eax, 32");				// +32 since MSB is in the high word
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of left shifts to normalise
	asm("shld edx, ebx, cl");		// shift left to normalise edx:ebx
	asm("shl ebx, cl");
	asm("mov ecx, eax");			// sign/exponent into ecx
	asm("ror ecx, 16");				// and into normal positions
	asm("ret");
	asm("trealxfromtint64a:");		// come here if top word zero
	asm("or ebx, ebx");				// test for bottom word also zero
	asm("jz short trealxfromtint64c");	// branch if it is
	asm("trealxfromtint64d:");		// come here if top word zero, bottom word not
	asm("mov edx, ebx");			// shift edx:ebx left 32
	asm("xor ebx, ebx");
	asm("bsr ecx, edx");			// ecx=bit number of edx MSB
	asm("add eax, ecx");			// add to exponent in eax
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of left shifts to normalise
	asm("shl edx, cl");				// normalise
	asm("mov ecx, eax");			// sign/exponent into ecx
	asm("ror ecx, 16");				// and into normal positions
	asm("ret");
	asm("trealxfromtint64c:");		// entire number is zero
	asm("xor ecx, ecx");			// return +0 (edx:ebx already zero)
	asm("ret");
	}
1.1021 +
1.1022 +
1.1023 +
1.1024 +
__NAKED__ EXPORT_C TInt TRealX::Set(const TInt64& /*aInt*/)
/**
Gives this extended precision object a new value taken from
a 64 bit integer.

@param aInt The 64 bit integer value.

@return KErrNone, always.
*/
	{
	// on entry ecx=this, [esp+4]=address of aInt, return code in eax
	// delegates the conversion to TRealXFromTInt64; ebx is callee-saved
	// in this ABI so it is preserved around the call
	THISCALL_PROLOG1()
	asm("push ebx");
	asm("push ecx");				// save this across the helper call
	asm("mov edx, [esp+12]");		// edx=address of aInt
	asm("mov ebx, [edx]");
	asm("mov edx, [edx+4]");		// edx:ebx=aInt
	asm("call %a0": : "i"(&TRealXFromTInt64));	// convert to TRealX in ecx,edx:ebx
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("xor eax, eax");			// return KErrNone
	asm("pop ebx");
	THISCALL_EPILOG1()
	}
1.1051 +
1.1052 +
1.1053 +
LOCAL_C __NAKED__ void __6TRealXi()
	{
	// common function for int to TRealX
	// on entry ecx=this, [esp+4]=aInt; on exit eax=this
	// (same algorithm as TRealX::Set(TInt), but returns this instead of
	// an error code)
	THISCALL_PROLOG1()
	asm("mov edx, [esp+4]");		// edx=aInt
	asm("or edx, edx");				// test sign/zero
	asm("mov eax, 0x7FFF");			// exponent bias for a value with MSB at bit 0
	asm("jz short trealxfromint0");	// branch if 0
	asm("jns short trealxfromint1");	// skip if positive
	asm("neg edx");					// take absolute value
	asm("add eax, 0x10000");		// sign bit in eax bit 16
	asm("trealxfromint1:");
	asm("push ecx");				// save this
	asm("bsr ecx, edx");			// bit number of edx MSB into ecx
	asm("add eax, ecx");			// add to eax to form result exponent
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of shifts to normalise edx
	asm("shl edx, cl");				// normalise edx
	asm("pop ecx");					// this back into ecx
	asm("ror eax, 16");				// sign/exponent into normal positions
	asm("mov [ecx+4], edx");		// store mantissa high word
	asm("mov [ecx+8], eax");		// store sign/exponent
	asm("xor eax, eax");
	asm("mov [ecx], eax");			// zero mantissa low word
	asm("mov eax, ecx");			// return eax=this
	THISCALL_EPILOG1()
	asm("trealxfromint0:");
	asm("mov [ecx], edx");			// store mantissa low word=0
	asm("mov [ecx+4], edx");		// store mantissa high word=0
	asm("mov [ecx+8], edx");		// store sign/exponent=0
	asm("mov eax, ecx");			// return eax=this
	THISCALL_EPILOG1()
	}
1.1087 +
1.1088 +
__NAKED__ EXPORT_C TRealX::TRealX(TInt /*aInt*/)
/**
Constructs an extended precision object from a signed integer value.

@param aInt The signed integer value.
*/
	{
	// on entry ecx=this, [esp+4]=aInt, return eax=this
	// tail-jump to the shared int-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXi));
	}
1.1099 +
1.1100 +
1.1101 +
1.1102 +
__NAKED__ EXPORT_C TRealX& TRealX::operator=(TInt /*aInt*/)
/**
Assigns the specified signed integer value to this extended precision object.

@param aInt The signed integer value.

@return A reference to this extended precision object.
*/
	{
	// on entry ecx=this, [esp+4]=aInt, return eax=this
	// tail-jump to the shared int-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXi));
	}
1.1115 +
1.1116 +
1.1117 +
LOCAL_C __NAKED__ void __6TRealXui()
	{
	// common function for unsigned int to TRealX
	// on entry ecx=this, [esp+4]=aInt; on exit eax=this
	// (same algorithm as TRealX::Set(TUint), but returns this instead of
	// an error code)
	THISCALL_PROLOG1()
	asm("mov edx, [esp+4]");		// edx=aInt
	asm("mov eax, 0x7FFF");			// exponent bias for a value with MSB at bit 0
	asm("or edx, edx");				// test for zero
	asm("jz short trealxfromuint0");	// branch if 0
	asm("push ecx");				// save this
	asm("bsr ecx, edx");			// bit number of edx MSB into ecx
	asm("add eax, ecx");			// add to eax to form result exponent
	asm("neg cl");
	asm("add cl, 31");				// 31-bit number = number of shifts to normalise edx
	asm("shl edx, cl");				// normalise edx
	asm("pop ecx");					// this back into ecx
	asm("shl eax, 16");				// exponent into normal position (sign always 0)
	asm("mov [ecx+4], edx");		// store mantissa high word
	asm("mov [ecx+8], eax");		// store exponent
	asm("xor eax, eax");
	asm("mov [ecx], eax");			// zero mantissa low word
	asm("mov eax, ecx");			// return eax=this
	THISCALL_EPILOG1()
	asm("trealxfromuint0:");
	asm("mov [ecx], edx");			// store mantissa low word=0
	asm("mov [ecx+4], edx");		// store mantissa high word=0
	asm("mov [ecx+8], edx");		// store sign/exponent=0
	asm("mov eax, ecx");			// return eax=this
	THISCALL_EPILOG1()
	}
1.1147 +
1.1148 +
1.1149 +
__NAKED__ EXPORT_C TRealX::TRealX(TUint /*aInt*/)
/**
Constructs an extended precision object from an unsigned integer value.

@param aInt The unsigned integer value.
*/
	{
	// on entry ecx=this, [esp+4]=aInt, return eax=this
	// tail-jump to the shared unsigned-int-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXui));
	}
1.1160 +
1.1161 +
1.1162 +
1.1163 +
__NAKED__ EXPORT_C TRealX& TRealX::operator=(TUint /*aInt*/)
/**
Assigns the specified unsigned integer value to this extended precision object.

@param aInt The unsigned integer value.

@return A reference to this extended precision object.
*/
	{
	// on entry ecx=this, [esp+4]=aInt, return eax=this
	// tail-jump to the shared unsigned-int-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXui));
	}
1.1176 +
1.1177 +
1.1178 +
1.1179 +
LOCAL_C __NAKED__ void __6TRealXRC6TInt64()
	{
	// common function for TInt64 to TRealX
	// on entry ecx=this, [esp+4]=address of aInt; on exit eax=this
	THISCALL_PROLOG1()
	asm("push ebx");				// preserve ebx (callee-saved)
	asm("push ecx");				// save this
	asm("mov edx, [esp+12]");		// edx=address of aInt
	asm("mov ebx, [edx]");
	asm("mov edx, [edx+4]");		// edx:ebx=aInt
	asm("call %a0": : "i"(&TRealXFromTInt64));	// convert to TRealX in ecx,edx:ebx
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("mov ecx, eax");			// restore this ptr
	asm("pop ebx");					// restore ebx
	THISCALL_EPILOG1()
	}
1.1198 +
1.1199 +
1.1200 +
1.1201 +
__NAKED__ EXPORT_C TRealX::TRealX(const TInt64& /*aInt*/)
/**
Constructs an extended precision object from a 64 bit integer.

@param aInt A reference to a 64 bit integer.
*/
	{
	// on entry ecx=this, [esp+4]=address of aInt, return eax=this
	// tail-jump to the shared TInt64-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXRC6TInt64));
	}
1.1212 +
1.1213 +
1.1214 +
1.1215 +
__NAKED__ EXPORT_C TRealX& TRealX::operator=(const TInt64& /*aInt*/)
/**
Assigns the specified 64 bit integer value to this extended precision object.

@param aInt A reference to a 64 bit integer.

@return A reference to this extended precision object.
*/
	{
	// on entry ecx=this, [esp+4]=address of aInt, return eax=this
	// tail-jump to the shared TInt64-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXRC6TInt64));
	}
1.1228 +
1.1229 +
1.1230 +
1.1231 +
LOCAL_C __NAKED__ void ConvertTReal32ToTRealX(void)
	{
	// Convert TReal32 in edx to TRealX in ecx,edx:ebx
	// TReal32 layout: bit 31 sign, bits 23-30 exponent (bias 0x7F),
	// bits 0-22 mantissa. TRealX exponent bias is 0x7FFF, hence the
	// rebias constant 0x7F80 = 0x7FFF-0x7F. Clobbers eax.
	// Denormals are normalised; infinity/NaN get TRealX exponent 0xFFFF.
	asm("xor ebx, ebx");			// mant low always zero
	asm("mov eax, edx");
	asm("shr eax, 23");				// exponent now in al, sign in ah bit 0
	asm("test al, al");				// check for denormal/zero
	asm("jz short treal32totrealx2");	// branch if denormal/zero
	asm("xor ecx, ecx");
	asm("mov cl, al");
	asm("add ecx, 0x7F80");			// bias exponent correctly for TRealX
	asm("cmp al, 0xFF");			// check for infinity/NaN
	asm("jnz short treal32totrealx1");	// skip if neither
	asm("mov cl, al");				// else set TRealX exponent to FFFF
	asm("mov ch, al");
	asm("treal32totrealx1:");
	asm("shl edx, 8");				// left-justify mantissa in edx
	asm("or edx, 0x80000000");		// put in implied integer bit
	asm("shl ecx, 16");				// exponent into ecx bits 16-31
	asm("mov cl, ah");				// sign into ecx bit 0
	asm("ret");
	asm("treal32totrealx2:");		// come here if exponent 0
	asm("shl edx, 9");				// left-justify mantissa in edx [shift out integer bit as well]
	asm("jnz short treal32totrealx3");	// jump if denormal
	asm("xor ecx, ecx");			// else return 0
	asm("mov cl, ah");				// with same sign as input value
	asm("ret");
	asm("treal32totrealx3:");		// come here if denormal
	asm("bsr ecx, edx");			// ecx=bit number of MSB of edx
	asm("neg ecx");
	asm("add ecx, 31");				// ecx=number of left shifts to normalise edx
	asm("shl edx, cl");				// normalise
	asm("neg ecx");
	asm("add ecx, 0x7F80");			// exponent=7F80-number of shifts
	asm("shl ecx, 16");				// exponent into ecx bits 16-31
	asm("mov cl, ah");				// sign into ecx bit 0
	asm("ret");
	}
1.1270 +
1.1271 +
1.1272 +
1.1273 +
LOCAL_C __NAKED__ void ConvertTReal64ToTRealX(void)
	{
	// Convert TReal64 in edx:ebx to TRealX in ecx,edx:ebx
	// TReal64 layout: bit 31 of edx sign, bits 20-30 exponent (bias 0x3FF),
	// remaining 52 bits mantissa. TRealX exponent bias is 0x7FFF, hence the
	// rebias constant 0x7C00 = 0x7FFF-0x3FF. Clobbers eax.
	// Denormals are normalised; infinity/NaN get TRealX exponent 0xFFFF.
	asm("mov eax, edx");
	asm("shr eax, 20");
	asm("mov ecx, 0x7FF");
	asm("and ecx, eax");			// ecx=exponent
	asm("jz short treal64totrealx1");	// branch if zero/denormal
	asm("add ecx, 0x7C00");			// else bias exponent correctly for TRealX
	asm("cmp ecx, 0x83FF");			// check for infinity/NaN (0x7FF+0x7C00)
	asm("jnz short treal64totrealx2");
	asm("mov ch, cl");				// if so, set exponent to FFFF
	asm("treal64totrealx2:");
	asm("shl ecx, 16");				// exponent into ecx bits 16-31
	asm("mov cl, 11");				// number of shifts needed to justify mantissa correctly
	asm("shld edx, ebx, cl");		// shift mantissa left
	asm("shl ebx, cl");
	asm("or edx, 0x80000000");		// put in implied integer bit
	asm("shr eax, 11");				// sign bit into al bit 0
	asm("mov cl, al");				// into ecx bit 0
	asm("ret");
	asm("treal64totrealx1:");		// come here if zero/denormal
	asm("mov cl, 12");				// number of shifts needed to justify mantissa correctly (no integer bit)
	asm("shld edx, ebx, cl");		// shift mantissa left
	asm("shl ebx, cl");
	asm("test edx, edx");			// check for zero
	asm("jnz short treal64totrealx3");
	asm("test ebx, ebx");
	asm("jnz short treal64totrealx4");
	asm("shr eax, 11");				// sign bit into eax bit 0, rest of eax=0
	asm("mov ecx, eax");			// return 0 result with correct sign
	asm("ret");
	asm("treal64totrealx4:");		// come here if denormal, edx=0
	asm("mov edx, ebx");			// shift mantissa left 32
	asm("xor ebx, ebx");
	asm("bsr ecx, edx");			// ecx=bit number of MSB of edx
	asm("neg ecx");
	asm("add ecx, 31");				// ecx=number of left shifts to normalise edx
	asm("shl edx, cl");				// normalise
	asm("neg ecx");
	asm("add ecx, 0x7BE0");			// exponent=7BE0-number of shifts (7C00 less the 32 bits already shifted)
	asm("shl ecx, 16");				// exponent into bits 16-31 of ecx
	asm("shr eax, 11");
	asm("mov cl, al");				// sign into bit 0 of ecx
	asm("ret");
	asm("treal64totrealx3:");		// come here if denormal, edx nonzero
	asm("bsr ecx, edx");			// ecx=bit number of MSB of edx
	asm("neg ecx");
	asm("add ecx, 31");				// ecx=number of left shifts to normalise edx:ebx
	asm("shld edx, ebx, cl");		// normalise
	asm("shl ebx, cl");
	asm("neg ecx");
	asm("add ecx, 0x7C00");			// exponent=7C00-number of shifts
	asm("shl ecx, 16");				// exponent into bits 16-31 of ecx
	asm("shr eax, 11");
	asm("mov cl, al");				// sign into bit 0 of ecx
	asm("ret");
	}
1.1332 +
1.1333 +
1.1334 +
1.1335 +
__NAKED__ EXPORT_C TInt TRealX::Set(TReal32 /*aReal*/)
/**
Gives this extended precision object a new value taken from
a single precision floating point number.

@param aReal The single precision floating point value.

@return KErrNone, if a valid number;
KErrOverflow, if the number is infinite;
KErrArgument, if not a number.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4]
	// on exit, error code in eax
	// ebx and ecx are preserved around the conversion helper call
	THISCALL_PROLOG1()
	asm("push ecx");
	asm("push ebx");				// save ebx
	asm("push ecx");				// save this
	asm("mov edx, [esp+16]");		// aReal into edx ([esp+4] before the three pushes)
	asm("call %a0": : "i"(&ConvertTReal32ToTRealX));
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("xor eax, eax");			// error code=KErrNone initially
	asm("cmp ecx, 0xFFFF0000");		// check for infinity/NaN
	asm("jb short trealxsettreal32a");	// if neither, return KErrNone
	asm("mov eax, -9");				// eax=KErrOverflow
	asm("cmp edx, 0x80000000");		// check for infinity (mantissa exactly 80000000 00000000)
	asm("je short trealxsettreal32a");	// if infinity, return KErrOverflow
	asm("mov eax, -6");				// if NaN, return KErrArgument
	asm("trealxsettreal32a:");
	asm("pop ebx");
	asm("pop ecx");
	THISCALL_EPILOG1()
	}
1.1372 +
1.1373 +
1.1374 +
1.1375 +
__NAKED__ EXPORT_C TInt TRealX::Set(TReal64 /*aReal*/)
/**
Gives this extended precision object a new value taken from
a double precision floating point number.

@param aReal The double precision floating point value.

@return KErrNone, if a valid number;
KErrOverflow, if the number is infinite;
KErrArgument, if not a number.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
	// on exit, error code in eax
	// ebx and ecx are preserved around the conversion helper call
	THISCALL_PROLOG2()
	asm("push ecx");
	asm("push ebx");				// save ebx
	asm("push ecx");				// save this
	asm("mov ebx, [esp+16]");		// aReal into edx:ebx ([esp+4..8] before the three pushes)
	asm("mov edx, [esp+20]");
	asm("call %a0": : "i"(&ConvertTReal64ToTRealX));
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("xor eax, eax");			// error code=KErrNone initially
	asm("cmp ecx, 0xFFFF0000");		// check for infinity/NaN
	asm("jb short trealxsettreal64a");	// if neither, return KErrNone
	asm("mov eax, -9");				// eax=KErrOverflow
	asm("cmp edx, 0x80000000");		// check for infinity (mantissa exactly 80000000 00000000)
	asm("jne short trealxsettreal64b");	// branch if NaN
	asm("test ebx, ebx");
	asm("je short trealxsettreal64a");	// if infinity, return KErrOverflow
	asm("trealxsettreal64b:");
	asm("mov eax, -6");				// if NaN, return KErrArgument
	asm("trealxsettreal64a:");
	asm("pop ebx");
	asm("pop ecx");
	THISCALL_EPILOG2()
	}
1.1416 +
1.1417 +
1.1418 +
1.1419 +
LOCAL_C __NAKED__ void __6TRealXf()
	{
	// common function for float to TRealX
	// on entry ecx=this, [esp+4]=aReal; on exit eax=this
	// (same conversion as Set(TReal32) but ignores the error status and
	// returns this)
	THISCALL_PROLOG1()
	asm("push ebx");				// save ebx
	asm("push ecx");				// save this
	asm("mov edx, [esp+12]");		// aReal into edx ([esp+4] before the two pushes)
	asm("call %a0": : "i"(&ConvertTReal32ToTRealX));
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("pop ebx");
	asm("mov ecx,eax");				// restore this ptr for the epilogue
	THISCALL_EPILOG1()
	}
1.1436 +
1.1437 +
1.1438 +
1.1439 +
__NAKED__ EXPORT_C TRealX::TRealX(TReal32 /*aReal*/)
/**
Constructs an extended precision object from
a single precision floating point number.

@param aReal The single precision floating point value.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4]
	// on exit, eax=this
	// tail-jump to the shared float-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXf));
	}
1.1452 +
1.1453 +
1.1454 +
1.1455 +
__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal32 /*aReal*/)
/**
Assigns the specified single precision floating point number to
this extended precision object.

@param aReal The single precision floating point value.

@return A reference to this extended precision object.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4]
	// on exit, eax=this
	// tail-jump to the shared float-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXf));
	}
1.1470 +
1.1471 +
1.1472 +
1.1473 +
LOCAL_C __NAKED__ void __6TRealXd()
	{
	// common function for double to TRealX
	// on entry ecx=this, [esp+4]=aReal low word, [esp+8]=aReal high word;
	// on exit eax=this
	// (same conversion as Set(TReal64) but ignores the error status and
	// returns this)
	THISCALL_PROLOG2()
	asm("push ebx");				// save ebx
	asm("push ecx");				// save this
	asm("mov ebx, [esp+12]");		// aReal into edx:ebx ([esp+4..8] before the two pushes)
	asm("mov edx, [esp+16]");
	asm("call %a0": : "i"(&ConvertTReal64ToTRealX));
	asm("pop eax");					// eax=this
	asm("mov [eax], ebx");			// store result
	asm("mov [eax+4], edx");
	asm("mov [eax+8], ecx");
	asm("pop ebx");
	asm("mov ecx,eax");				// restore this ptr for the epilogue
	THISCALL_EPILOG2()
	}
1.1491 +
1.1492 +
1.1493 +
1.1494 +
__NAKED__ EXPORT_C TRealX::TRealX(TReal64 /*aReal*/)
/**
Constructs an extended precision object from
a double precision floating point number.

@param aReal The double precision floating point value.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
	// on exit, eax=this
	// tail-jump to the shared double-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXd));
	}
1.1507 +
1.1508 +
1.1509 +
1.1510 +
__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal64 /*aReal*/)
/**
Assigns the specified double precision floating point number to
this extended precision object.

@param aReal The double precision floating point value.

@return A reference to this extended precision object.
*/
	{
	// on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
	// on exit, eax=this
	// tail-jump to the shared double-to-TRealX conversion routine
	asm("jmp %a0": : "i"(&__6TRealXd));
	}
1.1525 +
1.1526 +
1.1527 +
1.1528 +
1.1529 +__NAKED__ EXPORT_C TRealX::operator TInt() const
1.1530 +/**
1.1531 +Gets the extended precision value as a signed integer value.
1.1532 +
1.1533 +The operator returns:
1.1534 +
1.1535 +1. zero, if the extended precision value is not a number
1.1536 +
1.1537 +2. 0x7FFFFFFF, if the value is positive and too big to fit into a TInt.
1.1538 +
1.1539 +3. 0x80000000, if the value is negative and too big to fit into a TInt.
1.1540 +*/
1.1541 + {
1.1542 + // on entry ecx=this, return value in eax
1.1543 + THISCALL_PROLOG0()
1.1544 + asm("push ecx");
1.1545 + asm("mov edx, [ecx]"); // edx=mantissa low
1.1546 + asm("mov eax, [ecx+4]"); // eax=mantissa high
1.1547 + asm("mov ecx, [ecx+8]"); // ecx=exponent/sign
1.1548 + asm("ror ecx, 16"); // exponent into cx, sign into ecx bit 16
1.1549 + asm("cmp cx, 0xFFFF");
1.1550 + asm("jz short trealxtoint1"); // branch if exp=FFFF (infinity or NaN)
1.1551 + asm("mov dx, cx");
1.1552 + asm("mov cx, 0x801E");
1.1553 + asm("sub cx, dx"); // cx=number of right shifts needed to convert mantissa to int
1.1554 + asm("jbe short trealxtoint2"); // if exp>=801E, saturate result
1.1555 + asm("cmp cx, 31"); // more than 31 shifts needed?
1.1556 + asm("ja short trealxtoint0"); // if so, underflow to zero
1.1557 + asm("shr eax, cl"); // else ABS[result]=eax>>cl (truncates towards zero)
1.1558 + asm("test ecx, 0x10000"); // test sign
1.1559 + asm("jz short trealxtoint3"); // skip if +
1.1560 + asm("neg eax");
1.1561 + asm("trealxtoint3:");
1.1562 + asm("pop ecx");
1.1563 + THISCALL_EPILOG0()
1.1564 + asm("trealxtoint1:"); // come here if exponent=FFFF
1.1565 + asm("cmp eax, 0x80000000"); // check for infinity
1.1566 + asm("jnz short trealxtoint0"); // if NaN, return 0
1.1567 + asm("test edx, edx");
1.1568 + asm("jnz short trealxtoint0"); // if NaN, return 0
1.1569 + asm("trealxtoint2:"); // come here if argument too big for 32-bit integer
1.1570 + asm("mov eax, 0x7FFFFFFF");
1.1571 + asm("shr ecx, 17"); // sign bit into carry flag
1.1572 + asm("adc eax, 0"); // eax=7FFFFFFF if +, 80000000 if -
1.1573 + asm("pop ecx");
1.1574 + THISCALL_EPILOG0() // return saturated value
1.1575 + asm("trealxtoint0:"); // come here if INT{argument}=0 or NaN
1.1576 + asm("xor eax, eax"); // return 0
1.1577 + asm("pop ecx");
1.1578 + THISCALL_EPILOG0()
1.1579 + }
1.1580 +
1.1581 +
1.1582 +
1.1583 +
1.1584 +__NAKED__ EXPORT_C TRealX::operator TUint() const
1.1585 +/**
1.1586 +Returns the extended precision value as an unsigned integer value.
1.1587 +
1.1588 +The operator returns:
1.1589 +
1.1590 +1. zero, if the extended precision value is not a number
1.1591 +
1.1592 +2. 0xFFFFFFFF, if the value is positive and too big to fit into a TUint.
1.1593 +
1.1594 +3. zero, if the value is negative and too big to fit into a TUint.
1.1595 +*/
1.1596 + {
1.1597 + // on entry ecx=this, return value in eax
1.1598 + THISCALL_PROLOG0()
1.1599 + asm("push ecx");
1.1600 + asm("mov edx, [ecx]"); // edx=mantissa low
1.1601 + asm("mov eax, [ecx+4]"); // eax=mantissa high
1.1602 + asm("mov ecx, [ecx+8]"); // ecx=exponent/sign
1.1603 + asm("ror ecx, 16"); // exponent into cx, sign into ecx bit 16
1.1604 + asm("cmp cx, 0xFFFF");
1.1605 + asm("jz short trealxtouint1"); // branch if exp=FFFF (infinity or NaN)
1.1606 + asm("mov dx, cx");
1.1607 + asm("mov cx, 0x801E");
1.1608 + asm("sub cx, dx"); // cx=number of right shifts needed to convert mantissa to int
1.1609 + asm("jb short trealxtouint2"); // if exp>801E, saturate result
1.1610 + asm("cmp cx, 31"); // more than 31 shifts needed?
1.1611 + asm("ja short trealxtouint0"); // if so, underflow to zero
1.1612 + asm("test ecx, 0x10000"); // test sign
1.1613 + asm("jnz short trealxtouint0"); // if -, return 0
1.1614 + asm("shr eax, cl"); // else result=eax>>cl (truncates towards zero)
1.1615 + asm("pop ecx");
1.1616 + THISCALL_EPILOG0()
1.1617 + asm("trealxtouint1:"); // come here if exponent=FFFF
1.1618 + asm("cmp eax, 0x80000000"); // check for infinity
1.1619 + asm("jnz short trealxtouint0"); // if NaN, return 0
1.1620 + asm("test edx, edx");
1.1621 + asm("jnz short trealxtouint0"); // if NaN, return 0
1.1622 + asm("trealxtouint2:"); // come here if argument too big for 32-bit integer
1.1623 + asm("mov eax, 0xFFFFFFFF");
1.1624 + asm("shr ecx, 17"); // sign bit into carry flag
1.1625 + asm("adc eax, 0"); // eax=FFFFFFFF if +, 0 if -
1.1626 + asm("pop ecx");
1.1627 + THISCALL_EPILOG0() // return saturated value
1.1628 + asm("trealxtouint0:"); // come here if INT{argument}=0 or NaN
1.1629 + asm("xor eax, eax"); // return 0
1.1630 + asm("pop ecx");
1.1631 + THISCALL_EPILOG0()
1.1632 + }
1.1633 +
1.1634 +
1.1635 +
1.1636 +
1.1637 +LOCAL_C __NAKED__ void ConvertTRealXToTInt64(void)
1.1638 + {
1.1639 + // Convert TRealX in ecx,edx:ebx to TInt64 in edx:ebx
1.1640 + asm("ror ecx, 16"); // exponent into cx, sign into ecx bit 16
1.1641 + asm("cmp cx, 0xFFFF");
1.1642 + asm("jz short trealxtoint64a"); // branch if exp=FFFF (infinity or NaN)
1.1643 + asm("mov ax, cx");
1.1644 + asm("mov cx, 0x803E");
1.1645 + asm("sub cx, ax"); // cx=number of right shifts needed to convert mantissa to int
1.1646 + asm("jbe short trealxtoint64b"); // if exp>=803E, saturate result
1.1647 + asm("cmp cx, 63"); // more than 63 shifts needed?
1.1648 + asm("ja short trealxtoint64z"); // if so, underflow to zero
1.1649 + asm("cmp cl, 31"); // more than 31 shifts needed?
1.1650 + asm("jbe short trealxtoint64d"); // branch if not
1.1651 + asm("sub cl, 32"); // cl=shift count - 32
1.1652 + asm("mov ebx, edx"); // shift right by 32
1.1653 + asm("xor edx, edx");
1.1654 + asm("trealxtoint64d:");
1.1655 + asm("shrd ebx, edx, cl"); // shift edx:ebx right by cl to give ABS{result}
1.1656 + asm("shr edx, cl");
1.1657 + asm("test ecx, 0x10000"); // test sign
1.1658 + asm("jz short trealxtoint64c"); // skip if +
1.1659 + asm("neg edx"); // if -, negate edx:ebx
1.1660 + asm("neg ebx");
1.1661 + asm("sbb edx, 0");
1.1662 + asm("trealxtoint64c:");
1.1663 + asm("ret");
1.1664 + asm("trealxtoint64a:"); // come here if exponent=FFFF
1.1665 + asm("cmp edx, 0x80000000"); // check for infinity
1.1666 + asm("jnz short trealxtoint64z"); // if NaN, return 0
1.1667 + asm("test ebx, ebx");
1.1668 + asm("jnz short trealxtoint64z"); // if NaN, return 0
1.1669 + asm("trealxtoint64b:"); // come here if argument too big for 64-bit integer
1.1670 + asm("mov edx, 0x7FFFFFFF");
1.1671 + asm("mov ebx, 0xFFFFFFFF");
1.1672 + asm("shr ecx, 17"); // sign bit into carry flag
1.1673 + asm("adc ebx, 0"); // edx:ebx=7FFFFFFF FFFFFFFF if +,
1.1674 + asm("adc edx, 0"); // or 80000000 00000000 if -
1.1675 + asm("ret"); // return saturated value
1.1676 + asm("trealxtoint64z:"); // come here if INT{argument}=0 or NaN
1.1677 + asm("xor edx, edx"); // return 0
1.1678 + asm("xor ebx, ebx");
1.1679 + asm("ret");
1.1680 + }
1.1681 +
1.1682 +
1.1683 +
1.1684 +
1.1685 +/**
1.1686 +Returns the extended precision value as a 64 bit integer value.
1.1687 +
1.1688 +The operator returns:
1.1689 +
1.1690 +1. zero, if the extended precision value is not a number
1.1691 +
1.1692 +2. 0x7FFFFFFF FFFFFFFF, if the value is positive and too big to fit
1.1693 +into a TInt64
1.1694 +
1.1695 +3. 0x80000000 00000000, if the value is negative and too big to fit
1.1696 +into a TInt64.
1.1697 +*/
1.1698 +__NAKED__ EXPORT_C TRealX::operator TInt64() const
1.1699 + {
1.1700 + // on entry, ecx=this, return value in edx:eax
1.1701 + THISCALL_PROLOG0()
1.1702 + asm("push ecx");
1.1703 + asm("push ebx");
1.1704 + asm("mov ebx, [ecx]"); // get TRealX value into ecx,edx:ebx
1.1705 + asm("mov edx, [ecx+4]");
1.1706 + asm("mov ecx, [ecx+8]");
1.1707 + asm("call %a0": : "i"(&ConvertTRealXToTInt64)); // convert to TInt64 in edx:ebx
1.1708 + asm("mov eax, ebx"); // result low into eax
1.1709 + asm("pop ebx");
1.1710 + asm("pop ecx");
1.1711 + THISCALL_EPILOG0()
1.1712 + }
1.1713 +
1.1714 +
1.1715 +
1.1716 +
1.1717 +LOCAL_C __NAKED__ void TRealXGetTReal32(void)
1.1718 + {
1.1719 + // Convert TRealX in ecx,edx:ebx to TReal32 in edx
1.1720 + // Return error code in eax
1.1721 + asm("cmp ecx, 0xFFFF0000"); // check for infinity/NaN
1.1722 + asm("jnc short trealxgettreal32a"); // branch if exponent=FFFF
1.1723 + asm("xor eax, eax");
1.1724 + asm("ror ecx, 16"); // exponent into cx
1.1725 + asm("sub cx, 0x7F80"); // cx=result exponent if normalised
1.1726 + asm("jbe short trealxgettreal32b"); // jump if denormal, zero or underflow
1.1727 + asm("cmp cx, 0xFF"); // check if overflow
1.1728 + asm("jb short trealxgettreal32c"); // jump if not
1.1729 + asm("trealxgettreal32d:"); // come here if overflow
1.1730 + asm("xor edx, edx"); // set mantissa=0 to generate infinity
1.1731 + asm("ror ecx, 16"); // ecx back to normal format
1.1732 + asm("trealxgettreal32a:"); // come here if infinity or NaN
1.1733 + asm("shr edx, 7");
1.1734 + asm("or edx, 0xFF000000"); // set exponent to FF
1.1735 + asm("shr ecx, 1"); // sign bit -> carry
1.1736 + asm("rcr edx, 1"); // sign bit -> MSB of result
1.1737 + asm("mov eax, edx");
1.1738 + asm("shl eax, 9"); // test for infinity or NaN
1.1739 + asm("mov eax, -9"); // eax=KErrOverflow (mov preserves ZF set by the shl)
1.1740 + asm("jz short trealxgettreal32e");
1.1741 + asm("mov eax, -6"); // if NaN, eax=KErrArgument
1.1742 + asm("trealxgettreal32e:");
1.1743 + asm("ret");
1.1744 + asm("trealxgettreal32b:"); // come here if exponent<=7F80
1.1745 + asm("cmp cx, -24"); // check for zero or total underflow
1.1746 + asm("jle short trealxgettreal32z");
1.1747 + asm("neg cl");
1.1748 + asm("inc cl"); // cl=number of right shifts to form denormal mantissa
1.1749 + asm("shrd eax, ebx, cl"); // shift mantissa right into eax
1.1750 + asm("shrd ebx, edx, cl");
1.1751 + asm("shr edx, cl");
1.1752 + asm("or edx, 0x80000000"); // set top bit to ensure correct rounding up
1.1753 + asm("xor cl, cl"); // cl=result exponent=0
1.1754 + asm("trealxgettreal32c:"); // come here if result normalised
1.1755 + asm("cmp dl, 0x80"); // check rounding bits
1.1756 + asm("ja short trealxgettreal32f"); // branch to round up
1.1757 + asm("jb short trealxgettreal32g"); // branch to round down
1.1758 + asm("test ebx, ebx");
1.1759 + asm("jnz short trealxgettreal32f"); // branch to round up
1.1760 + asm("test eax, eax");
1.1761 + asm("jnz short trealxgettreal32f"); // branch to round up
1.1762 + asm("test ecx, 0x01000000"); // check rounded-down flag
1.1763 + asm("jnz short trealxgettreal32f"); // branch to round up
1.1764 + asm("test ecx, 0x02000000"); // check rounded-up flag
1.1765 + asm("jnz short trealxgettreal32g"); // branch to round down
1.1766 + asm("test dh, 1"); // else round to even
1.1767 + asm("jz short trealxgettreal32g"); // branch to round down if LSB=0
1.1768 + asm("trealxgettreal32f:"); // come here to round up
1.1769 + asm("add edx, 0x100"); // increment mantissa
1.1770 + asm("jnc short trealxgettreal32g");
1.1771 + asm("rcr edx, 1");
1.1772 + asm("inc cl"); // if carry, increment exponent
1.1773 + asm("cmp cl, 0xFF"); // and check for overflow
1.1774 + asm("jz short trealxgettreal32d"); // branch out if overflow
1.1775 + asm("trealxgettreal32g:"); // come here to round down
1.1776 + asm("xor dl, dl");
1.1777 + asm("add edx, edx"); // shift out integer bit
1.1778 + asm("mov dl, cl");
1.1779 + asm("ror edx, 8"); // exponent->edx bits 24-31, mantissa in 23-1
1.1780 + asm("test edx, edx"); // check if underflow
1.1781 + asm("jz short trealxgettreal32h"); // branch out if underflow
1.1782 + asm("shr ecx, 17"); // sign bit->carry
1.1783 + asm("rcr edx, 1"); // ->edx bit 31, exp->edx bits 23-30, mant->edx bits 22-0
1.1784 + asm("xor eax, eax"); // return KErrNone
1.1785 + asm("ret");
1.1786 + asm("trealxgettreal32z:"); // come here if zero or underflow
1.1787 + asm("xor eax, eax");
1.1788 + asm("cmp cx, 0x8080"); // check for zero (TRealX exponent 0 gives 0-0x7F80=0x8080)
1.1789 + asm("jz short trealxgettreal32y"); // if zero, return KErrNone
1.1790 + asm("trealxgettreal32h:"); // come here if underflow after rounding
1.1791 + asm("mov eax, -10"); // eax=KErrUnderflow
1.1792 + asm("trealxgettreal32y:");
1.1793 + asm("xor edx, edx");
1.1794 + asm("shr ecx, 17");
1.1795 + asm("rcr edx, 1"); // sign bit into edx bit 31, rest of edx=0
1.1796 + asm("ret");
1.1797 + }
1.1798 +
1.1799 +
1.1800 +
1.1801 +
1.1802 +LOCAL_C __NAKED__ void TRealXGetTReal64(void)
1.1803 + {
1.1804 + // Convert TRealX in ecx,edx:ebx to TReal64 in edx:ebx
1.1805 + // Return error code in eax
1.1806 + // edi, esi also modified
1.1807 + asm("ror ecx, 16"); // exponent into cx
1.1808 + asm("cmp cx, 0xFFFF"); // check for infinity/NaN
1.1809 + asm("jnc short trealxgettreal64a"); // branch if exponent=FFFF
1.1810 + asm("xor eax, eax");
1.1811 + asm("xor edi, edi");
1.1812 + asm("sub cx, 0x7C00"); // cx=result exponent if normalised
1.1813 + asm("jbe short trealxgettreal64b"); // jump if denormal, zero or underflow
1.1814 + asm("cmp cx, 0x07FF"); // check if overflow
1.1815 + asm("jb short trealxgettreal64c"); // jump if not
1.1816 + asm("trealxgettreal64d:"); // come here if overflow
1.1817 + asm("xor edx, edx"); // set mantissa=0 to generate infinity
1.1818 + asm("xor ebx, ebx");
1.1819 + asm("trealxgettreal64a:"); // come here if infinity or NaN
1.1820 + asm("mov cl, 10"); // shift mantissa right by 10
1.1821 + asm("shrd ebx, edx, cl");
1.1822 + asm("shr edx, cl");
1.1823 + asm("or edx, 0xFFE00000"); // set exponent to 7FF
1.1824 + asm("shr ecx, 17"); // sign bit -> carry
1.1825 + asm("rcr edx, 1"); // sign bit -> MSB of result
1.1826 + asm("rcr ebx, 1");
1.1827 + asm("mov eax, edx");
1.1828 + asm("shl eax, 12"); // test for infinity or NaN
1.1829 + asm("mov eax, -9"); // eax=KErrOverflow (mov preserves ZF set by the shl)
1.1830 + asm("jnz short trealxgettreal64n"); // NaN if high mantissa bits nonzero
1.1831 + asm("test ebx, ebx");
1.1832 + asm("jz short trealxgettreal64e");
1.1833 + asm("trealxgettreal64n:");
1.1834 + asm("mov eax, -6"); // if NaN, eax=KErrArgument
1.1835 + asm("trealxgettreal64e:");
1.1836 + asm("ret");
1.1837 + asm("trealxgettreal64b:"); // come here if exponent<=7C00
1.1838 + asm("cmp cx, -53"); // check for zero or total underflow
1.1839 + asm("jle short trealxgettreal64z");
1.1840 + asm("neg cl");
1.1841 + asm("inc cl"); // cl=number of right shifts to form denormal mantissa
1.1842 + asm("cmp cl, 32");
1.1843 + asm("jb trealxgettreal64x");
1.1844 + asm("mov eax, ebx"); // if >=32 shifts, do 32 shifts and decrement count by 32
1.1845 + asm("mov ebx, edx");
1.1846 + asm("xor edx, edx");
1.1847 + asm("trealxgettreal64x:");
1.1848 + asm("shrd edi, eax, cl");
1.1849 + asm("shrd eax, ebx, cl"); // shift mantissa right into eax
1.1850 + asm("shrd ebx, edx, cl");
1.1851 + asm("shr edx, cl");
1.1852 + asm("or edx, 0x80000000"); // set top bit to ensure correct rounding up
1.1853 + asm("xor cx, cx"); // cx=result exponent=0
1.1854 + asm("trealxgettreal64c:"); // come here if result normalised
1.1855 + asm("mov esi, ebx");
1.1856 + asm("and esi, 0x7FF"); // esi=rounding bits
1.1857 + asm("cmp esi, 0x400"); // check rounding bits
1.1858 + asm("ja short trealxgettreal64f"); // branch to round up
1.1859 + asm("jb short trealxgettreal64g"); // branch to round down
1.1860 + asm("test eax, eax");
1.1861 + asm("jnz short trealxgettreal64f"); // branch to round up
1.1862 + asm("test edi, edi");
1.1863 + asm("jnz short trealxgettreal64f"); // branch to round up
1.1864 + asm("test ecx, 0x01000000"); // check rounded-down flag
1.1865 + asm("jnz short trealxgettreal64f"); // branch to round up
1.1866 + asm("test ecx, 0x02000000"); // check rounded-up flag
1.1867 + asm("jnz short trealxgettreal64g"); // branch to round down
1.1868 + asm("test ebx, 0x800"); // else round to even
1.1869 + asm("jz short trealxgettreal64g"); // branch to round down if LSB=0
1.1870 + asm("trealxgettreal64f:"); // come here to round up
1.1871 + asm("add ebx, 0x800"); // increment mantissa
1.1872 + asm("adc edx, 0");
1.1873 + asm("jnc short trealxgettreal64g");
1.1874 + asm("rcr edx, 1");
1.1875 + asm("inc cx"); // if carry, increment exponent
1.1876 + asm("cmp cx, 0x7FF"); // and check for overflow
1.1877 + asm("jz short trealxgettreal64d"); // branch out if overflow
1.1878 + asm("trealxgettreal64g:"); // come here to round down
1.1879 + asm("xor bl, bl"); // clear rounding bits
1.1880 + asm("and bh, 0xF8");
1.1881 + asm("mov di, cx"); // save exponent
1.1882 + asm("mov cl, 10");
1.1883 + asm("and edx, 0x7FFFFFFF"); // clear integer bit
1.1884 + asm("shrd ebx, edx, cl"); // shift mantissa right by 10
1.1885 + asm("shr edx, cl");
1.1886 + asm("shl edi, 21"); // exponent into edi bits 21-31
1.1887 + asm("or edx, edi"); // into edx bits 21-31
1.1888 + asm("test edx, edx"); // check if underflow
1.1889 + asm("jnz short trealxgettreal64i");
1.1890 + asm("test ebx, ebx");
1.1891 + asm("jz short trealxgettreal64h"); // branch out if underflow
1.1892 + asm("trealxgettreal64i:");
1.1893 + asm("shr ecx, 17"); // sign bit->carry
1.1894 + asm("rcr edx, 1"); // ->edx bit 31, exp->edx bits 20-30, mant->edx bits 20-0
1.1895 + asm("rcr ebx, 1");
1.1896 + asm("xor eax, eax"); // return KErrNone
1.1897 + asm("ret");
1.1898 + asm("trealxgettreal64z:"); // come here if zero or underflow
1.1899 + asm("xor eax, eax");
1.1900 + asm("cmp cx, 0x8400"); // check for zero (TRealX exponent 0 gives 0-0x7C00=0x8400)
1.1901 + asm("jz short trealxgettreal64y"); // if zero, return KErrNone
1.1902 + asm("trealxgettreal64h:"); // come here if underflow after rounding
1.1903 + asm("mov eax, -10"); // eax=KErrUnderflow
1.1904 + asm("trealxgettreal64y:");
1.1905 + asm("xor edx, edx");
1.1906 + asm("xor ebx, ebx");
1.1907 + asm("shr ecx, 17");
1.1908 + asm("rcr edx, 1"); // sign bit into edx bit 31, rest of edx=0, ebx=0
1.1909 + asm("ret");
1.1910 + }
1.1911 +
1.1912 +
1.1913 +
1.1914 +
1.1915 +__NAKED__ EXPORT_C TRealX::operator TReal32() const
1.1916 +/**
1.1917 +Returns the extended precision value as
1.1918 +a single precision floating point value.
1.1919 +*/
1.1920 + {
1.1921 + // On entry, ecx=this
1.1922 + // On exit, TReal32 value on top of FPU stack
1.1923 + THISCALL_PROLOG0()
1.1924 + asm("push ecx");
1.1925 + asm("push ebx");
1.1926 + asm("mov ebx, [ecx]"); // *this into ecx,edx:ebx
1.1927 + asm("mov edx, [ecx+4]");
1.1928 + asm("mov ecx, [ecx+8]");
1.1929 + asm("call %a0": : "i"(&TRealXGetTReal32)); // Convert to TReal32 in edx (error code in eax not checked)
1.1930 + asm("push edx"); // push TReal32 onto stack
1.1931 + asm("fld dword ptr [esp]"); // push TReal32 onto FPU stack
1.1932 + asm("pop edx");
1.1933 + asm("pop ebx");
1.1934 + asm("pop ecx");
1.1935 + THISCALL_EPILOG0()
1.1936 + }
1.1937 +
1.1938 +
1.1939 +
1.1940 +
1.1941 +__NAKED__ EXPORT_C TRealX::operator TReal64() const
1.1942 +/**
1.1943 +Returns the extended precision value as
1.1944 +a double precision floating point value.
1.1945 +*/
1.1946 + {
1.1947 + // On entry, ecx=this
1.1948 + // On exit, TReal64 value on top of FPU stack
1.1949 + THISCALL_PROLOG0()
1.1950 + asm("push ecx");
1.1951 + asm("push ebx");
1.1952 + asm("push esi");
1.1953 + asm("push edi");
1.1954 + asm("mov ebx, [ecx]"); // *this into ecx,edx:ebx
1.1955 + asm("mov edx, [ecx+4]");
1.1956 + asm("mov ecx, [ecx+8]");
1.1957 + asm("call %a0": : "i"(&TRealXGetTReal64)); // Convert to TReal64 in edx:ebx (error code in eax not checked)
1.1958 + asm("push edx"); // push TReal64 onto stack
1.1959 + asm("push ebx");
1.1960 + asm("fld qword ptr [esp]"); // push TReal64 onto FPU stack
1.1961 + asm("add esp, 8");
1.1962 + asm("pop edi");
1.1963 + asm("pop esi");
1.1964 + asm("pop ebx");
1.1965 + asm("pop ecx");
1.1966 + THISCALL_EPILOG0()
1.1967 + }
1.1968 +
1.1969 +
1.1970 +
1.1971 +
1.1972 +__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal32& /*aVal*/) const
1.1973 +/**
1.1974 +Extracts the extended precision value as
1.1975 +a single precision floating point value.
1.1976 +
1.1977 +@param aVal A reference to a single precision object which contains
1.1978 +the result of the operation.
1.1979 +
1.1980 +@return KErrNone, if the operation is successful;
1.1981 +KErrOverflow, if the operation results in overflow;
1.1982 +KErrUnderflow, if the operation results in underflow.
1.1983 +*/
1.1984 + {
1.1985 + // On entry, ecx=this, [esp+4]=address of aVal
1.1986 + // On exit, eax=return code
1.1987 + THISCALL_PROLOG1()
1.1988 + asm("push ecx");
1.1989 + asm("push ebx");
1.1990 + asm("mov ebx, [ecx]"); // *this into ecx,edx:ebx
1.1991 + asm("mov edx, [ecx+4]");
1.1992 + asm("mov ecx, [ecx+8]");
1.1993 + asm("call %a0": : "i"(&TRealXGetTReal32)); // convert to TReal32 in edx, error code in eax
1.1994 + asm("mov ecx, [esp+12]"); // ecx=address of aVal (original [esp+4] after two pushes)
1.1995 + asm("mov [ecx], edx"); // store result
1.1996 + asm("pop ebx");
1.1997 + asm("pop ecx");
1.1998 + THISCALL_EPILOG1() // return with error code in eax
1.1999 + }
1.2000 +
1.2001 +
1.2002 +
1.2003 +
1.2004 +__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal64& /*aVal*/) const
1.2005 +/**
1.2006 +Extracts the extended precision value as
1.2007 +a double precision floating point value.
1.2008 +
1.2009 +@param aVal A reference to a double precision object which
1.2010 +contains the result of the operation.
1.2011 +
1.2012 +@return KErrNone, if the operation is successful;
1.2013 +KErrOverflow, if the operation results in overflow;
1.2014 +KErrUnderflow, if the operation results in underflow.
1.2015 +*/
1.2016 + {
1.2017 + // On entry, ecx=this, [esp+4]=address of aVal
1.2018 + // On exit, eax=return code
1.2019 + THISCALL_PROLOG1()
1.2020 + asm("push ecx");
1.2021 + asm("push ebx");
1.2022 + asm("push esi");
1.2023 + asm("push edi");
1.2024 + asm("mov ebx, [ecx]"); // *this into ecx,edx:ebx
1.2025 + asm("mov edx, [ecx+4]");
1.2026 + asm("mov ecx, [ecx+8]");
1.2027 + asm("call %a0": : "i"(&TRealXGetTReal64)); // convert to TReal64 in edx:ebx, error code in eax
1.2028 + asm("mov ecx, [esp+20]"); // ecx=address of aVal (original [esp+4] after four pushes)
1.2029 + asm("mov [ecx], ebx"); // store result
1.2030 + asm("mov [ecx+4], edx");
1.2031 + asm("pop edi");
1.2032 + asm("pop esi");
1.2033 + asm("pop ebx");
1.2034 + asm("pop ecx");
1.2035 + THISCALL_EPILOG1() // return with error code in eax
1.2036 + }
1.2037 +
1.2038 +
1.2039 +
1.2040 +
1.2041 +__NAKED__ EXPORT_C void TRealX::SetZero(TBool /*aNegative*/)
1.2042 +/**
1.2043 +Sets the value of this extended precision object to zero.
1.2044 +
1.2045 +@param aNegative ETrue, the value is a negative zero;
1.2046 +EFalse, the value is a positive zero, this is the default.
1.2047 +*/
1.2048 + {
1.2049 + THISCALL_PROLOG1()
1.2050 + asm("mov edx, [esp+4]"); // aNegative into edx
1.2051 + asm("xor eax, eax"); // eax=0
1.2052 + asm("mov [ecx], eax"); // mantissa=0
1.2053 + asm("mov [ecx+4], eax");
1.2054 + asm("test edx, edx");
1.2055 + asm("jz short setzero1");
1.2056 + asm("inc eax"); // eax=1 if aNegative!=0 (exponent 0, sign bit set)
1.2057 + asm("setzero1:");
1.2058 + asm("mov [ecx+8], eax"); // generate positive or negative zero
1.2059 + THISCALL_EPILOG1()
1.2060 + }
1.2061 +
1.2062 +
1.2063 +
1.2064 +
1.2065 +__NAKED__ EXPORT_C void TRealX::SetNaN()
1.2066 +/**
1.2067 +Sets the value of this extended precision object to 'not a number'.
1.2068 +*/
1.2069 + {
1.2070 + THISCALL_PROLOG0()
1.2071 + asm("xor eax, eax"); // set *this to 'real indefinite'
1.2072 + asm("mov [ecx], eax"); // mantissa low=0
1.2073 + asm("mov eax, 0xC0000000");
1.2074 + asm("mov [ecx+4], eax"); // mantissa high=C0000000
1.2075 + asm("mov eax, 0xFFFF0001");
1.2076 + asm("mov [ecx+8], eax"); // exponent=FFFF, sign negative
1.2077 + THISCALL_EPILOG0()
1.2078 + }
1.2079 +
1.2080 +
1.2081 +
1.2082 +
1.2083 +__NAKED__ EXPORT_C void TRealX::SetInfinite(TBool /*aNegative*/)
1.2084 +/**
1.2085 +Sets the value of this extended precision object to infinity.
1.2086 +
1.2087 +@param aNegative ETrue, the value is negative infinity;
1.2088 +EFalse, the value is positive infinity.
1.2089 +*/
1.2090 + {
1.2091 + THISCALL_PROLOG1()
1.2092 + asm("mov edx, [esp+4]"); // aNegative into edx
1.2093 + asm("mov eax, 0xFFFF0000"); // exponent=FFFF, sign=0 initially
1.2094 + asm("test edx, edx");
1.2095 + asm("jz short setinf1");
1.2096 + asm("inc eax"); // sign=1 if aNegative!=0
1.2097 + asm("setinf1:");
1.2098 + asm("mov [ecx+8], eax");
1.2099 + asm("mov eax, 0x80000000"); // generate positive or negative infinity
1.2100 + asm("mov [ecx+4], eax");
1.2101 + asm("xor eax, eax");
1.2102 + asm("mov [ecx], eax");
1.2103 + THISCALL_EPILOG1()
1.2104 + }
1.2105 +
1.2106 +
1.2107 +
1.2108 +
1.2109 +__NAKED__ EXPORT_C TBool TRealX::IsZero() const
1.2110 +/**
1.2111 +Determines whether the extended precision value is zero.
1.2112 +
1.2113 +@return True, if the extended precision value is zero, false, otherwise.
1.2114 +*/
1.2115 + {
1.2116 + THISCALL_PROLOG0()
1.2117 + asm("mov eax, [ecx+8]"); // check exponent
1.2118 + asm("shr eax, 16"); // move exponent into ax, setting ZF
1.2119 + asm("jz short iszero1"); // branch if exponent=0 (TRealX zero)
1.2120 + asm("xor eax, eax"); // else return 0
1.2121 + THISCALL_EPILOG0()
1.2122 + asm("iszero1:");
1.2123 + asm("inc eax"); // if zero, return 1
1.2124 + THISCALL_EPILOG0()
1.2125 + }
1.2126 +
1.2127 +
1.2128 +
1.2129 +
1.2130 +__NAKED__ EXPORT_C TBool TRealX::IsNaN() const
1.2131 +/**
1.2132 +Determines whether the extended precision value is 'not a number'.
1.2133 +
1.2134 +@return True, if the extended precision value is 'not a number',
1.2135 +false, otherwise.
1.2136 +*/
1.2137 + {
1.2138 + THISCALL_PROLOG0()
1.2139 + asm("mov eax, [ecx+8]"); // check exponent
1.2140 + asm("cmp eax, 0xFFFF0000");
1.2141 + asm("jc short isnan0"); // branch if exponent not FFFF (not NaN, not infinity)
1.2142 + asm("mov eax, [ecx+4]");
1.2143 + asm("cmp eax, 0x80000000"); // check for infinity (mantissa 80000000 00000000)
1.2144 + asm("jne short isnan1");
1.2145 + asm("mov eax, [ecx]");
1.2146 + asm("test eax, eax");
1.2147 + asm("jne short isnan1");
1.2148 + asm("isnan0:");
1.2149 + asm("xor eax, eax"); // return 0 if not NaN
1.2150 + THISCALL_EPILOG0()
1.2151 + asm("isnan1:");
1.2152 + asm("mov eax, 1"); // return 1 if NaN
1.2153 + THISCALL_EPILOG0()
1.2154 + }
1.2155 +
1.2156 +
1.2157 +
1.2158 +
1.2159 +__NAKED__ EXPORT_C TBool TRealX::IsInfinite() const
1.2160 +/**
1.2161 +Determines whether the extended precision value is infinite.
1.2162 +
1.2163 +@return True, if the extended precision value is infinite,
1.2164 +false, otherwise.
1.2165 +*/
1.2166 + {
1.2167 + THISCALL_PROLOG0()
1.2168 + asm("mov eax, [ecx+8]"); // check exponent
1.2169 + asm("cmp eax, 0xFFFF0000");
1.2170 + asm("jc short isinf0"); // branch if exponent not FFFF (finite)
1.2171 + asm("mov eax, [ecx+4]");
1.2172 + asm("cmp eax, 0x80000000"); // check for infinity (mantissa 80000000 00000000)
1.2173 + asm("jne short isinf0");
1.2174 + asm("mov eax, [ecx]");
1.2175 + asm("test eax, eax");
1.2176 + asm("jne short isinf0");
1.2177 + asm("inc eax"); // return 1 if infinity
1.2178 + THISCALL_EPILOG0()
1.2179 + asm("isinf0:");
1.2180 + asm("xor eax, eax"); // return 0 if not infinity
1.2181 + THISCALL_EPILOG0()
1.2182 + }
1.2183 +
1.2184 +
1.2185 +
1.2186 +
1.2187 +__NAKED__ EXPORT_C TBool TRealX::IsFinite() const
1.2188 +/**
1.2189 +Determines whether the extended precision value has a finite value.
1.2190 +
1.2191 +@return True, if the extended precision value is finite,
1.2192 +false, if the value is 'not a number' or is infinite,
1.2193 +*/
1.2194 + {
1.2195 + THISCALL_PROLOG0()
1.2196 + asm("mov eax, [ecx+8]"); // check exponent
1.2197 + asm("cmp eax, 0xFFFF0000"); // exponent=FFFF means NaN or infinity
1.2198 + asm("jnc short isfinite0"); // branch if NaN or infinity
1.2199 + asm("mov eax, 1"); // return 1 if finite
1.2200 + THISCALL_EPILOG0()
1.2201 + asm("isfinite0:");
1.2202 + asm("xor eax, eax"); // return 0 if NaN or infinity
1.2203 + THISCALL_EPILOG0()
1.2204 + }
1.2205 +
1.2206 +
1.2207 +
1.2208 +
1.2209 +__NAKED__ EXPORT_C const TRealX& TRealX::operator+=(const TRealX& /*aVal*/)
1.2210 +/**
1.2211 +Adds an extended precision value to this extended precision number.
1.2212 +
1.2213 +@param aVal The extended precision value to be added.
1.2214 +
1.2215 +@return A reference to this object.
1.2216 +
1.2217 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2218 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2219 +*/
1.2220 + {
1.2221 + // on entry ecx=this, [esp+4]=address of aVal; on exit eax=this
1.2222 + THISCALL_PROLOG1()
1.2223 + asm("push ebx"); // save registers
1.2224 + asm("push ebp");
1.2225 + asm("push esi");
1.2226 + asm("push edi");
1.2227 + asm("mov esi, ecx"); // this into esi
1.2228 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (original [esp+4] after four pushes)
1.2229 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2230 + asm("mov edx, [ecx+4]");
1.2231 + asm("mov ecx, [ecx+8]");
1.2232 + asm("call %a0": :"i"(&TRealXAdd)); // do addition, result in ecx,edx:ebx, error code in eax
1.2233 + asm("mov [esi], ebx"); // store result in *this
1.2234 + asm("mov [esi+4], edx");
1.2235 + asm("mov [esi+8], ecx");
1.2236 + asm("test eax, eax");
1.2237 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2238 + asm("mov eax, esi"); // return this in eax
1.2239 + asm("mov ecx, esi"); // restore registers
1.2240 + asm("pop edi");
1.2241 + asm("pop esi");
1.2242 + asm("pop ebp");
1.2243 + asm("pop ebx");
1.2244 + THISCALL_EPILOG1()
1.2245 + }
1.2246 +
1.2247 +
1.2248 +
1.2249 +
1.2250 +__NAKED__ EXPORT_C const TRealX& TRealX::operator-=(const TRealX& /*aVal*/)
1.2251 +/**
1.2252 +Subtracts an extended precision value from this extended precision number.
1.2253 +
1.2254 +@param aVal The extended precision value to be subtracted.
1.2255 +
1.2256 +@return A reference to this object.
1.2257 +
1.2258 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2259 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2260 +*/
1.2261 + {
1.2262 + // on entry ecx=this, [esp+4]=address of aVal; on exit eax=this
1.2263 + THISCALL_PROLOG1()
1.2264 + asm("push ebx"); // save registers
1.2265 + asm("push ebp");
1.2266 + asm("push esi");
1.2267 + asm("push edi");
1.2268 + asm("mov esi, ecx"); // this into esi
1.2269 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (original [esp+4] after four pushes)
1.2270 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2271 + asm("mov edx, [ecx+4]");
1.2272 + asm("mov ecx, [ecx+8]");
1.2273 + asm("call %a0": : "i"(&TRealXSubtract)); // do subtraction, result in ecx,edx:ebx, error code in eax
1.2274 + asm("mov [esi], ebx"); // store result in *this
1.2275 + asm("mov [esi+4], edx");
1.2276 + asm("mov [esi+8], ecx");
1.2277 + asm("test eax, eax");
1.2278 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2279 + asm("mov eax, esi"); // return this in eax
1.2280 + asm("mov ecx, esi"); // restore registers
1.2281 + asm("pop edi");
1.2282 + asm("pop esi");
1.2283 + asm("pop ebp");
1.2284 + asm("pop ebx");
1.2285 + THISCALL_EPILOG1()
1.2286 + }
1.2287 +
1.2288 +
1.2289 +
1.2290 +
1.2291 +__NAKED__ EXPORT_C const TRealX& TRealX::operator*=(const TRealX& /*aVal*/)
1.2292 +/**
1.2293 +Multiplies this extended precision number by an extended precision value.
1.2294 +
1.2295 +@param aVal The extended precision value to be used as the multiplier.
1.2296 +
1.2297 +@return A reference to this object.
1.2298 +
1.2299 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2300 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2301 +*/
1.2302 + {
1.2303 + // on entry ecx=this, [esp+4]=address of aVal; on exit eax=this
1.2304 + THISCALL_PROLOG1()
1.2305 + asm("push ebx"); // save registers
1.2306 + asm("push ebp");
1.2307 + asm("push esi");
1.2308 + asm("push edi");
1.2309 + asm("mov esi, ecx"); // esi = this
1.2310 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (original [esp+4] after four pushes)
1.2311 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2312 + asm("mov edx, [ecx+4]");
1.2313 + asm("mov ecx, [ecx+8]");
1.2314 + asm("call %a0": : "i"(&TRealXMultiply)); // do multiplication, result in ecx,edx:ebx, error code in eax
1.2315 + asm("mov [esi], ebx"); // store result in *this
1.2316 + asm("mov [esi+4], edx");
1.2317 + asm("mov [esi+8], ecx");
1.2318 + asm("test eax, eax");
1.2319 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2320 + asm("mov eax, esi"); // return this in eax
1.2321 + asm("mov ecx, esi"); // restore registers
1.2322 + asm("pop edi");
1.2323 + asm("pop esi");
1.2324 + asm("pop ebp");
1.2325 + asm("pop ebx");
1.2326 + THISCALL_EPILOG1()
1.2327 + }
1.2328 +
1.2329 +
1.2330 +
1.2331 +
1.2332 +__NAKED__ EXPORT_C const TRealX& TRealX::operator/=(const TRealX& /*aVal*/)
1.2333 +/**
1.2334 +Divides this extended precision number by an extended precision value.
1.2335 +
1.2336 +@param aVal The extended precision value to be used as the divisor.
1.2337 +
1.2338 +@return A reference to this object.
1.2339 +
1.2340 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2341 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2342 +@panic MATHX KErrDivideByZero if the divisor is zero.
1.2343 +*/
1.2344 + {
1.2345 + // on entry ecx=this, [esp+4]=address of aVal; on exit eax=this
1.2346 + THISCALL_PROLOG1()
1.2347 + asm("push ebx"); // save registers
1.2348 + asm("push ebp");
1.2349 + asm("push esi");
1.2350 + asm("push edi");
1.2351 + asm("mov esi, ecx"); // this into esi
1.2352 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (original [esp+4] after four pushes)
1.2353 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2354 + asm("mov edx, [ecx+4]");
1.2355 + asm("mov ecx, [ecx+8]");
1.2356 + asm("call %a0": : "i"(&TRealXDivide)); // do division, result in ecx,edx:ebx, error code in eax
1.2357 + asm("mov [esi], ebx"); // store result in *this
1.2358 + asm("mov [esi+4], edx");
1.2359 + asm("mov [esi+8], ecx");
1.2360 + asm("test eax, eax");
1.2361 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2362 + asm("mov eax, esi"); // return this in eax
1.2363 + asm("mov ecx, esi"); // restore registers
1.2364 + asm("pop edi");
1.2365 + asm("pop esi");
1.2366 + asm("pop ebp");
1.2367 + asm("pop ebx");
1.2368 + THISCALL_EPILOG1()
1.2369 + }
1.2370 +
1.2371 +
1.2372 +
1.2373 +
1.2374 +__NAKED__ EXPORT_C const TRealX& TRealX::operator%=(const TRealX& /*aVal*/)
1.2375 +/**
1.2376 +Modulo-divides this extended precision number by an extended precision value.
1.2377 +
1.2378 +@param aVal The extended precision value to be used as the divisor.
1.2379 +
1.2380 +@return A reference to this object.
1.2381 +
1.2382 +@panic MATHX KErrTotalLossOfPrecision if precision is lost.
1.2383 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2384 +*/
1.2385 + {
1.2386 + // on entry ecx=this, [esp+4]=address of aVal
1.2387 + THISCALL_PROLOG1()
1.2388 + asm("push ebx"); // save registers
1.2389 + asm("push ebp");
1.2390 + asm("push esi");
1.2391 + asm("push edi");
1.2392 + asm("mov esi, ecx"); // this into esi
1.2393 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2394 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2395 + asm("mov edx, [ecx+4]");
1.2396 + asm("mov ecx, [ecx+8]");
1.2397 + asm("call %a0": : "i"(&TRealXModulo)); // do modulo, result in ecx,edx:ebx, error code in eax
1.2398 + asm("mov [esi], ebx"); // store result in *this
1.2399 + asm("mov [esi+4], edx");
1.2400 + asm("mov [esi+8], ecx");
1.2401 + asm("test eax, eax"); // check error code
1.2402 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2403 + asm("mov eax, esi"); // return this in eax
1.2404 + asm("mov ecx, esi"); // restore registers
1.2405 + asm("pop edi");
1.2406 + asm("pop esi");
1.2407 + asm("pop ebp");
1.2408 + asm("pop ebx");
1.2409 + THISCALL_EPILOG1()
1.2410 + }
1.2411 +
1.2412 +
1.2413 +
1.2414 +
1.2415 +__NAKED__ EXPORT_C TInt TRealX::AddEq(const TRealX& /*aVal*/)
1.2416 +/**
1.2417 +Adds an extended precision value to this extended precision number.
1.2418 +
1.2419 +@param aVal The extended precision value to be added.
1.2420 +
1.2421 +@return KErrNone, if the operation is successful;
1.2422 +KErrOverflow,if the operation results in overflow;
1.2423 +KErrUnderflow, if the operation results in underflow.
1.2424 +*/
1.2425 + {
1.2426 + // on entry ecx=this, [esp+4]=address of aVal
1.2427 + THISCALL_PROLOG1()
1.2428 + asm("push ebx"); // save registers
1.2429 + asm("push ebp");
1.2430 + asm("push esi");
1.2431 + asm("push edi");
1.2432 + asm("mov esi, ecx"); // this into esi
1.2433 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2434 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2435 + asm("mov edx, [ecx+4]");
1.2436 + asm("mov ecx, [ecx+8]");
1.2437 + asm("call %a0": :"i"(&TRealXAdd)); // do addition, result in ecx,edx:ebx, error code in eax
1.2438 + asm("mov [esi], ebx"); // store result in *this
1.2439 + asm("mov [esi+4], edx");
1.2440 + asm("mov [esi+8], ecx");
1.2441 + asm("mov ecx, esi"); // restore registers
1.2442 + asm("pop edi");
1.2443 + asm("pop esi");
1.2444 + asm("pop ebp");
1.2445 + asm("pop ebx");
1.2446 + THISCALL_EPILOG1() // return with error code in eax
1.2447 + }
1.2448 +
1.2449 +
1.2450 +
1.2451 +
1.2452 +__NAKED__ EXPORT_C TInt TRealX::SubEq(const TRealX& /*aVal*/)
1.2453 +/**
1.2454 +Subtracts an extended precision value from this extended precision number.
1.2455 +
1.2456 +@param aVal The extended precision value to be subtracted.
1.2457 +
1.2458 +@return KErrNone, if the operation is successful;
1.2459 +KErrOverflow, if the operation results in overflow;
1.2460 +KErrUnderflow, if the operation results in underflow.
1.2461 +*/
1.2462 + {
1.2463 + // on entry ecx=this, [esp+4]=address of aVal
1.2464 + THISCALL_PROLOG1()
1.2465 + asm("push ebx"); // save registers
1.2466 + asm("push ebp");
1.2467 + asm("push esi");
1.2468 + asm("push edi");
1.2469 + asm("mov esi, ecx"); // this into esi
1.2470 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2471 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2472 + asm("mov edx, [ecx+4]");
1.2473 + asm("mov ecx, [ecx+8]");
1.2474 + asm("call %a0": : "i"(&TRealXSubtract)); // do subtraction, result in ecx,edx:ebx, error code in eax
1.2475 + asm("mov [esi], ebx"); // store result in *this
1.2476 + asm("mov [esi+4], edx");
1.2477 + asm("mov [esi+8], ecx");
1.2478 + asm("mov ecx, esi"); // restore registers
1.2479 + asm("pop edi");
1.2480 + asm("pop esi");
1.2481 + asm("pop ebp");
1.2482 + asm("pop ebx");
1.2483 + THISCALL_EPILOG1() // return with error code in eax
1.2484 + }
1.2485 +
1.2486 +
1.2487 +
1.2488 +
1.2489 +__NAKED__ EXPORT_C TInt TRealX::MultEq(const TRealX& /*aVal*/)
1.2490 +/**
1.2491 +Multiplies this extended precision number by an extended precision value.
1.2492 +
1.2493 +@param aVal The extended precision value to be used as the multiplier.
1.2494 +
1.2495 +@return KErrNone, if the operation is successful;
1.2496 +KErrOverflow, if the operation results in overflow;
1.2497 +KErrUnderflow, if the operation results in underflow
1.2498 +*/
1.2499 + {
1.2500 + // on entry ecx=this, [esp+4]=address of aVal
1.2501 + THISCALL_PROLOG1()
1.2502 + asm("push ebx"); // save registers
1.2503 + asm("push ebp");
1.2504 + asm("push esi");
1.2505 + asm("push edi");
1.2506 + asm("mov esi, ecx"); // this into esi
1.2507 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2508 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2509 + asm("mov edx, [ecx+4]");
1.2510 + asm("mov ecx, [ecx+8]");
1.2511 + asm("call %a0": : "i"(&TRealXMultiply)); // do multiplication, result in ecx,edx:ebx, error code in eax
1.2512 + asm("mov [esi], ebx"); // store result in *this
1.2513 + asm("mov [esi+4], edx");
1.2514 + asm("mov [esi+8], ecx");
1.2515 + asm("mov ecx, esi"); // restore registers
1.2516 + asm("pop edi");
1.2517 + asm("pop esi");
1.2518 + asm("pop ebp");
1.2519 + asm("pop ebx");
1.2520 + THISCALL_EPILOG1() // return with error code in eax
1.2521 + }
1.2522 +
1.2523 +
1.2524 +
1.2525 +
1.2526 +__NAKED__ EXPORT_C TInt TRealX::DivEq(const TRealX& /*aVal*/)
1.2527 +/**
1.2528 +Divides this extended precision number by an extended precision value.
1.2529 +
1.2530 +@param aVal The extended precision value to be used as the divisor.
1.2531 +
1.2532 +@return KErrNone, if the operation is successful;
1.2533 +KErrOverflow, if the operation results in overflow;
1.2534 +KErrUnderflow, if the operation results in underflow;
1.2535 +KErrDivideByZero, if the divisor is zero.
1.2536 +*/
1.2537 + {
1.2538 + // on entry ecx=this, [esp+4]=address of aVal
1.2539 + THISCALL_PROLOG1()
1.2540 + asm("push ebx"); // save registers
1.2541 + asm("push ebp");
1.2542 + asm("push esi");
1.2543 + asm("push edi");
1.2544 + asm("mov esi, ecx"); // this into esi
1.2545 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2546 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2547 + asm("mov edx, [ecx+4]");
1.2548 + asm("mov ecx, [ecx+8]");
1.2549 + asm("call %a0": : "i"(&TRealXDivide)); // do division, result in ecx,edx:ebx, error code in eax
1.2550 + asm("mov [esi], ebx"); // store result in *this
1.2551 + asm("mov [esi+4], edx");
1.2552 + asm("mov [esi+8], ecx");
1.2553 + asm("mov ecx, esi"); // restore registers
1.2554 + asm("pop edi");
1.2555 + asm("pop esi");
1.2556 + asm("pop ebp");
1.2557 + asm("pop ebx");
1.2558 + THISCALL_EPILOG1() // return with error code in eax
1.2559 + }
1.2560 +
1.2561 +
1.2562 +
1.2563 +
1.2564 +__NAKED__ EXPORT_C TInt TRealX::ModEq(const TRealX& /*aVal*/)
1.2565 +/**
1.2566 +Modulo-divides this extended precision number by an extended precision value.
1.2567 +
1.2568 +@param aVal The extended precision value to be used as the divisor.
1.2569 +
1.2570 +@return KErrNone, if the operation is successful;
1.2571 +KErrTotalLossOfPrecision, if precision is lost;
1.2572 +KErrUnderflow, if the operation results in underflow.
1.2573 +*/
1.2574 + {
1.2575 + // on entry ecx=this, [esp+4]=address of aVal
1.2576 + THISCALL_PROLOG1()
1.2577 + asm("push ebx"); // save registers
1.2578 + asm("push ebp");
1.2579 + asm("push esi");
1.2580 + asm("push edi");
1.2581 + asm("mov esi, ecx"); // this into esi
1.2582 + asm("mov ecx, [esp+20]"); // address of aVal into ecx (esp moved by 4 pushes)
1.2583 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2584 + asm("mov edx, [ecx+4]");
1.2585 + asm("mov ecx, [ecx+8]");
1.2586 + asm("call %a0": : "i"(&TRealXModulo)); // do modulo, result in ecx,edx:ebx, error code in eax
1.2587 + asm("mov [esi], ebx"); // store result in *this
1.2588 + asm("mov [esi+4], edx");
1.2589 + asm("mov [esi+8], ecx");
1.2590 + asm("mov ecx, esi"); // restore registers
1.2591 + asm("pop edi");
1.2592 + asm("pop esi");
1.2593 + asm("pop ebp");
1.2594 + asm("pop ebx");
1.2595 + THISCALL_EPILOG1() // return with error code in eax
1.2596 + }
1.2597 +
1.2598 +
1.2599 +
1.2600 +
1.2601 +__NAKED__ EXPORT_C TRealX TRealX::operator+() const
1.2602 +/**
1.2603 +Returns this extended precision number unchanged.
1.2604 +
1.2605 +Note that this may also be referred to as a unary plus operator.
1.2606 +
1.2607 +@return The extended precision number.
1.2608 +*/
1.2609 + {
1.2610 + THISCALL_PROLOG0_BIGRETVAL()
1.2611 + asm("mov eax, [esp+4]"); // eax=address to write return value
1.2612 + asm("mov edx, [ecx]"); // copy the 12-byte TRealX (three dwords) to the return slot
1.2613 + asm("mov [eax], edx");
1.2614 + asm("mov edx, [ecx+4]");
1.2615 + asm("mov [eax+4], edx");
1.2616 + asm("mov edx, [ecx+8]");
1.2617 + asm("mov [eax+8], edx"); // return address of return value in eax
1.2618 + THISCALL_EPILOG0_BIGRETVAL()
1.2619 + }
1.2620 +
1.2621 +
1.2622 +
1.2623 +
1.2624 +__NAKED__ EXPORT_C TRealX TRealX::operator-() const
1.2625 +/**
1.2626 +Negates this extended precision number.
1.2627 +
1.2628 +This may also be referred to as a unary minus operator.
1.2629 +
1.2630 +@return The negative of the extended precision number.
1.2631 +*/
1.2632 + {
1.2633 + THISCALL_PROLOG0_BIGRETVAL()
1.2634 + asm("mov eax, [esp+4]"); // eax=address to write return value
1.2635 + asm("mov edx, [ecx]"); // copy the 12-byte TRealX to the return slot
1.2636 + asm("mov [eax], edx");
1.2637 + asm("mov edx, [ecx+4]");
1.2638 + asm("mov [eax+4], edx");
1.2639 + asm("mov edx, [ecx+8]");
1.2640 + asm("xor dl, 1"); // change sign bit (bit 0 of dword at offset 8)
1.2641 + asm("mov [eax+8], edx");
1.2642 + THISCALL_EPILOG0_BIGRETVAL() // return address of return value in eax
1.2643 + }
1.2644 +
1.2645 +
1.2646 +
1.2647 +
1.2648 +__NAKED__ EXPORT_C TRealX& TRealX::operator++()
1.2649 +/**
1.2650 +Increments this extended precision number by one,
1.2651 +and then returns a reference to it.
1.2652 +
1.2653 +This is also referred to as a prefix operator.
1.2654 +
1.2655 +@return A reference to this object.
1.2656 +
1.2657 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2658 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2659 +*/
1.2660 + {
1.2661 + // pre-increment
1.2662 + // on entry ecx=this, return this in eax
1.2663 + THISCALL_PROLOG0()
1.2664 + asm("push ebx"); // save registers
1.2665 + asm("push ebp");
1.2666 + asm("push esi");
1.2667 + asm("push edi");
1.2668 + asm("mov esi, ecx"); // this into esi
1.2669 + asm("mov ecx, 0x7FFF0000"); // set ecx,edx:ebx to 1.0 (exponent 7FFF, sign bit clear)
1.2670 + asm("mov edx, 0x80000000"); // mantissa 80000000 00000000
1.2671 + asm("xor ebx, ebx");
1.2672 + asm("call %a0": :"i"(&TRealXAdd)); // add 1 to *this
1.2673 + asm("mov [esi], ebx"); // store result
1.2674 + asm("mov [esi+4], edx");
1.2675 + asm("mov [esi+8], ecx");
1.2676 + asm("test eax, eax"); // check error code
1.2677 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2678 + asm("mov eax, esi"); // else return this in eax
1.2679 + asm("mov ecx, esi");
1.2680 + asm("pop edi");
1.2681 + asm("pop esi");
1.2682 + asm("pop ebp");
1.2683 + asm("pop ebx");
1.2684 + THISCALL_EPILOG0()
1.2685 + }
1.2686 +
1.2687 +
1.2688 +
1.2689 +
1.2690 +__NAKED__ EXPORT_C TRealX TRealX::operator++(TInt)
1.2691 +/**
1.2692 +Returns this extended precision number before incrementing it by one.
1.2693 +
1.2694 +This is also referred to as a postfix operator.
1.2695 +
1.2696 +@return The extended precision number before the increment.
1.2697 +
1.2698 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2699 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2700 +*/
1.2701 + {
1.2702 + // post-increment
1.2703 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=dummy int
1.2704 + THISCALL_PROLOG1_BIGRETVAL()
1.2705 + asm("push ebx"); // save registers
1.2706 + asm("push ebp");
1.2707 + asm("push esi");
1.2708 + asm("push edi");
1.2709 + asm("mov esi, ecx"); // this into esi
1.2710 + asm("mov edi, [esp+20]"); // address of return value into edi (esp moved by 4 pushes)
1.2711 + asm("mov eax, [ecx]"); // copy initial value of *this into [edi]
1.2712 + asm("mov [edi], eax");
1.2713 + asm("mov eax, [ecx+4]");
1.2714 + asm("mov [edi+4], eax");
1.2715 + asm("mov eax, [ecx+8]");
1.2716 + asm("mov [edi+8], eax");
1.2717 + asm("mov ecx, 0x7FFF0000"); // set ecx,edx:ebx to 1.0 (exponent 7FFF, sign bit clear)
1.2718 + asm("mov edx, 0x80000000"); // mantissa 80000000 00000000
1.2719 + asm("xor ebx, ebx");
1.2720 + asm("call %a0": :"i"(&TRealXAdd)); // add 1 to *this
1.2721 + asm("mov [esi], ebx"); // store result in *this
1.2722 + asm("mov [esi+4], edx");
1.2723 + asm("mov [esi+8], ecx");
1.2724 + asm("test eax, eax"); // check error code
1.2725 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2726 + asm("mov eax, [esp+20]"); // address of return value into eax
1.2727 + asm("mov ecx, esi");
1.2728 + asm("pop edi");
1.2729 + asm("pop esi");
1.2730 + asm("pop ebp");
1.2731 + asm("pop ebx");
1.2732 + THISCALL_EPILOG1_BIGRETVAL()
1.2733 + }
1.2734 +
1.2735 +
1.2736 +
1.2737 +
1.2738 +__NAKED__ EXPORT_C TRealX& TRealX::operator--()
1.2739 +/**
1.2740 +Decrements this extended precision number by one,
1.2741 +and then returns a reference to it.
1.2742 +
1.2743 +This is also referred to as a prefix operator.
1.2744 +
1.2745 +@return A reference to this object.
1.2746 +
1.2747 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2748 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2749 +*/
1.2750 + {
1.2751 + // pre-decrement
1.2752 + // on entry ecx=this, return this in eax
1.2753 + THISCALL_PROLOG0()
1.2754 + asm("push ebx"); // save registers
1.2755 + asm("push ebp");
1.2756 + asm("push esi");
1.2757 + asm("push edi");
1.2758 + asm("mov esi, ecx"); // this into esi
1.2759 + asm("mov ecx, 0x7FFF0001"); // set ecx,edx:ebx to -1.0 (exponent 7FFF, sign bit 0 set)
1.2760 + asm("mov edx, 0x80000000"); // mantissa 80000000 00000000
1.2761 + asm("xor ebx, ebx");
1.2762 + asm("call %a0": :"i"(&TRealXAdd)); // add -1 to *this
1.2763 + asm("mov [esi], ebx"); // store result
1.2764 + asm("mov [esi+4], edx");
1.2765 + asm("mov [esi+8], ecx");
1.2766 + asm("test eax, eax"); // check error code
1.2767 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2768 + asm("mov eax, esi"); // else return this in eax
1.2769 + asm("mov ecx, esi");
1.2770 + asm("pop edi");
1.2771 + asm("pop esi");
1.2772 + asm("pop ebp");
1.2773 + asm("pop ebx");
1.2774 + THISCALL_EPILOG0()
1.2775 + }
1.2776 +
1.2777 +
1.2778 +
1.2779 +
1.2780 +__NAKED__ EXPORT_C TRealX TRealX::operator--(TInt)
1.2781 +/**
1.2782 +Returns this extended precision number before decrementing it by one.
1.2783 +
1.2784 +This is also referred to as a postfix operator.
1.2785 +
1.2786 +@return The extended precision number before the decrement.
1.2787 +
1.2788 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2789 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2790 +*/
1.2791 + {
1.2792 + // post-decrement
1.2793 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=dummy int
1.2794 + THISCALL_PROLOG1_BIGRETVAL()
1.2795 + asm("push ebx"); // save registers
1.2796 + asm("push ebp");
1.2797 + asm("push esi");
1.2798 + asm("push edi");
1.2799 + asm("mov esi, ecx"); // this into esi
1.2800 + asm("mov edi, [esp+20]"); // address of return value into edi (esp moved by 4 pushes)
1.2801 + asm("mov eax, [ecx]"); // copy initial value of *this into [edi]
1.2802 + asm("mov [edi], eax");
1.2803 + asm("mov eax, [ecx+4]");
1.2804 + asm("mov [edi+4], eax");
1.2805 + asm("mov eax, [ecx+8]");
1.2806 + asm("mov [edi+8], eax");
1.2807 + asm("mov ecx, 0x7FFF0001"); // set ecx,edx:ebx to -1.0 (exponent 7FFF, sign bit 0 set)
1.2808 + asm("mov edx, 0x80000000"); // mantissa 80000000 00000000
1.2809 + asm("xor ebx, ebx");
1.2810 + asm("call %a0": :"i"(&TRealXAdd)); // add -1 to *this
1.2811 + asm("mov [esi], ebx"); // store result in *this
1.2812 + asm("mov [esi+4], edx");
1.2813 + asm("mov [esi+8], ecx");
1.2814 + asm("test eax, eax"); // check error code
1.2815 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2816 + asm("mov eax, [esp+20]"); // address of return value into eax
1.2817 + asm("mov ecx, esi");
1.2818 + asm("pop edi");
1.2819 + asm("pop esi");
1.2820 + asm("pop ebp");
1.2821 + asm("pop ebx");
1.2822 + THISCALL_EPILOG1_BIGRETVAL()
1.2823 + }
1.2824 +
1.2825 +
1.2826 +
1.2827 +
1.2828 +__NAKED__ EXPORT_C TRealX TRealX::operator+(const TRealX& /*aVal*/) const
1.2829 +/**
1.2830 +Adds an extended precision value to this extended precision number.
1.2831 +
1.2832 +@param aVal The extended precision value to be added.
1.2833 +
1.2834 +@return An extended precision object containing the result.
1.2835 +
1.2836 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2837 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2838 +*/
1.2839 + {
1.2840 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
1.2841 + THISCALL_PROLOG1_BIGRETVAL()
1.2842 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.2843 + asm("push ebx");
1.2844 + asm("push ebp");
1.2845 + asm("push esi");
1.2846 + asm("push edi");
1.2847 + asm("mov esi, ecx"); // this into esi
1.2848 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.2849 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2850 + asm("mov edx, [ecx+4]");
1.2851 + asm("mov ecx, [ecx+8]");
1.2852 + asm("call %a0": :"i"(&TRealXAdd)); // do addition, result in ecx,edx:ebx, error code in eax
1.2853 + asm("mov esi, [esp+24]"); // esi=address of return value
1.2854 + asm("mov [esi], ebx"); // store result
1.2855 + asm("mov [esi+4], edx");
1.2856 + asm("mov [esi+8], ecx");
1.2857 + asm("test eax, eax"); // check error code
1.2858 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2859 + asm("mov eax, esi"); // return address of return value in eax
1.2860 + asm("pop edi"); // restore registers
1.2861 + asm("pop esi");
1.2862 + asm("pop ebp");
1.2863 + asm("pop ebx");
1.2864 + asm("pop ecx"); // restore ecx (this)
1.2865 + THISCALL_EPILOG1_BIGRETVAL()
1.2866 + }
1.2867 +
1.2868 +
1.2869 +
1.2870 +
1.2871 +__NAKED__ EXPORT_C TRealX TRealX::operator-(const TRealX& /*aVal*/) const
1.2872 +/**
1.2873 +Subtracts an extended precision value from this extended precision number.
1.2874 +
1.2875 +@param aVal The extended precision value to be subtracted.
1.2876 +
1.2877 +@return An extended precision object containing the result.
1.2878 +
1.2879 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2880 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2881 +*/
1.2882 + {
1.2883 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
1.2884 + THISCALL_PROLOG1_BIGRETVAL()
1.2885 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.2886 + asm("push ebx");
1.2887 + asm("push ebp");
1.2888 + asm("push esi");
1.2889 + asm("push edi");
1.2890 + asm("mov esi, ecx"); // this into esi
1.2891 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.2892 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2893 + asm("mov edx, [ecx+4]");
1.2894 + asm("mov ecx, [ecx+8]");
1.2895 + asm("call %a0": : "i"(&TRealXSubtract)); // do subtraction, result in ecx,edx:ebx, error code in eax
1.2896 + asm("mov esi, [esp+24]"); // esi=address of return value
1.2897 + asm("mov [esi], ebx"); // store result
1.2898 + asm("mov [esi+4], edx");
1.2899 + asm("mov [esi+8], ecx");
1.2900 + asm("test eax, eax"); // check error code
1.2901 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2902 + asm("mov eax, esi"); // return address of return value in eax
1.2903 + asm("pop edi"); // restore registers
1.2904 + asm("pop esi");
1.2905 + asm("pop ebp");
1.2906 + asm("pop ebx");
1.2907 + asm("pop ecx"); // restore ecx (this)
1.2908 + THISCALL_EPILOG1_BIGRETVAL()
1.2909 + }
1.2910 +
1.2911 +
1.2912 +
1.2913 +
1.2914 +__NAKED__ EXPORT_C TRealX TRealX::operator*(const TRealX& /*aVal*/) const
1.2915 +/**
1.2916 +Multiplies this extended precision number by an extended precision value.
1.2917 +
1.2918 +@param aVal The extended precision value to be used as the multiplier.
1.2919 +
1.2920 +@return An extended precision object containing the result.
1.2921 +
1.2922 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2923 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2924 +*/
1.2925 + {
1.2926 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
1.2927 + THISCALL_PROLOG1_BIGRETVAL()
1.2928 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.2929 + asm("push ebx");
1.2930 + asm("push ebp");
1.2931 + asm("push esi");
1.2932 + asm("push edi");
1.2933 + asm("mov esi, ecx"); // this into esi
1.2934 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.2935 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2936 + asm("mov edx, [ecx+4]");
1.2937 + asm("mov ecx, [ecx+8]");
1.2938 + asm("call %a0": : "i"(&TRealXMultiply)); // do multiplication, result in ecx,edx:ebx, error code in eax
1.2939 + asm("mov esi, [esp+24]"); // esi=address of return value
1.2940 + asm("mov [esi], ebx"); // store result
1.2941 + asm("mov [esi+4], edx");
1.2942 + asm("mov [esi+8], ecx");
1.2943 + asm("test eax, eax"); // check error code
1.2944 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2945 + asm("mov eax, esi"); // return address of return value in eax
1.2946 + asm("pop edi"); // restore registers
1.2947 + asm("pop esi");
1.2948 + asm("pop ebp");
1.2949 + asm("pop ebx");
1.2950 + asm("pop ecx"); // restore ecx (this)
1.2951 + THISCALL_EPILOG1_BIGRETVAL()
1.2952 + }
1.2953 +
1.2954 +
1.2955 +
1.2956 +
1.2957 +__NAKED__ EXPORT_C TRealX TRealX::operator/(const TRealX& /*aVal*/) const
1.2958 +/**
1.2959 +Divides this extended precision number by an extended precision value.
1.2960 +
1.2961 +@param aVal The extended precision value to be used as the divisor.
1.2962 +
1.2963 +@return An extended precision object containing the result.
1.2964 +
1.2965 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2966 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2967 +@panic MATHX KErrDivideByZero if the divisor is zero.
1.2968 +*/
1.2969 + {
1.2970 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
1.2971 + THISCALL_PROLOG1_BIGRETVAL()
1.2972 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.2973 + asm("push ebx");
1.2974 + asm("push ebp");
1.2975 + asm("push esi");
1.2976 + asm("push edi");
1.2977 + asm("mov esi, ecx"); // this into esi
1.2978 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.2979 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.2980 + asm("mov edx, [ecx+4]");
1.2981 + asm("mov ecx, [ecx+8]");
1.2982 + asm("call %a0": : "i"(&TRealXDivide)); // do division, result in ecx,edx:ebx, error code in eax
1.2983 + asm("mov esi, [esp+24]"); // esi=address of return value
1.2984 + asm("mov [esi], ebx"); // store result
1.2985 + asm("mov [esi+4], edx");
1.2986 + asm("mov [esi+8], ecx");
1.2987 + asm("test eax, eax"); // check error code
1.2988 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.2989 + asm("mov eax, esi"); // return address of return value in eax
1.2990 + asm("pop edi"); // restore registers
1.2991 + asm("pop esi");
1.2992 + asm("pop ebp");
1.2993 + asm("pop ebx");
1.2994 + asm("pop ecx"); // restore ecx (this)
1.2995 + THISCALL_EPILOG1_BIGRETVAL()
1.2996 + }
1.2997 +
1.2998 +
1.2999 +
1.3000 +
1.3001 +__NAKED__ EXPORT_C TRealX TRealX::operator%(const TRealX& /*aVal*/) const
1.3002 +/**
1.3003 +Modulo-divides this extended precision number by an extended precision value.
1.3004 +
1.3005 +@param aVal The extended precision value to be used as the divisor.
1.3006 +
1.3007 +@return An extended precision object containing the result.
1.3008 +
1.3009 +@panic MATHX KErrTotalLossOfPrecision if precision is lost.
1.3010 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.3011 +*/
1.3012 + {
1.3013 + // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
1.3014 + THISCALL_PROLOG1_BIGRETVAL()
1.3015 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.3016 + asm("push ebx");
1.3017 + asm("push ebp");
1.3018 + asm("push esi");
1.3019 + asm("push edi");
1.3020 + asm("mov esi, ecx"); // this into esi
1.3021 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.3022 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.3023 + asm("mov edx, [ecx+4]");
1.3024 + asm("mov ecx, [ecx+8]");
1.3025 + asm("call %a0": : "i"(&TRealXModulo)); // do modulo, result in ecx,edx:ebx, error code in eax
1.3026 + asm("mov esi, [esp+24]"); // esi=address of return value
1.3027 + asm("mov [esi], ebx"); // store result
1.3028 + asm("mov [esi+4], edx");
1.3029 + asm("mov [esi+8], ecx");
1.3030 + asm("test eax, eax"); // check error code
1.3031 + _ASM_jn(z,TRealXPanicEax) // panic if error
1.3032 + asm("mov eax, esi"); // return address of return value in eax
1.3033 + asm("pop edi"); // restore registers
1.3034 + asm("pop esi");
1.3035 + asm("pop ebp");
1.3036 + asm("pop ebx");
1.3037 + asm("pop ecx"); // restore ecx (this)
1.3038 + THISCALL_EPILOG1_BIGRETVAL()
1.3039 + }
1.3040 +
1.3041 +
1.3042 +
1.3043 +
1.3044 +__NAKED__ EXPORT_C TInt TRealX::Add(TRealX& /*aResult*/, const TRealX& /*aVal*/) const
1.3045 +/**
1.3046 +Adds an extended precision value to this extended precision number.
1.3047 +
1.3048 +@param aResult On return, a reference to an extended precision object
1.3049 +containing the result of the operation.
1.3050 +@param aVal The extended precision value to be added.
1.3051 +
1.3052 +@return KErrNone, if the operation is successful;
1.3053 +KErrOverflow, if the operation results in overflow;
1.3054 +KErrUnderflow, if the operation results in underflow.
1.3055 +*/
1.3056 + {
1.3057 + // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal
1.3058 + THISCALL_PROLOG2()
1.3059 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.3060 + asm("push ebx");
1.3061 + asm("push ebp");
1.3062 + asm("push esi");
1.3063 + asm("push edi");
1.3064 + asm("mov esi, ecx"); // this into esi
1.3065 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.3066 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.3067 + asm("mov edx, [ecx+4]");
1.3068 + asm("mov ecx, [ecx+8]");
1.3069 + asm("call %a0": :"i"(&TRealXAdd)); // do addition, result in ecx,edx:ebx, error code in eax
1.3070 + asm("mov esi, [esp+24]"); // esi=address of aResult
1.3071 + asm("mov [esi], ebx"); // store result
1.3072 + asm("mov [esi+4], edx");
1.3073 + asm("mov [esi+8], ecx");
1.3074 + asm("pop edi"); // restore registers
1.3075 + asm("pop esi");
1.3076 + asm("pop ebp");
1.3077 + asm("pop ebx");
1.3078 + asm("pop ecx");
1.3079 + THISCALL_EPILOG2() // return with error code in eax
1.3080 + }
1.3081 +
1.3082 +
1.3083 +
1.3084 +
1.3085 +__NAKED__ EXPORT_C TInt TRealX::Sub(TRealX& /*aResult*/, const TRealX& /*aVal*/) const
1.3086 +/**
1.3087 +Subtracts an extended precision value from this extended precision number.
1.3088 +
1.3089 +@param aResult On return, a reference to an extended precision object
1.3090 +containing the result of the operation.
1.3091 +@param aVal The extended precision value to be subtracted.
1.3092 +
1.3093 +@return KErrNone, if the operation is successful;
1.3094 +KErrOverflow, if the operation results in overflow;
1.3095 +KErrUnderflow, if the operation results in underflow.
1.3096 +*/
1.3097 + {
1.3098 + // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal
1.3099 + THISCALL_PROLOG2()
1.3100 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.3101 + asm("push ebx");
1.3102 + asm("push ebp");
1.3103 + asm("push esi");
1.3104 + asm("push edi");
1.3105 + asm("mov esi, ecx"); // this into esi
1.3106 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.3107 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.3108 + asm("mov edx, [ecx+4]");
1.3109 + asm("mov ecx, [ecx+8]");
1.3110 + asm("call %a0": : "i"(&TRealXSubtract)); // do subtraction, result in ecx,edx:ebx, error code in eax
1.3111 + asm("mov esi, [esp+24]"); // esi=address of aResult
1.3112 + asm("mov [esi], ebx"); // store result
1.3113 + asm("mov [esi+4], edx");
1.3114 + asm("mov [esi+8], ecx");
1.3115 + asm("pop edi"); // restore registers
1.3116 + asm("pop esi");
1.3117 + asm("pop ebp");
1.3118 + asm("pop ebx");
1.3119 + asm("pop ecx");
1.3120 + THISCALL_EPILOG2() // return with error code in eax
1.3121 + }
1.3122 +
1.3123 +
1.3124 +
1.3125 +
1.3126 +__NAKED__ EXPORT_C TInt TRealX::Mult(TRealX& /*aResult*/, const TRealX& /*aVal*/) const
1.3127 +/**
1.3128 +Multiplies this extended precision number by an extended precision value.
1.3129 +
1.3130 +@param aResult On return, a reference to an extended precision object
1.3131 +containing the result of the operation.
1.3132 +@param aVal The extended precision value to be used as the multiplier.
1.3133 +
1.3134 +@return KErrNone, if the operation is successful;
1.3135 +KErrOverflow, if the operation results in overflow;
1.3136 +KErrUnderflow, if the operation results in underflow.
1.3137 +*/
1.3138 + {
1.3139 + // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal
1.3140 + THISCALL_PROLOG2()
1.3141 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.3142 + asm("push ebx");
1.3143 + asm("push ebp");
1.3144 + asm("push esi");
1.3145 + asm("push edi");
1.3146 + asm("mov esi, ecx"); // this into esi
1.3147 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.3148 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.3149 + asm("mov edx, [ecx+4]");
1.3150 + asm("mov ecx, [ecx+8]");
1.3151 + asm("call %a0": : "i"(&TRealXMultiply)); // do multiplication, result in ecx,edx:ebx, error code in eax
1.3152 + asm("mov esi, [esp+24]"); // esi=address of aResult
1.3153 + asm("mov [esi], ebx"); // store result
1.3154 + asm("mov [esi+4], edx");
1.3155 + asm("mov [esi+8], ecx");
1.3156 + asm("pop edi"); // restore registers
1.3157 + asm("pop esi");
1.3158 + asm("pop ebp");
1.3159 + asm("pop ebx");
1.3160 + asm("pop ecx");
1.3161 + THISCALL_EPILOG2() // return with error code in eax
1.3162 + }
1.3163 +
1.3164 +
1.3165 +
1.3166 +__NAKED__ EXPORT_C TInt TRealX::Div(TRealX& /*aResult*/, const TRealX& /*aVal*/) const
1.3167 +/**
1.3168 +Divides this extended precision number by an extended precision value.
1.3169 +
1.3170 +@param aResult On return, a reference to an extended precision object
1.3171 +containing the result of the operation.
1.3172 +@param aVal The extended precision value to be used as the divisor.
1.3173 +
1.3174 +@return KErrNone, if the operation is successful;
1.3175 +KErrOverflow, if the operation results in overflow;
1.3176 +KErrUnderflow, if the operation results in underflow;
1.3177 +KErrDivideByZero, if the divisor is zero.
1.3178 +*/
1.3179 + {
1.3180 + // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal
1.3181 + THISCALL_PROLOG2()
1.3182 + asm("push ecx"); // save registers (ecx=this, restored for epilog)
1.3183 + asm("push ebx");
1.3184 + asm("push ebp");
1.3185 + asm("push esi");
1.3186 + asm("push edi");
1.3187 + asm("mov esi, ecx"); // this into esi
1.3188 + asm("mov ecx, [esp+28]"); // address of aVal into ecx (esp moved by 5 pushes)
1.3189 + asm("mov ebx, [ecx]"); // aVal into ecx,edx:ebx
1.3190 + asm("mov edx, [ecx+4]");
1.3191 + asm("mov ecx, [ecx+8]");
1.3192 + asm("call %a0": : "i"(&TRealXDivide)); // do division, result in ecx,edx:ebx, error code in eax
1.3193 + asm("mov esi, [esp+24]"); // esi=address of aResult
1.3194 + asm("mov [esi], ebx"); // store result
1.3195 + asm("mov [esi+4], edx");
1.3196 + asm("mov [esi+8], ecx");
1.3197 + asm("pop edi"); // restore registers
1.3198 + asm("pop esi");
1.3199 + asm("pop ebp");
1.3200 + asm("pop ebx");
1.3201 + asm("pop ecx");
1.3202 + THISCALL_EPILOG2() // return with error code in eax
1.3203 + }
1.3204 +
1.3205 +
1.3206 +
1.3207 +
__NAKED__ EXPORT_C TInt TRealX::Mod(TRealX& /*aResult*/, const TRealX& /*aVal*/) const
/**
Modulo-divides this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
containing the result of the operation.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
KErrTotalLossOfPrecision, if precision is lost;
KErrUnderflow, if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal
	// TRealXModulo calling convention: dividend in ecx,edx:ebx, divisor at [esi];
	// result returned in ecx,edx:ebx with the error code in eax.
	THISCALL_PROLOG2()
	asm("push ecx");			// save registers (5 pushes = 20 bytes, so the
	asm("push ebx");			// stack args move from [esp+4]/[esp+8] to
	asm("push ebp");			// [esp+24]/[esp+28] below)
	asm("push esi");
	asm("push edi");
	asm("mov esi, ecx");		// this into esi (TRealXModulo reads divisor via [esi])
	asm("mov ecx, [esp+28]");	// address of aVal into ecx
	asm("mov ebx, [ecx]");		// aVal into ecx,edx:ebx (mantissa low, mantissa high, sign/exponent)
	asm("mov edx, [ecx+4]");
	asm("mov ecx, [ecx+8]");
	asm("call %a0": : "i"(&TRealXModulo));	// do modulo, result in ecx,edx:ebx, error code in eax
	asm("mov esi, [esp+24]");	// esi=address of aResult
	asm("mov [esi], ebx");		// store result (same three-word layout as the load above)
	asm("mov [esi+4], edx");
	asm("mov [esi+8], ecx");
	asm("pop edi");				// restore registers
	asm("pop esi");
	asm("pop ebp");
	asm("pop ebx");
	asm("pop ecx");
	THISCALL_EPILOG2()			// return with error code in eax
	}
1.3246 +
// Compare TRealX in ecx,edx:ebx (op1) to TRealX at [esi] (op2)
// Register layout of a TRealX here: ebx = mantissa low word, edx = mantissa
// high word, ecx = sign/exponent word (exponent in bits 16-31, sign in bit 0,
// so 'test cl, 1' tests the sign and 'cmp ecx, 0x10000' tests for a zero
// exponent i.e. a zero value).
// Special encodings used below: exponent FFFF with mantissa 80000000 00000000
// is infinity; exponent FFFF with any other mantissa is a NaN.
// Return 1 if op1<op2
// Return 2 if op1=op2
// Return 4 if op1>op2
// Return 8 if unordered
// Return value in eax
LOCAL_C __NAKED__ void TRealXCompare(void)
	{
	asm("cmp ecx, 0xFFFF0000");		// check if op1=NaN or infinity
	asm("jc short fpcmp1");			// branch if not (exponent below FFFF)
	asm("cmp edx, 0x80000000");		// exponent=FFFF: infinity has mantissa 80000000 00000000
	asm("jnz short fpcmpunord");	// branch if NaN (mantissa high word differs)
	asm("test ebx, ebx");
	asm("jz short fpcmp1");			// if infinity, process normally
	asm("fpcmpunord:");				// come here if unordered (either op is a NaN)
	asm("mov eax, 8");				// return 8
	asm("ret");
	asm("fpcmp1:");					// op1 is not a NaN
	asm("mov eax, [esi+8]");		// get op2 into eax,edi:ebp (sign/exp, mant high, mant low)
	asm("mov edi, [esi+4]");
	asm("mov ebp, [esi]");
	asm("cmp eax, 0xFFFF0000");		// check for NaN or infinity
	asm("jc short fpcmp2");			// branch if neither
	asm("cmp edi, 0x80000000");		// check for infinity (same encoding test as op1)
	asm("jnz short fpcmpunord");	// branch if NaN
	asm("test ebp, ebp");
	asm("jnz short fpcmpunord");
	asm("fpcmp2:");					// neither operand is a NaN
	asm("cmp ecx, 0x10000");		// check if op1=0 (exponent zero)
	asm("jc short fpcmpop1z");		// branch if it is
	asm("cmp eax, 0x10000");		// check if op2=0
	asm("jc short fpcmp4");			// op2=0, op1!=0: fpcmp4 resolves by op1's sign
	asm("xor al, cl");				// bit 0 of al = 1 iff signs differ (al is junk after
	asm("test al, 1");				//  this, but only bits 16-31 of eax are used below)
	asm("jnz short fpcmp4");		// signs differ: +ve op1 > -ve op2 and vice versa
	asm("push ecx");
	asm("shr ecx, 16");				// op1 exponent into cx
	asm("shr eax, 16");				// op2 exponent into ax
	asm("cmp ecx, eax");			// compare exponents (unsigned)
	asm("pop ecx");					// restore op1 sign/exponent word (flags preserved)
	asm("ja short fpcmp4");			// if op1 exp > op2 exp op1>op2 if +ve
	asm("jb short fpcmp5");			// if op1 exp < op2 exp op1<op2 if +ve
	asm("cmp edx, edi");			// else compare mantissa high words
	asm("ja short fpcmp4");
	asm("jb short fpcmp5");
	asm("cmp ebx, ebp");			// if equal compare mantissa low words
	asm("ja short fpcmp4");
	asm("jb short fpcmp5");
	asm("fpcmp0:");
	asm("mov eax, 2");				// numbers exactly equal
	asm("ret");
	asm("fpcmp4:");					// come here if ABS{op1}>ABS{op2} or if signs different
									// or if op2 zero, op1 nonzero
	asm("mov eax, 4");				// return 4 if +ve
	asm("test cl, 1");				// check sign of op1
	asm("jz short fpcmp4a");		// skip if +
	asm("mov al, 1");				// return 1 if -ve (larger magnitude means smaller value)
	asm("fpcmp4a:");
	asm("ret");
	asm("fpcmp5:");					// come here if ABS{op1}<ABS{op2}
	asm("mov eax, 1");				// return 1 if +ve
	asm("test cl, 1");				// check sign of op1
	asm("jz short fpcmp5a");		// skip if +
	asm("mov al, 4");				// return 4 if -ve
	asm("fpcmp5a:");
	asm("ret");
	asm("fpcmpop1z:");				// come here if op1=0
	asm("cmp eax, 0x10000");		// check if op2 also zero
	asm("jc short fpcmp0");			// if so, they are equal (signs ignored: +0 = -0)
	asm("test al, 1");				// test sign of op 2 ('mov eax, 4' below leaves flags alone)
	asm("mov eax, 4");				// if -, return 4 (0 > negative op2)
	asm("jnz short fpcmpop1z2n");	// skip if -
	asm("mov al, 1");				// else return 1 (0 < positive op2)
	asm("fpcmpop1z2n:");
	asm("ret");
	}
1.3323 +
1.3324 +
1.3325 +
1.3326 +
__NAKED__ EXPORT_C TRealX::TRealXOrder TRealX::Compare(const TRealX& /*aVal*/) const
/**
Compares this extended precision number with the specified extended
precision value.

@param aVal A reference to the extended precision value to be compared with.

@return The result of the comparison as a TRealXOrder value. The underlying
comparison routine returns 1 if this value is less than aVal, 2 if the two
values are equal, 4 if this value is greater than aVal, and 8 if the values
are unordered, i.e. at least one of them is a NaN.
*/
	{
	// On entry ecx=this, [esp+4]=address of aVal
	// (after the 5 pushes below aVal's address is at [esp+24])
	THISCALL_PROLOG1()
	asm("push ecx");			// save registers
	asm("push ebx");
	asm("push ebp");
	asm("push esi");
	asm("push edi");
	asm("mov esi, [esp+24]");	// address of aVal into esi (TRealXCompare's op2)
	asm("mov ebx, [ecx]");		// *this into ecx,edx:ebx (mantissa low, mantissa high, sign/exponent)
	asm("mov edx, [ecx+4]");
	asm("mov ecx, [ecx+8]");
	asm("call %a0": : "i"(&TRealXCompare));	// result in eax
	asm("pop edi");				// restore registers
	asm("pop esi");
	asm("pop ebp");
	asm("pop ebx");
	asm("pop ecx");
	THISCALL_EPILOG1()
	}
1.3350 +
1.3351 +