sl@0: // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\euser\epoc\win32\uc_realx.cpp sl@0: // sl@0: // sl@0: sl@0: #include "u32std.h" sl@0: #include sl@0: sl@0: #pragma warning (disable : 4100) // unreferenced formal parameter sl@0: #pragma warning (disable : 4700) // local variable 'this' used without sl@0: // having been initialised sl@0: #pragma warning ( disable : 4414 ) // short jump to function converted to near sl@0: sl@0: sl@0: #if defined(__VC32__) && (_MSC_VER==1100) // untested on MSVC++ > 5.0 sl@0: // Workaround for MSVC++ 5.0 bug; MSVC incorrectly fixes up conditional jumps sl@0: // when the destination is a C++ function. 
sl@0: #define _ASM_j(cond,dest) _asm jn##cond short $+11 _asm jmp dest sl@0: #define _ASM_jn(cond,dest) _asm j##cond short $+11 _asm jmp dest sl@0: #pragma optimize( "", off ) // stop MSVC murdering the code sl@0: #else sl@0: #define _ASM_j(cond,dest) _asm j##cond dest sl@0: #define _ASM_jn(cond,dest) _asm jn##cond dest sl@0: #endif sl@0: sl@0: // sl@0: // 64-bit precision floating point routines sl@0: // Register storage format: sl@0: // edx:ebx=64 bit normalised mantissa sl@0: // ecx bits 16-31 = 16-bit exponent, biased by 7FFF sl@0: // ecx bit 0 = sign sl@0: // ecx bit 8 = rounded-down flag sl@0: // ecx bit 9 = rounded-up flag sl@0: // sl@0: // Memory storage format: sl@0: // 3 doublewords per number sl@0: // Low 32 bits of mantissa at [addr] sl@0: // High 32 bits of mantissa at [addr+4] sl@0: // Exponent/flags/sign at [addr+8] sl@0: // sl@0: sl@0: LOCAL_C void TRealXPanic(TInt aErr) sl@0: { sl@0: User::Panic(_L("MATHX"),aErr); sl@0: } sl@0: sl@0: __NAKED__ LOCAL_C void TRealXPanicEax(void) sl@0: { sl@0: _asm push eax sl@0: _asm call TRealXPanic sl@0: } sl@0: sl@0: LOCAL_C __NAKED__ void TRealXRealIndefinite(void) sl@0: { sl@0: // return 'real indefinite' NaN in ecx,edx:ebx sl@0: _asm mov ecx, 0xFFFF0001 // exponent=FFFF, sign negative sl@0: _asm mov edx, 0xC0000000 // mantissa=C0000000 00000000 sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -6 // return KErrArgument sl@0: _asm ret sl@0: } sl@0: sl@0: LOCAL_C __NAKED__ void TRealXBinOpNaN(void) sl@0: { sl@0: // generic routine to process NaN's in binary operations sl@0: // destination operand in ecx,edx:eax sl@0: // source operand at [esi] sl@0: sl@0: _asm mov eax, [esi+8] // source operand into eax,edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm mov ebp, [esi] sl@0: _asm cmp ecx, 0xFFFF0000 // check if dest is a NaN sl@0: _asm jb short TRealXBinOpNaN1 // if not, swap them sl@0: _asm cmp edx, 0x80000000 sl@0: _asm jne short TRealXBinOpNaN2 sl@0: _asm test ebx, ebx sl@0: _asm jne short TRealXBinOpNaN2 sl@0: 
TRealXBinOpNaN1: // swap the operands sl@0: _asm xchg ecx, eax sl@0: _asm xchg edx, edi sl@0: _asm xchg ebx, ebp sl@0: TRealXBinOpNaN2: sl@0: _asm cmp eax, 0xFFFF0000 // check if both operands are NaNs sl@0: _asm jb short TRealXBinOpNaN4 // if not, ignore non-NaN operand sl@0: _asm cmp edi, 0x80000000 sl@0: _asm jne short TRealXBinOpNaN3 sl@0: _asm test ebp, ebp sl@0: _asm je short TRealXBinOpNaN4 sl@0: TRealXBinOpNaN3: // if both operands are NaN's, compare significands sl@0: _asm cmp edx, edi sl@0: _asm ja short TRealXBinOpNaN4 sl@0: _asm jb short TRealXBinOpNaN5 sl@0: _asm cmp ebx, ebp sl@0: _asm jae short TRealXBinOpNaN4 sl@0: TRealXBinOpNaN5: // come here if dest is smaller - copy source to dest sl@0: _asm mov ecx, eax sl@0: _asm mov edx, edi sl@0: _asm mov ebx, ebp sl@0: TRealXBinOpNaN4: // NaN with larger significand is in ecx,edx:ebx sl@0: _asm or edx, 0x40000000 // convert an SNaN to a QNaN sl@0: _asm mov eax, -6 // return KErrArgument sl@0: _asm ret sl@0: } sl@0: sl@0: // Add TRealX at [esi] + ecx,edx:ebx sl@0: // Result in ecx,edx:ebx sl@0: // Error code in eax sl@0: // Note: +0 + +0 = +0, -0 + -0 = -0, +0 + -0 = -0 + +0 = +0, sl@0: // +/-0 + X = X + +/-0 = X, X + -X = -X + X = +0 sl@0: __NAKED__ LOCAL_C void TRealXAdd() sl@0: { sl@0: _asm xor ch, ch // clear rounding flags sl@0: _asm cmp ecx, 0xFFFF0000 // check if dest=NaN or infinity sl@0: _asm jnc addfpsd // branch if it is sl@0: _asm mov eax, [esi+8] // fetch sign/exponent of source sl@0: _asm cmp eax, 0xFFFF0000 // check if source=NaN or infinity sl@0: _asm jnc addfpss // branch if it is sl@0: _asm cmp eax, 0x10000 // check if source=0 sl@0: _asm jc addfp0s // branch if it is sl@0: _asm cmp ecx, 0x10000 // check if dest=0 sl@0: _asm jc addfp0d // branch if it is sl@0: _asm and cl, 1 // clear bits 1-7 of ecx sl@0: _asm and al, 1 // clear bits 1-7 of eax sl@0: _asm mov ch, cl sl@0: _asm xor ch, al // xor of signs into ch bit 0 sl@0: _asm add ch, ch sl@0: _asm or cl, ch // and into cl bit 1 sl@0: _asm 
or al, ch // and al bit 1 sl@0: _asm xor ch, ch // clear rounding flags sl@0: _asm mov ebp, [esi] // fetch source mantissa 0-31 sl@0: _asm mov edi, [esi+4] // fetch source mantissa 32-63 sl@0: _asm ror ecx, 16 // dest exponent into cx sl@0: _asm ror eax, 16 // source exponent into ax sl@0: _asm push ecx // push dest exponent/sign sl@0: _asm sub cx, ax // cx = dest exponent - source exponent sl@0: _asm je short addfp3b // if equal, no shifting required sl@0: _asm ja short addfp1 // branch if dest exponent >= source exponent sl@0: _asm xchg ebx, ebp // make sure edi:ebp contains the mantissa to be shifted sl@0: _asm xchg edx, edi // sl@0: _asm xchg eax, [esp] // and larger exponent and corresponding sign is on the stack sl@0: _asm neg cx // make cx positive = number of right shifts needed sl@0: addfp1: sl@0: _asm cmp cx, 64 // if more than 64 shifts needed sl@0: _asm ja addfp2 // branch to output larger number sl@0: _asm jb addfp3 // branch if <64 shifts sl@0: _asm mov eax, edi // exactly 64 shifts needed - rounding word=mant high sl@0: _asm test ebp, ebp // check bits lost sl@0: _asm jz short addfp3a sl@0: _asm or ch, 1 // if not all zero, set rounded-down flag sl@0: addfp3a: sl@0: _asm xor edi, edi // clear edx:ebx sl@0: _asm xor ebp, ebp sl@0: _asm jmp short addfp5 // finished shifting sl@0: addfp3b: // exponents equal sl@0: _asm xor eax, eax // set rounding word=0 sl@0: _asm jmp short addfp5 sl@0: addfp3: sl@0: _asm cmp cl, 32 // 32 or more shifts needed ? 
sl@0: _asm jb short addfp4 // skip if <32 sl@0: _asm mov eax, ebp // rounding word=mant low sl@0: _asm mov ebp, edi // mant low=mant high sl@0: _asm xor edi, edi // mant high=0 sl@0: _asm sub cl, 32 // reduce count by 32 sl@0: _asm jz short addfp5 // if now zero, finished shifting sl@0: _asm shrd edi, eax, cl // shift ebp:eax:edi right by cl bits sl@0: _asm shrd eax, ebp, cl // sl@0: _asm shr ebp, cl // sl@0: _asm test edi, edi // check bits lost in shift sl@0: _asm jz short addfp5 // if all zero, finished sl@0: _asm or ch, 1 // else set rounded-down flag sl@0: _asm xor edi, edi // clear edx again sl@0: _asm jmp short addfp5 // finished shifting sl@0: addfp4: // <32 shifts needed now sl@0: _asm xor eax, eax // clear rounding word initially sl@0: _asm shrd eax, ebp, cl // shift edi:ebp:eax right by cl bits sl@0: _asm shrd ebp, edi, cl // sl@0: _asm shr edi, cl // sl@0: sl@0: addfp5: sl@0: _asm mov [esp+3], ch // rounding flag into ch image on stack sl@0: _asm pop ecx // recover sign and exponent into ecx, with rounding flag sl@0: _asm ror ecx, 16 // into normal position sl@0: _asm test cl, 2 // addition or subtraction needed ? 
sl@0: _asm jnz short subfp1 // branch if subtraction sl@0: _asm add ebx,ebp // addition required - add mantissas sl@0: _asm adc edx,edi // sl@0: _asm jnc short roundfp // branch if no carry sl@0: _asm rcr edx,1 // shift carry right into mantissa sl@0: _asm rcr ebx,1 // sl@0: _asm rcr eax,1 // and into rounding word sl@0: _asm jnc short addfp5a sl@0: _asm or ch, 1 // if 1 shifted out, set rounded-down flag sl@0: addfp5a: sl@0: _asm add ecx, 0x10000 // and increment exponent sl@0: sl@0: // perform rounding based on rounding word in eax and rounding flag in ch sl@0: roundfp: sl@0: _asm cmp eax, 0x80000000 sl@0: _asm jc roundfp0 // if rounding word<80000000, round down sl@0: _asm ja roundfp1 // if >80000000, round up sl@0: _asm test ch, 1 sl@0: _asm jnz short roundfp1 // if rounded-down flag set, round up sl@0: _asm test ch, 2 sl@0: _asm jnz short roundfp0 // if rounded-up flag set, round down sl@0: _asm test bl, 1 // else test mantissa lsb sl@0: _asm jz short roundfp0 // round down if 0, up if 1 (round to even) sl@0: roundfp1: // Come here to round up sl@0: _asm add ebx, 1 // increment mantissa sl@0: _asm adc edx,0 // sl@0: _asm jnc roundfp1a // if no carry OK sl@0: _asm rcr edx,1 // else shift carry into mantissa (edx:ebx=0 here) sl@0: _asm add ecx, 0x10000 // and increment exponent sl@0: roundfp1a: sl@0: _asm cmp ecx, 0xFFFF0000 // check for overflow sl@0: _asm jae short addfpovfw // jump if overflow sl@0: _asm mov ch, 2 // else set rounded-up flag sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: roundfp0: // Come here to round down sl@0: _asm cmp ecx, 0xFFFF0000 // check for overflow sl@0: _asm jae short addfpovfw // jump if overflow sl@0: _asm test eax, eax // else check if rounding word zero sl@0: _asm jz short roundfp0a // if so, leave rounding flags as they are sl@0: _asm mov ch, 1 // else set rounded-down flag sl@0: roundfp0a: sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret // exit sl@0: sl@0: addfpovfw: // Come here if overflow 
occurs sl@0: _asm xor ch, ch // clear rounding flags, exponent=FFFF sl@0: _asm xor ebx, ebx sl@0: _asm mov edx, 0x80000000 // mantissa=80000000 00000000 for infinity sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm ret sl@0: sl@0: // exponents differ by more than 64 - output larger number sl@0: addfp2: sl@0: _asm pop ecx // recover exponent and sign sl@0: _asm ror ecx, 16 // into normal position sl@0: _asm or ch, 1 // set rounded-down flag sl@0: _asm test cl, 2 // check if signs the same sl@0: _asm jz addfp2a sl@0: _asm xor ch, 3 // if not, set rounded-up flag sl@0: addfp2a: sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // signs differ, so must subtract mantissas sl@0: subfp1: sl@0: _asm add ch, ch // if rounded-down flag set, change it to rounded-up sl@0: _asm neg eax // subtract rounding word from 0 sl@0: _asm sbb ebx, ebp // and subtract mantissas with borrow sl@0: _asm sbb edx, edi // sl@0: _asm jnc short subfp2 // if no borrow, sign is correct sl@0: _asm xor cl, 1 // else change sign of result sl@0: _asm shr ch, 1 // change rounding back to rounded-down sl@0: _asm not eax // negate rounding word sl@0: _asm not ebx // and mantissa sl@0: _asm not edx // sl@0: _asm add eax,1 // two's complement negation sl@0: _asm adc ebx,0 // sl@0: _asm adc edx,0 // sl@0: subfp2: sl@0: _asm jnz short subfp3 // branch if edx non-zero at this point sl@0: _asm mov edx, ebx // else shift ebx into edx sl@0: _asm or edx, edx // sl@0: _asm jz short subfp4 // if still zero, branch sl@0: _asm mov ebx, eax // else shift rounding word into ebx sl@0: _asm xor eax, eax // and zero rounding word sl@0: _asm sub ecx, 0x200000 // decrease exponent by 32 due to shift sl@0: _asm jnc short subfp3 // if no borrow, carry on sl@0: _asm jmp short subfpundflw // if borrow here, underflow sl@0: subfp4: sl@0: _asm mov edx, eax // move rounding word into edx sl@0: _asm or edx, edx // is edx still zero ? 
sl@0: _asm jz short subfp0 // if so, result is precisely zero sl@0: _asm xor ebx, ebx // else zero ebx and rounding word sl@0: _asm xor eax, eax // sl@0: _asm sub ecx, 0x400000 // and decrease exponent by 64 due to shift sl@0: _asm jc short subfpundflw // if borrow, underflow sl@0: subfp3: sl@0: _asm mov edi, ecx // preserve sign and exponent sl@0: _asm bsr ecx, edx // position of most significant 1 into ecx sl@0: _asm neg ecx // sl@0: _asm add ecx, 31 // cl = 31-position of MS 1 = number of shifts to normalise sl@0: _asm shld edx, ebx, cl // shift edx:ebx:eax left by cl bits sl@0: _asm shld ebx, eax, cl // sl@0: _asm shl eax, cl // sl@0: _asm mov ebp, ecx // bit count into ebp for subtraction sl@0: _asm shl ebp, 16 // shift left by 16 to align with exponent sl@0: _asm mov ecx, edi // exponent, sign, rounding flags back into ecx sl@0: _asm sub ecx, ebp // subtract shift count from exponent sl@0: _asm jc short subfpundflw // if borrow, underflow sl@0: _asm cmp ecx, 0x10000 // check if exponent 0 sl@0: _asm jnc roundfp // if not, jump to round result, else underflow sl@0: sl@0: // come here if underflow sl@0: subfpundflw: sl@0: _asm and ecx, 1 // set exponent to zero, leave sign sl@0: _asm xor edx, edx sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -10 // return KErrUnderflow sl@0: _asm ret sl@0: sl@0: // come here to return zero result sl@0: subfp0: sl@0: _asm xor ecx, ecx // set exponent to zero, positive sign sl@0: _asm xor edx, edx sl@0: _asm xor ebx, ebx sl@0: addfp0snzd: sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if source=0 - eax=source exponent/sign sl@0: addfp0s: sl@0: _asm cmp ecx, 0x10000 // check if dest=0 sl@0: _asm jnc addfp0snzd // if not, return dest unaltered sl@0: _asm and ecx, eax // else both zero, result negative iff both zeros negative sl@0: _asm and ecx, 1 sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if dest=0, source nonzero sl@0: addfp0d: sl@0: _asm mov ebx, [esi] // 
return source unaltered sl@0: _asm mov edx, [esi+4] sl@0: _asm mov ecx, [esi+8] sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if dest=NaN or infinity sl@0: addfpsd: sl@0: _asm cmp edx, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebx, ebx sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm mov eax, [esi+8] // eax=second operand exponent sl@0: _asm cmp eax, 0xFFFF0000 // check second operand for NaN or infinity sl@0: _asm jae short addfpsd1 // branch if NaN or infinity sl@0: addfpsd2: sl@0: _asm mov eax, -9 // else return dest unaltered (infinity) and KErrOverflow sl@0: _asm ret sl@0: addfpsd1: sl@0: _asm mov ebp, [esi] // source mantissa into edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebp, ebp sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm xor al, cl // both operands are infinity - check signs sl@0: _asm test al, 1 sl@0: _asm jz short addfpsd2 // if both the same, return KErrOverflow sl@0: _asm jmp TRealXRealIndefinite // else return 'real indefinite' sl@0: sl@0: // come here if source=NaN or infinity, dest finite sl@0: addfpss: sl@0: _asm mov ebp, [esi] // source mantissa into edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebp, ebp sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm mov ecx, eax // if source=infinity, return source unaltered sl@0: _asm mov edx, edi sl@0: _asm mov ebx, ebp sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm ret sl@0: } sl@0: sl@0: // Subtract TRealX at [esi] - ecx,edx:ebx sl@0: // Result in ecx,edx:ebx sl@0: // Error code in eax sl@0: __NAKED__ LOCAL_C void TRealXSubtract() sl@0: { sl@0: _asm xor cl, 1 // negate subtrahend sl@0: _asm jmp TRealXAdd sl@0: } sl@0: sl@0: // Multiply TRealX at [esi] * ecx,edx:ebx sl@0: // Result in ecx,edx:ebx sl@0: 
// Error code in eax sl@0: __NAKED__ LOCAL_C void TRealXMultiply() sl@0: { sl@0: _asm xor ch, ch // clear rounding flags sl@0: _asm mov eax, [esi+8] // fetch sign/exponent of source sl@0: _asm xor cl, al // xor signs sl@0: _asm cmp ecx, 0xFFFF0000 // check if dest=NaN or infinity sl@0: _asm jnc mulfpsd // branch if it is sl@0: _asm cmp eax, 0xFFFF0000 // check if source=NaN or infinity sl@0: _asm jnc mulfpss // branch if it is sl@0: _asm cmp eax, 0x10000 // check if source=0 sl@0: _asm jc mulfp0 // branch if it is sl@0: _asm cmp ecx, 0x10000 // check if dest=0 sl@0: _asm jc mulfp0 // branch if it is sl@0: _asm push ecx // save result sign sl@0: _asm shr ecx, 16 // dest exponent into cx sl@0: _asm shr eax, 16 // source exponent into ax sl@0: _asm add eax, ecx // add exponents sl@0: _asm sub eax, 0x7FFE // eax now contains result exponent sl@0: _asm push eax // save it sl@0: _asm mov edi, edx // save dest mantissa high sl@0: _asm mov eax, ebx // dest mantissa low -> eax sl@0: _asm mul dword ptr [esi] // dest mantissa low * source mantissa low -> edx:eax sl@0: _asm xchg ebx, eax // result dword 0 -> ebx, dest mant low -> eax sl@0: _asm mov ebp, edx // result dword 1 -> ebp sl@0: _asm mul dword ptr [esi+4] // dest mant low * src mant high -> edx:eax sl@0: _asm add ebp, eax // add in partial product to dwords 1 and 2 sl@0: _asm adc edx, 0 // sl@0: _asm mov ecx, edx // result dword 2 -> ecx sl@0: _asm mov eax, edi // dest mant high -> eax sl@0: _asm mul dword ptr [esi+4] // dest mant high * src mant high -> edx:eax sl@0: _asm add ecx, eax // add in partial product to dwords 2, 3 sl@0: _asm adc edx, 0 // sl@0: _asm mov eax, edi // dest mant high -> eax sl@0: _asm mov edi, edx // result dword 3 -> edi sl@0: _asm mul dword ptr [esi] // dest mant high * src mant low -> edx:eax sl@0: _asm add ebp, eax // add in partial product to dwords 1, 2 sl@0: _asm adc ecx, edx // sl@0: _asm adc edi, 0 // 128-bit mantissa product is now in edi:ecx:ebp:ebx sl@0: _asm mov edx, edi // top 64 
bits into edx:ebx sl@0: _asm mov edi, ebx sl@0: _asm mov ebx, ecx // bottom 64 bits now in ebp:edi sl@0: _asm pop ecx // recover exponent sl@0: _asm js short mulfp1 // skip if mantissa normalised sl@0: _asm add edi, edi // else shift left (only one shift will be needed) sl@0: _asm adc ebp, ebp sl@0: _asm adc ebx, ebx sl@0: _asm adc edx, edx sl@0: _asm dec ecx // and decrement exponent sl@0: mulfp1: sl@0: _asm cmp ebp, 0x80000000 // compare bottom 64 bits with 80000000 00000000 for rounding sl@0: _asm ja short mulfp2 // branch to round up sl@0: _asm jb short mulfp3 // branch to round down sl@0: _asm test edi, edi sl@0: _asm jnz short mulfp2 // branch to round up sl@0: _asm test bl, 1 // if exactly half-way, test LSB of result mantissa sl@0: _asm jz short mulfp4 // if LSB=0, round down (round to even) sl@0: mulfp2: sl@0: _asm add ebx, 1 // round up - increment mantissa sl@0: _asm adc edx, 0 sl@0: _asm jnc short mulfp2a sl@0: _asm rcr edx, 1 sl@0: _asm inc ecx sl@0: mulfp2a: sl@0: _asm mov al, 2 // set rounded-up flag sl@0: _asm jmp short mulfp5 sl@0: mulfp3: // round down sl@0: _asm xor al, al // clear rounding flags sl@0: _asm or ebp, edi // check for exact result sl@0: _asm jz short mulfp5 // skip if exact sl@0: mulfp4: // come here to round down when we know result inexact sl@0: _asm mov al, 1 // else set rounded-down flag sl@0: mulfp5: // final mantissa now in edx:ebx, exponent in ecx sl@0: _asm cmp ecx, 0xFFFF // check for overflow sl@0: _asm jge short mulfp6 // branch if overflow sl@0: _asm cmp ecx, 0 // check for underflow sl@0: _asm jle short mulfp7 // branch if underflow sl@0: _asm shl ecx, 16 // else exponent up to top end of ecx sl@0: _asm mov ch, al // rounding flags into ch sl@0: _asm pop eax // recover result sign sl@0: _asm mov cl, al // into cl sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if overflow sl@0: mulfp6: sl@0: _asm pop eax // recover result sign sl@0: _asm mov ecx, 0xFFFF0000 // exponent=FFFF sl@0: _asm 
mov cl, al // sign into cl sl@0: _asm mov edx, 0x80000000 // set mantissa to 80000000 00000000 for infinity sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm ret sl@0: sl@0: // come here if underflow sl@0: mulfp7: sl@0: _asm pop eax // recover result sign sl@0: _asm xor ecx, ecx // exponent=0 sl@0: _asm mov cl, al // sign into cl sl@0: _asm xor edx, edx sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -10 // return KErrUnderflow sl@0: _asm ret sl@0: sl@0: // come here if either operand zero sl@0: mulfp0: sl@0: _asm and ecx, 1 // set exponent=0, keep sign sl@0: _asm xor edx, edx sl@0: _asm xor ebx, ebx sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if destination operand NaN or infinity sl@0: mulfpsd: sl@0: _asm cmp edx, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebx, ebx sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm cmp eax, 0xFFFF0000 // check second operand for NaN or infinity sl@0: _asm jae short mulfpsd1 // branch if NaN or infinity sl@0: _asm cmp eax, 0x10000 // check if second operand zero sl@0: _ASM_j(c,TRealXRealIndefinite) // if so, return 'real indefinite' sl@0: _asm mov eax, -9 // else return dest (infinity) with xor sign and KErrOverflow sl@0: _asm ret sl@0: mulfpsd1: sl@0: _asm mov ebp, [esi] // source mantissa into edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebp, ebp sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm mov eax, -9 // both operands infinity - return infinity with xor sign sl@0: _asm ret // and KErrOverflow sl@0: sl@0: // come here if source operand NaN or infinity, destination finite sl@0: mulfpss: sl@0: _asm mov ebp, [esi] // source mantissa into edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebp, ebp sl@0: 
_ASM_jn(e,TRealXBinOpNaN) sl@0: _asm cmp ecx, 0x10000 // source=infinity, check if dest=0 sl@0: _ASM_j(c,TRealXRealIndefinite) // if so, return 'real indefinite' sl@0: _asm or ecx, 0xFFFF0000 // set exp=FFFF, leave xor sign in cl sl@0: _asm mov edx, edi // set mantissa for infinity sl@0: _asm mov ebx, ebp sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm ret sl@0: } sl@0: sl@0: // Divide 96-bit unsigned dividend EDX:EAX:0 by 64-bit unsigned divisor ECX:EBX sl@0: // Assume ECX bit 31 = 1, ie 2^63 <= divisor < 2^64 sl@0: // Assume the quotient fits in 32 bits sl@0: // Return 32 bit quotient in EDI sl@0: // Return 64 bit remainder in EBP:ESI sl@0: __NAKED__ LOCAL_C void LongDivide(void) sl@0: { sl@0: _asm push edx // save dividend sl@0: _asm push eax // sl@0: _asm cmp edx, ecx // check if truncation of divisor will overflow DIV instruction sl@0: _asm jb short longdiv1 // skip if not sl@0: _asm xor eax, eax // else return quotient of 0xFFFFFFFF sl@0: _asm dec eax // sl@0: _asm jmp short longdiv2 // sl@0: longdiv1: sl@0: _asm div ecx // divide EDX:EAX by ECX to give approximate quotient in EAX sl@0: longdiv2: sl@0: _asm mov edi, eax // save approx quotient sl@0: _asm mul ebx // multiply approx quotient by full divisor ECX:EBX sl@0: _asm mov esi, eax // first partial product into EBP:ESI sl@0: _asm mov ebp, edx // sl@0: _asm mov eax, edi // approx quotient back into eax sl@0: _asm mul ecx // upper partial product now in EDX:EAX sl@0: _asm add eax, ebp // add to form 96-bit product in EDX:EAX:ESI sl@0: _asm adc edx, 0 // sl@0: _asm neg esi // remainder = dividend - approx quotient * divisor sl@0: _asm mov ebp, [esp] // fetch dividend bits 32-63 sl@0: _asm sbb ebp, eax // sl@0: _asm mov eax, [esp+4] // fetch dividend bits 64-95 sl@0: _asm sbb eax, edx // remainder is now in EAX:EBP:ESI sl@0: _asm jns short longdiv4 // if remainder positive, quotient is correct, so exit sl@0: longdiv3: sl@0: _asm dec edi // else quotient is too big, so decrement it sl@0: _asm add 
esi, ebx // and add divisor to remainder sl@0: _asm adc ebp, ecx // sl@0: _asm adc eax, 0 // sl@0: _asm js short longdiv3 // if still negative, repeat (requires <4 iterations) sl@0: longdiv4: sl@0: _asm add esp, 8 // remove dividend from stack sl@0: _asm ret // return with quotient in EDI, remainder in EBP:ESI sl@0: } sl@0: sl@0: // Divide TRealX at [esi] / ecx,edx:ebx sl@0: // Result in ecx,edx:ebx sl@0: // Error code in eax sl@0: __NAKED__ LOCAL_C void TRealXDivide(void) sl@0: { sl@0: _asm xor ch, ch // clear rounding flags sl@0: _asm mov eax, [esi+8] // fetch sign/exponent of dividend sl@0: _asm xor cl, al // xor signs sl@0: _asm cmp eax, 0xFFFF0000 // check if dividend=NaN or infinity sl@0: _asm jnc divfpss // branch if it is sl@0: _asm cmp ecx, 0xFFFF0000 // check if divisor=NaN or infinity sl@0: _asm jnc divfpsd // branch if it is sl@0: _asm cmp ecx, 0x10000 // check if divisor=0 sl@0: _asm jc divfpdv0 // branch if it is sl@0: _asm cmp eax, 0x10000 // check if dividend=0 sl@0: _asm jc divfpdd0 // branch if it is sl@0: _asm push esi // save pointer to dividend sl@0: _asm push ecx // save result sign sl@0: _asm shr ecx, 16 // divisor exponent into cx sl@0: _asm shr eax, 16 // dividend exponent into ax sl@0: _asm sub eax, ecx // subtract exponents sl@0: _asm add eax, 0x7FFE // eax now contains result exponent sl@0: _asm push eax // save it sl@0: _asm mov ecx, edx // divisor mantissa into ecx:ebx sl@0: _asm mov edx, [esi+4] // dividend mantissa into edx:eax sl@0: _asm mov eax, [esi] sl@0: _asm xor edi, edi // clear edi initially sl@0: _asm cmp edx, ecx // compare EDX:EAX with ECX:EBX sl@0: _asm jb short divfp1 // if EDX:EAX < ECX:EBX, leave everything as is sl@0: _asm ja short divfp2 // sl@0: _asm cmp eax, ebx // if EDX=ECX, then compare ls dwords sl@0: _asm jb short divfp1 // if dividend mant < divisor mant, leave everything as is sl@0: divfp2: sl@0: _asm sub eax, ebx // else dividend mant -= divisor mant sl@0: _asm sbb edx, ecx // sl@0: _asm inc edi // and 
EDI=1 (bit 0 of EDI is the integer part of the result) sl@0: _asm inc dword ptr [esp] // also increment result exponent sl@0: divfp1: sl@0: _asm push edi // save top bit of result sl@0: _asm call LongDivide // divide EDX:EAX:0 by ECX:EBX to give next 32 bits of result in EDI sl@0: _asm push edi // save next 32 bits of result sl@0: _asm mov edx, ebp // remainder from EBP:ESI into EDX:EAX sl@0: _asm mov eax, esi // sl@0: _asm call LongDivide // divide EDX:EAX:0 by ECX:EBX to give next 32 bits of result in EDI sl@0: _asm test byte ptr [esp+4], 1 // test integer bit of result sl@0: _asm jnz short divfp4 // if set, no need to calculate another bit sl@0: _asm xor eax, eax // sl@0: _asm add esi, esi // 2*remainder into EAX:EBP:ESI sl@0: _asm adc ebp, ebp // sl@0: _asm adc eax, eax // sl@0: _asm sub esi, ebx // subtract divisor to generate final quotient bit sl@0: _asm sbb ebp, ecx // sl@0: _asm sbb eax, 0 // sl@0: _asm jnc short divfp3 // skip if no borrow - in this case eax=0 sl@0: _asm add esi, ebx // if borrow add back - final remainder now in EBP:ESI sl@0: _asm adc ebp, ecx // sl@0: _asm adc eax, 0 // eax will be zero after this and carry will be set sl@0: divfp3: sl@0: _asm cmc // final bit = 1-C sl@0: _asm rcr eax, 1 // shift it into eax bit 31 sl@0: _asm mov ebx, edi // result into EDX:EBX:EAX, remainder in EBP:ESI sl@0: _asm pop edx sl@0: _asm add esp, 4 // discard integer bit (zero) sl@0: _asm jmp short divfp5 // branch to round sl@0: sl@0: divfp4: // integer bit was set sl@0: _asm mov ebx, edi // result into EDX:EBX:EAX sl@0: _asm pop edx // sl@0: _asm pop eax // integer part of result into eax (=1) sl@0: _asm stc // shift a 1 into top end of mantissa sl@0: _asm rcr edx,1 // sl@0: _asm rcr ebx,1 // sl@0: _asm rcr eax,1 // bottom bit into eax bit 31 sl@0: sl@0: // when we get to here we have 65 bits of quotient mantissa in sl@0: // EDX:EBX:EAX (bottom bit in eax bit 31) sl@0: // and the remainder is in EBP:ESI sl@0: divfp5: sl@0: _asm pop ecx // recover result 
exponent sl@0: _asm add eax, eax // test rounding bit sl@0: _asm jnc short divfp6 // branch to round down sl@0: _asm or ebp, esi // test remainder to see if we are exactly half-way sl@0: _asm jnz short divfp7 // if not, round up sl@0: _asm test bl, 1 // exactly halfway - test LSB of mantissa sl@0: _asm jz short divfp8 // round down if LSB=0 (round to even) sl@0: divfp7: sl@0: _asm add ebx, 1 // round up - increment mantissa sl@0: _asm adc edx, 0 sl@0: _asm jnc short divfp7a sl@0: _asm rcr edx, 1 // if carry, shift 1 into mantissa MSB sl@0: _asm inc ecx // and increment exponent sl@0: divfp7a: sl@0: _asm mov al, 2 // set rounded-up flag sl@0: _asm jmp short divfp9 sl@0: divfp6: sl@0: _asm xor al, al // round down - first clear rounding flags sl@0: _asm or ebp, esi // test if result exact sl@0: _asm jz short divfp9 // skip if exact sl@0: divfp8: // come here to round down when we know result is inexact sl@0: _asm mov al, 1 // set rounded-down flag sl@0: divfp9: // final mantissa now in edx:ebx, exponent in ecx sl@0: _asm cmp ecx, 0xFFFF // check for overflow sl@0: _asm jge short divfp10 // branch if overflow sl@0: _asm cmp ecx, 0 // check for underflow sl@0: _asm jle short divfp11 // branch if underflow sl@0: _asm shl ecx, 16 // else exponent up to top end of ecx sl@0: _asm mov ch, al // rounding flags into ch sl@0: _asm pop eax // recover result sign sl@0: _asm mov cl, al // into cl sl@0: _asm pop esi // recover dividend pointer sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if overflow sl@0: divfp10: sl@0: _asm pop eax // recover result sign sl@0: _asm mov ecx, 0xFFFF0000 // exponent=FFFF sl@0: _asm mov cl, al // sign into cl sl@0: _asm mov edx, 0x80000000 // set mantissa to 80000000 00000000 for infinity sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm pop esi // recover dividend pointer sl@0: _asm ret sl@0: sl@0: // come here if underflow sl@0: divfp11: sl@0: _asm pop eax // recover result sign 
sl@0: _asm xor ecx, ecx // exponent=0 sl@0: _asm mov cl, al // sign into cl sl@0: _asm xor edx, edx sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -10 // return KErrUnderflow sl@0: _asm pop esi // recover dividend pointer sl@0: _asm ret sl@0: sl@0: sl@0: // come here if divisor=0, dividend finite sl@0: divfpdv0: sl@0: _asm cmp eax, 0x10000 // check if dividend also zero sl@0: _ASM_j(c,TRealXRealIndefinite) // if so, return 'real indefinite' sl@0: _asm or ecx, 0xFFFF0000 // else set exponent=FFFF, leave xor sign in cl sl@0: _asm mov edx, 0x80000000 // set mantissa for infinity sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -41 // return KErrDivideByZero sl@0: _asm ret sl@0: sl@0: // come here if dividend=0, divisor finite and nonzero sl@0: divfpdd0: sl@0: _asm and ecx, 1 // exponent=0, leave xor sign in cl sl@0: _asm xor eax, eax // return KErrNone sl@0: _asm ret sl@0: sl@0: // come here if dividend is a NaN or infinity sl@0: divfpss: sl@0: _asm mov ebp, [esi] // dividend mantissa into edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebp, ebp sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm cmp ecx, 0xFFFF0000 // check divisor for NaN or infinity sl@0: _asm jae short divfpss1 // branch if NaN or infinity sl@0: _asm or ecx, 0xFFFF0000 // infinity/finite - return infinity with xor sign sl@0: _asm mov edx, 0x80000000 sl@0: _asm xor ebx, ebx sl@0: _asm mov eax, -9 // return KErrOverflow sl@0: _asm ret sl@0: divfpss1: sl@0: _asm cmp edx, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebx, ebx sl@0: _ASM_jn(e,TRealXBinOpNaN) sl@0: _asm jmp TRealXRealIndefinite // if both operands infinite, return 'real indefinite' sl@0: sl@0: // come here if divisor is a NaN or infinity, dividend finite sl@0: divfpsd: sl@0: _asm cmp edx, 0x80000000 // check for infinity sl@0: _ASM_jn(e,TRealXBinOpNaN) // branch if NaN sl@0: _asm test ebx, ebx 
_ASM_jn(e,TRealXBinOpNaN)
    _asm and ecx, 1             // dividend is finite, divisor=infinity, so return 0 with xor sign
    _asm xor edx, edx
    _asm xor ebx, ebx
    _asm xor eax, eax           // return KErrNone
    _asm ret
    }

// TRealX modulo - dividend at [esi], divisor in ecx,edx:ebx
// Result in ecx,edx:ebx
// Error code in eax
// NOTE(review): result takes the dividend's sign; computed by 65-bit
// restoring division (ah bit 0 is the 65th accumulator bit).
__NAKED__ LOCAL_C void TRealXModulo(void)
    {
    _asm mov eax, [esi+8]       // fetch sign/exponent of dividend
    _asm mov cl, al             // result sign=dividend sign
    _asm xor ch, ch             // clear rounding flags
    _asm cmp eax, 0xFFFF0000    // check if dividend=NaN or infinity
    _asm jnc modfpss            // branch if it is
    _asm cmp ecx, 0xFFFF0000    // check if divisor=NaN or infinity
    _asm jnc modfpsd            // branch if it is
    _asm cmp ecx, 0x10000       // check if divisor=0
    _ASM_j(c,TRealXRealIndefinite)  // if so, return 'real indefinite'
    _asm shr eax, 16            // ax=dividend exponent
    _asm ror ecx, 16            // cx=divisor exponent
    _asm sub ax, cx             // ax=dividend exponent-divisor exponent
    _asm jc modfpdd0            // if dividend exponent is smaller, return dividend
    _asm cmp ax, 64             // check if exponents differ by >= 64 bits
    _asm jnc modfplp            // if so, underflow
    _asm mov ah, 0              // ah bit 0 acts as 65th accumulator bit
    _asm mov ebp, [esi]         // edi:ebp=dividend mantissa
    _asm mov edi, [esi+4]       //
    _asm jmp short modfp2       // skip left shift on first iteration
modfp1:
    _asm add ebp, ebp           // shift accumulator left (65 bits)
    _asm adc edi, edi
    _asm adc ah, ah
modfp2:
    _asm sub ebp, ebx           // subtract divisor from dividend
    _asm sbb edi, edx
    _asm sbb ah, 0
    _asm jnc short modfp3       // skip if no borrow
    _asm add ebp, ebx           // else add back (restoring division step)
    _asm adc edi, edx
    _asm adc ah, 0
modfp3:
    _asm dec al                 // any more bits to do?
    _asm jns short modfp1       // loop if there are
    _asm mov edx, edi           // result mantissa (not yet normalised) into edx:ebx
    _asm mov ebx, ebp
    _asm or edi, ebx            // check for zero
    _asm jz modfp0              // jump if result zero
    _asm or edx, edx            // check if ms dword zero
    _asm jnz short modfp4
    _asm mov edx, ebx           // if so, shift left by 32
    _asm xor ebx, ebx
    _asm sub cx, 32             // and decrement exponent by 32
    _asm jbe modfpund           // if borrow or exponent zero, underflow
modfp4:
    _asm mov edi, ecx           // preserve sign and exponent
    _asm bsr ecx, edx           // position of most significant 1 into ecx
    _asm neg ecx                //
    _asm add ecx, 31            // cl = 31-position of MS 1 = number of shifts to normalise
    _asm shld edx, ebx, cl      // shift edx:ebx left by cl bits
    _asm shl ebx, cl            //
    _asm mov ebp, ecx           // bit count into ebp for subtraction
    _asm mov ecx, edi           // exponent & sign back into ecx
    _asm sub cx, bp             // subtract shift count from exponent
    _asm jbe short modfpund     // if borrow or exponent 0, underflow
    _asm rol ecx, 16            // else ecx=exponent:sign
    _asm xor eax, eax           // normal exit, result in ecx,edx:ebx
    _asm ret

// dividend=NaN or infinity
modfpss:
    _asm mov ebp, [esi]         // dividend mantissa into edi:ebp
    _asm mov edi, [esi+4]
    _asm cmp edi, 0x80000000    // check for infinity
    _ASM_jn(e,TRealXBinOpNaN)   // branch if NaN
    _asm test ebp, ebp
    _ASM_jn(e,TRealXBinOpNaN)
    _asm cmp ecx, 0xFFFF0000    // check divisor for NaN or infinity
    _ASM_j(b,TRealXRealIndefinite)  // infinity%finite - return 'real indefinite'
    _asm cmp edx, 0x80000000    // check for divisor=infinity
    _ASM_jn(e,TRealXBinOpNaN)   // branch if NaN
    _asm test ebx, ebx
    _ASM_jn(e,TRealXBinOpNaN)
    _asm jmp TRealXRealIndefinite   // if both operands infinite, return 'real indefinite'

// divisor=NaN or infinity, dividend finite
modfpsd:
    _asm cmp edx, 0x80000000    // check for infinity
    _ASM_jn(e,TRealXBinOpNaN)   // branch if NaN
    _asm test ebx, ebx
    _ASM_jn(e,TRealXBinOpNaN)
    // finite%infinity - return dividend unaltered

modfpdd0:
    _asm mov ebx, [esi]         // normal exit, return dividend unaltered
    _asm mov edx, [esi+4]
    _asm mov ecx, [esi+8]
    _asm xor eax, eax
    _asm ret

modfp0:
    _asm shr ecx, 16            // normal exit, result 0
    _asm xor eax, eax
    _asm ret

modfpund:
    _asm shr ecx, 16            // underflow, result 0
    _asm mov eax, -10           // return KErrUnderflow
    _asm ret

modfplp:
    _asm shr ecx, 16            // loss of precision, result 0
    _asm mov eax, -7            // return KErrTotalLossOfPrecision
    _asm ret
    }


__NAKED__ EXPORT_C TRealX::TRealX()
/**
Constructs a default extended precision object.

This sets the value to zero.
*/
    {
    // on entry ecx=this (thiscall); all three dwords of the TRealX are zeroed
    _asm xor eax, eax
    _asm mov [ecx], eax         // set value to zero
    _asm mov [ecx+4], eax
    _asm mov [ecx+8], eax
    _asm mov eax, ecx           // must return this
    _asm ret
    }


__NAKED__ EXPORT_C TRealX::TRealX(TUint /*aExp*/, TUint /*aMantHi*/, TUint /*aMantLo*/)
/**
Constructs an extended precision object from an explicit exponent and
a 64 bit mantissa.

@param aExp The exponent
@param aMantHi The high order 32 bits of the 64 bit mantissa
@param aMantLo The low order 32 bits of the 64 bit mantissa
*/
    {
    // on entry ecx=this; the three stack arguments are stored verbatim
    // (no normalisation is performed - caller supplies a valid layout)
    _asm mov eax, [esp+4]       // eax=aExp
    _asm mov [ecx+8], eax
    _asm mov eax, [esp+8]       // eax=aMantHi
    _asm mov [ecx+4], eax
    _asm mov eax, [esp+12]      // eax=aMantLo
    _asm mov [ecx], eax
    _asm mov eax, ecx           // must return this
    _asm ret 12
    }


__NAKED__ EXPORT_C TInt TRealX::Set(TInt /*aInt*/)
/**
Gives this extended precision object a new value taken
from a signed integer.

@param aInt The signed integer value.

@return KErrNone, always.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return code in eax
    _asm mov edx, [esp+4]       // edx=aInt
    _asm or edx, edx            // test sign/zero
    _asm mov eax, 0x7FFF
    _asm jz short trealxfromint0    // branch if 0
    _asm jns short trealxfromint1   // skip if positive
    _asm neg edx                // take absolute value
    _asm add eax, 0x10000       // sign bit in eax bit 16
trealxfromint1:
    _asm push ecx               // save this
    _asm bsr ecx, edx           // bit number of edx MSB into ecx
    _asm add eax, ecx           // add to eax to form result exponent
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of shifts to normalise edx
    _asm shl edx, cl            // normalise edx
    _asm pop ecx                // this back into ecx
    _asm ror eax, 16            // sign/exponent into normal positions
    _asm mov [ecx+4], edx       // store mantissa high word
    _asm mov [ecx+8], eax       // store sign/exponent
    _asm xor eax, eax
    _asm mov [ecx], eax         // zero mantissa low word
    _asm ret 4                  // return KErrNone
trealxfromint0:
    _asm mov [ecx], edx
    _asm mov [ecx+4], edx       // store mantissa high word=0
    _asm mov [ecx+8], edx       // store sign/exponent=0
    _asm xor eax, eax           // return KErrNone
    _asm ret 4
    }


__NAKED__ EXPORT_C TInt TRealX::Set(TUint /*aInt*/)
/**
Gives this extended precision object a new value taken from
an unsigned integer.

@param aInt The unsigned integer value.

@return KErrNone, always.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return code in eax
    _asm mov edx, [esp+4]       // edx=aInt
    _asm mov eax, 0x7FFF
    _asm or edx, edx            // test for 0
    _asm jz short trealxfromuint0   // branch if 0
    _asm push ecx               // save this
    _asm bsr ecx, edx           // bit number of edx MSB into ecx
    _asm add eax, ecx           // add to eax to form result exponent
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of shifts to normalise edx
    _asm shl edx, cl            // normalise edx
    _asm pop ecx                // this back into ecx
    _asm shl eax, 16            // exponent into normal position
    _asm mov [ecx+4], edx       // store mantissa high word
    _asm mov [ecx+8], eax       // store exponent
    _asm xor eax, eax
    _asm mov [ecx], eax         // zero mantissa low word
    _asm ret 4                  // return KErrNone
trealxfromuint0:
    _asm mov [ecx], edx
    _asm mov [ecx+4], edx       // store mantissa high word=0
    _asm mov [ecx+8], edx       // store sign/exponent=0
    _asm xor eax, eax           // return KErrNone
    _asm ret 4
    }


// Shared helper: convert a signed 64-bit integer to TRealX.
__NAKED__ LOCAL_C void TRealXFromTInt64(void)
    {
    // Convert TInt64 in edx:ebx to TRealX in ecx,edx:ebx
    _asm mov eax, 0x7FFF
    _asm or edx, edx            // test sign/zero
    _asm jz short trealxfromtint64a // branch if top word zero
    _asm jns short trealxfromtint64b
    _asm add eax, 0x10000       // sign bit into eax bit 16
    _asm neg edx                // take absolute value
    _asm neg ebx
    _asm sbb edx, 0
    _asm jz short trealxfromtint64d // branch if top word zero
trealxfromtint64b:
    _asm bsr ecx, edx           // ecx=bit number of edx MSB
    _asm add eax, ecx           // add to exponent in eax
    _asm add eax, 32
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of left shifts to normalise
    _asm shld edx, ebx, cl      // shift left to normalise edx:ebx
    _asm shl ebx, cl
    _asm mov ecx, eax           // sign/exponent into ecx
    _asm ror ecx, 16            // and into normal positions
    _asm ret
trealxfromtint64a:              // come here if top word zero
    _asm or ebx, ebx            // test for bottom word also zero
    _asm jz short trealxfromtint64c // branch if it is
trealxfromtint64d:              // come here if top word zero, bottom word not
    _asm mov edx, ebx           // shift edx:ebx left 32
    _asm xor ebx, ebx
    _asm bsr ecx, edx           // ecx=bit number of edx MSB
    _asm add eax, ecx           // add to exponent in eax
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of left shifts to normalise
    _asm shl edx, cl            // normalise
    _asm mov ecx, eax           // sign/exponent into ecx
    _asm ror ecx, 16            // and into normal positions
    _asm ret
trealxfromtint64c:              // entire number is zero
    _asm xor ecx, ecx
    _asm ret
    }


__NAKED__ EXPORT_C TInt TRealX::Set(const TInt64& /*aInt*/)
/**
Gives this extended precision object a new value taken from
a 64 bit integer.

@param aInt The 64 bit integer value.

@return KErrNone, always.
*/
    {
    // on entry ecx=this, [esp+4]=address of aInt, return code in eax
    _asm push ebx
    _asm push ecx
    _asm mov edx, [esp+12]      // edx=address of aInt
    _asm mov ebx, [edx]
    _asm mov edx, [edx+4]       // edx:ebx=aInt
    _asm call TRealXFromTInt64  // convert to TRealX in ecx,edx:ebx
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm xor eax, eax           // return KErrNone
    _asm pop ebx
    _asm ret 4
    }


// Shared body for TRealX(TInt) and operator=(TInt).
// NOTE(review): the name appears to be the legacy mangling of
// TRealX::TRealX(int) - confirm before renaming.
__NAKED__ LOCAL_C void __6TRealXi()
    {
    // common function for int to TRealX
    _asm mov edx, [esp+4]       // edx=aInt
    _asm or edx, edx            // test sign/zero
    _asm mov eax, 0x7FFF
    _asm jz short trealxfromint0    // branch if 0
    _asm jns short trealxfromint1   // skip if positive
    _asm neg edx                // take absolute value
    _asm add eax, 0x10000       // sign bit in eax bit 16
trealxfromint1:
    _asm push ecx               // save this
    _asm bsr ecx, edx           // bit number of edx MSB into ecx
    _asm add eax, ecx           // add to eax to form result exponent
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of shifts to normalise edx
    _asm shl edx, cl            // normalise edx
    _asm pop ecx                // this back into ecx
    _asm ror eax, 16            // sign/exponent into normal positions
    _asm mov [ecx+4], edx       // store mantissa high word
    _asm mov [ecx+8], eax       // store sign/exponent
    _asm xor eax, eax
    _asm mov [ecx], eax         // zero mantissa low word
    _asm mov eax, ecx           // return eax=this
    _asm ret 4
trealxfromint0:
    _asm mov [ecx], edx
    _asm mov [ecx+4], edx       // store mantissa high word=0
    _asm mov [ecx+8], edx       // store sign/exponent=0
    _asm mov eax, ecx           // return eax=this
    _asm ret 4
    }


__NAKED__ EXPORT_C TRealX::TRealX(TInt /*aInt*/)
/**
Constructs an extended precision object from a signed
integer value.

@param aInt The signed integer value.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return eax=this
    _asm jmp __6TRealXi
    }


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TInt /*aInt*/)
/**
Assigns the specified signed integer value to this extended precision object.

@param aInt The signed integer value.

@return A reference to this extended precision object.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return eax=this
    _asm jmp __6TRealXi
    }


// Shared body for TRealX(TUint) and operator=(TUint).
__NAKED__ LOCAL_C void __6TRealXui()
    {
    // common function for unsigned int to TRealX
    _asm mov edx, [esp+4]       // edx=aInt
    _asm mov eax, 0x7FFF
    _asm or edx, edx            // test for zero
    _asm jz short trealxfromuint0   // branch if 0
    _asm push ecx               // save this
    _asm bsr ecx, edx           // bit number of edx MSB into ecx
    _asm add eax, ecx           // add to eax to form result exponent
    _asm neg cl
    _asm add cl, 31             // 31-bit number = number of shifts to normalise edx
    _asm shl edx, cl            // normalise edx
    _asm pop ecx                // this back into ecx
    _asm shl eax, 16            // exponent into normal position
    _asm mov [ecx+4], edx       // store mantissa high word
    _asm mov [ecx+8], eax       // store exponent
    _asm xor eax, eax
    _asm mov [ecx], eax         // zero mantissa low word
    _asm mov eax, ecx           // return eax=this
    _asm ret 4
trealxfromuint0:
    _asm mov [ecx], edx
    _asm mov [ecx+4], edx       // store mantissa high word=0
    _asm mov [ecx+8], edx       // store sign/exponent=0
    _asm mov eax, ecx           // return eax=this
    _asm ret 4
    }


__NAKED__ EXPORT_C TRealX::TRealX(TUint /*aInt*/)
/**
Constructs an extended precision object from an unsigned integer value.

@param aInt The unsigned integer value.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return eax=this
    _asm jmp __6TRealXui
    }


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TUint /*aInt*/)
/**
Assigns the specified unsigned integer value to this extended precision object.

@param aInt The unsigned integer value.

@return A reference to this extended precision object.
*/
    {
    // on entry ecx=this, [esp+4]=aInt, return eax=this
    _asm jmp __6TRealXui
    }


// Shared body for TRealX(const TInt64&) and operator=(const TInt64&).
__NAKED__ LOCAL_C void __6TRealXRC6TInt64()
    {
    // common function for TInt64 to TRealX
    _asm push ebx               // preserve ebx
    _asm push ecx               // save this
    _asm mov edx, [esp+12]      // edx=address of aInt
    _asm mov ebx, [edx]
    _asm mov edx, [edx+4]       // edx:ebx=aInt
    _asm call TRealXFromTInt64  // convert to TRealX in ecx,edx:ebx
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm pop ebx                // restore ebx
    _asm ret 4                  // return this in eax
    }


__NAKED__ EXPORT_C TRealX::TRealX(const TInt64& /*aInt*/)
/**
Constructs an extended precision object from a 64 bit integer.

@param aInt A reference to a 64 bit integer.
*/
    {
    // on entry ecx=this, [esp+4]=address of aInt, return eax=this
    _asm jmp __6TRealXRC6TInt64
    }


__NAKED__ EXPORT_C TRealX& TRealX::operator=(const TInt64& /*aInt*/)
/**
Assigns the specified 64 bit integer value to this extended precision object.

@param aInt A reference to a 64 bit integer.

@return A reference to this extended precision object.
*/
    {
    // on entry ecx=this, [esp+4]=address of aInt, return eax=this
    _asm jmp __6TRealXRC6TInt64
    }


// Convert an IEEE single (TReal32) to TRealX, handling zero, denormals,
// infinities and NaNs.
__NAKED__ LOCAL_C void ConvertTReal32ToTRealX(void)
    {
    // Convert TReal32 in edx to TRealX in ecx:edx,ebx
    _asm xor ebx, ebx           // mant low always zero
    _asm mov eax, edx
    _asm shr eax, 23            // exponent now in al, sign in ah bit 0
    _asm test al, al            // check for denormal/zero
    _asm jz short treal32totrealx2  // branch if denormal/zero
    _asm xor ecx, ecx
    _asm mov cl, al
    _asm add ecx, 0x7F80        // bias exponent correctly for TRealX
    _asm cmp al, 0xFF           // check for infinity/NaN
    _asm jnz short treal32totrealx1 // skip if neither
    _asm mov cl, al             // else set TRealX exponent to FFFF
    _asm mov ch, al
treal32totrealx1:
    _asm shl edx, 8             // left-justify mantissa in edx
    _asm or edx, 0x80000000     // put in implied integer bit
    _asm shl ecx, 16            // exponent into ecx bits 16-31
    _asm mov cl, ah             // sign into ecx bit 0
    _asm ret
treal32totrealx2:               // come here if exponent 0
    _asm shl edx, 9             // left-justify mantissa in edx (shift out integer bit as well)
    _asm jnz short treal32totrealx3 // jump if denormal
    _asm xor ecx, ecx           // else return 0
    _asm mov cl, ah             // with same sign as input value
    _asm ret
treal32totrealx3:               // come here if denormal
    _asm bsr ecx, edx           // ecx=bit number of MSB of edx
    _asm neg ecx
    _asm add ecx, 31            // ecx=number of left shifts to normalise edx
    _asm shl edx, cl            // normalise
    _asm neg ecx
    _asm add ecx, 0x7F80        // exponent=7F80-number of shifts
    _asm shl ecx, 16            // exponent into ecx bits 16-31
    _asm mov cl, ah             // sign into ecx bit 0
    _asm ret
    }

// Convert an IEEE double (TReal64) to TRealX, handling zero, denormals,
// infinities and NaNs.
__NAKED__ LOCAL_C void ConvertTReal64ToTRealX(void)
    {
    // Convert TReal64 in edx:ebx to TRealX in ecx:edx,ebx
    _asm mov eax, edx
    _asm shr eax, 20
    _asm mov ecx, 0x7FF
    _asm and ecx, eax           // ecx=exponent
    _asm jz short treal64totrealx1  // branch if zero/denormal
    _asm add ecx, 0x7C00        // else bias exponent correctly for TRealX
    _asm cmp ecx, 0x83FF        // check for infinity/NaN
    _asm jnz short treal64totrealx2
    _asm mov ch, cl             // if so, set exponent to FFFF
treal64totrealx2:
    _asm shl ecx, 16            // exponent into ecx bits 16-31
    _asm mov cl, 11             // number of shifts needed to justify mantissa correctly
    _asm shld edx, ebx, cl      // shift mantissa left
    _asm shl ebx, cl
    _asm or edx, 0x80000000     // put in implied integer bit
    _asm shr eax, 11            // sign bit into al bit 0
    _asm mov cl, al             // into ecx bit 0
    _asm ret
treal64totrealx1:               // come here if zero/denormal
    _asm mov cl, 12             // number of shifts needed to justify mantissa correctly
    _asm shld edx, ebx, cl      // shift mantissa left
    _asm shl ebx, cl
    _asm test edx, edx          // check for zero
    _asm jnz short treal64totrealx3
    _asm test ebx, ebx
    _asm jnz short treal64totrealx4
    _asm shr eax, 11            // sign bit into eax bit 0, rest of eax=0
    _asm mov ecx, eax           // return 0 result with correct sign
    _asm ret
treal64totrealx4:               // come here if denormal, edx=0
    _asm mov edx, ebx           // shift mantissa left 32
    _asm xor ebx, ebx
    _asm bsr ecx, edx           // ecx=bit number of MSB of edx
    _asm neg ecx
    _asm add ecx, 31            // ecx=number of left shifts to normalise edx
    _asm shl edx, cl            // normalise
    _asm neg ecx
    _asm add ecx, 0x7BE0        // exponent=7BE0-number of shifts
    _asm shl ecx, 16            // exponent into bits 16-31 of ecx
    _asm shr eax, 11
    _asm mov cl, al             // sign into bit 0 of ecx
    _asm ret
treal64totrealx3:               // come here if denormal, edx nonzero
    _asm bsr ecx, edx           // ecx=bit number of MSB of edx
    _asm neg ecx
    _asm add ecx, 31            // ecx=number of left shifts to normalise edx:ebx
    _asm shld edx, ebx, cl      // normalise
    _asm shl ebx, cl
    _asm neg ecx
    _asm add ecx, 0x7C00        // exponent=7C00-number of shifts
    _asm shl ecx, 16            // exponent into bits 16-31 of ecx
    _asm shr eax, 11
    _asm mov cl, al             // sign into bit 0 of ecx
    _asm ret
    }


__NAKED__ EXPORT_C TInt TRealX::Set(TReal32 /*aReal*/)
/**
Gives this extended precision object a new value taken from
a single precision floating point number.

@param aReal The single precision floating point value.

@return KErrNone, if a valid number;
        KErrOverflow, if the number is infinite;
        KErrArgument, if not a number.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4]
    // on exit, error code in eax
    _asm push ebx               // save ebx
    _asm push ecx               // save this
    _asm mov edx, [esp+12]      // aReal into edx
    _asm call ConvertTReal32ToTRealX
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm xor eax, eax           // error code=KErrNone initially
    _asm cmp ecx, 0xFFFF0000    // check for infinity/NaN
    _asm jb short trealxsettreal32a // if neither, return KErrNone
    _asm mov eax, -9            // eax=KErrOverflow
    _asm cmp edx, 0x80000000    // check for infinity
    _asm je short trealxsettreal32a // if infinity, return KErrOverflow
    _asm mov eax, -6            // if NaN, return KErrArgument
trealxsettreal32a:
    _asm pop ebx
    _asm ret 4
    }


__NAKED__ EXPORT_C TInt TRealX::Set(TReal64 /*aReal*/)
/**
Gives this extended precision object a new value taken from
a double precision floating point number.

@param aReal The double precision floating point value.

@return KErrNone, if a valid number;
        KErrOverflow, if the number is infinite;
        KErrArgument, if not a number.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
    // on exit, error code in eax
    _asm push ebx               // save ebx
    _asm push ecx               // save this
    _asm mov ebx, [esp+12]      // aReal into edx:ebx
    _asm mov edx, [esp+16]
    _asm call ConvertTReal64ToTRealX
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm xor eax, eax           // error code=KErrNone initially
    _asm cmp ecx, 0xFFFF0000    // check for infinity/NaN
    _asm jb short trealxsettreal64a // if neither, return KErrNone
    _asm mov eax, -9            // eax=KErrOverflow
    _asm cmp edx, 0x80000000    // check for infinity
    _asm jne short trealxsettreal64b    // branch if NaN
    _asm test ebx, ebx
    _asm je short trealxsettreal64a // if infinity, return KErrOverflow
trealxsettreal64b:
    _asm mov eax, -6            // if NaN, return KErrArgument
trealxsettreal64a:
    _asm pop ebx
    _asm ret 8
    }


// Shared body for TRealX(TReal32) and operator=(TReal32).
__NAKED__ LOCAL_C void __6TRealXf()
    {
    // common function for float to TRealX
    _asm push ebx               // save ebx
    _asm push ecx               // save this
    _asm mov edx, [esp+12]      // aReal into edx
    _asm call ConvertTReal32ToTRealX
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm pop ebx
    _asm ret 4
    }


__NAKED__ EXPORT_C TRealX::TRealX(TReal32 /*aReal*/)
/**
Constructs an extended precision object from
a single precision floating point number.

@param aReal The single precision floating point value.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4]
    // on exit, eax=this
    _asm jmp __6TRealXf
    }


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal32 /*aReal*/)
/**
Assigns the specified single precision floating point number to
this extended precision object.

@param aReal The single precision floating point value.

@return A reference to this extended precision object.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4]
    // on exit, eax=this
    _asm jmp __6TRealXf
    }


// Shared body for TRealX(TReal64) and operator=(TReal64).
__NAKED__ LOCAL_C void __6TRealXd()
    {
    // common function for double to TRealX
    _asm push ebx               // save ebx
    _asm push ecx               // save this
    _asm mov ebx, [esp+12]      // aReal into edx:ebx
    _asm mov edx, [esp+16]
    _asm call ConvertTReal64ToTRealX
    _asm pop eax                // eax=this
    _asm mov [eax], ebx         // store result
    _asm mov [eax+4], edx
    _asm mov [eax+8], ecx
    _asm pop ebx
    _asm ret 8
    }


__NAKED__ EXPORT_C TRealX::TRealX(TReal64 /*aReal*/)
/**
Constructs an extended precision object from
a double precision floating point number.

@param aReal The double precision floating point value.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
    // on exit, eax=this
    _asm jmp __6TRealXd
    }


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal64 /*aReal*/)
/**
Assigns the specified double precision floating point number to
this extended precision object.

@param aReal The double precision floating point value.

@return A reference to this extended precision object.
*/
    {
    // on entry, ecx=this and aReal is in [esp+4] (mant low) and [esp+8] (sign/exp/mant high)
    // on exit, eax=this
    _asm jmp __6TRealXd
    }


__NAKED__ EXPORT_C TRealX::operator TInt() const
/**
Gets the extended precision value as a signed integer value.

The operator returns:

1. zero , if the extended precision value is not a number

2. 0x7FFFFFFF, if the value is positive and too big to fit into a TInt.

3. 0x80000000, if the value is negative and too big to fit into a TInt.
*/
    {
    // on entry ecx=this, return value in eax
    _asm mov edx, [ecx]         // edx=mantissa low
    _asm mov eax, [ecx+4]       // eax=mantissa high
    _asm mov ecx, [ecx+8]       // ecx=exponent/sign
    _asm ror ecx, 16            // exponent into cx
    _asm cmp cx, 0xFFFF
    _asm jz short trealxtoint1  // branch if exp=FFFF
    _asm mov dx, cx
    _asm mov cx, 0x801E
    _asm sub cx, dx             // cx=number of right shifts needed to convert mantissa to int
    _asm jbe short trealxtoint2 // if exp>=801E, saturate result
    _asm cmp cx, 31             // more than 31 shifts needed?
_asm ja short trealxtoint0      // if so, underflow to zero
    _asm shr eax, cl            // else ABS(result)=eax>>cl
    _asm test ecx, 0x10000      // test sign
    _asm jz short trealxtoint3  // skip if +
    _asm neg eax
trealxtoint3:
    _asm ret
trealxtoint1:                   // come here if exponent=FFFF
    _asm cmp eax, 0x80000000    // check for infinity
    _asm jnz short trealxtoint0 // if NaN, return 0
    _asm test edx, edx
    _asm jnz short trealxtoint0 // if NaN, return 0
trealxtoint2:                   // come here if argument too big for 32-bit integer
    _asm mov eax, 0x7FFFFFFF
    _asm shr ecx, 17            // sign bit into carry flag
    _asm adc eax, 0             // eax=7FFFFFFF if +, 80000000 if -
    _asm ret                    // return saturated value
trealxtoint0:                   // come here if INT(argument)=0 or NaN
    _asm xor eax, eax           // return 0
    _asm ret
    }


__NAKED__ EXPORT_C TRealX::operator TUint() const
/**
Returns the extended precision value as an unsigned signed integer value.

The operator returns:

1. zero, if the extended precision value is not a number

2. 0xFFFFFFFF, if the value is positive and too big to fit into a TUint.

3. zero, if the value is negative and too big to fit into a TUint.
*/
    {
    // on entry ecx=this, return value in eax
    _asm mov edx, [ecx]         // edx=mantissa low
    _asm mov eax, [ecx+4]       // eax=mantissa high
    _asm mov ecx, [ecx+8]       // ecx=exponent/sign
    _asm ror ecx, 16            // exponent into cx
    _asm cmp cx, 0xFFFF
    _asm jz short trealxtouint1 // branch if exp=FFFF
    _asm mov dx, cx
    _asm mov cx, 0x801E
    _asm sub cx, dx             // cx=number of right shifts needed to convert mantissa to int
    _asm jb short trealxtouint2 // if exp>801E, saturate result
    _asm cmp cx, 31             // more than 31 shifts needed?
    _asm ja short trealxtouint0 // if so, underflow to zero
    _asm test ecx, 0x10000      // test sign
    _asm jnz short trealxtouint0    // if -, return 0
    _asm shr eax, cl            // else result=eax>>cl
    _asm ret
trealxtouint1:                  // come here if exponent=FFFF
    _asm cmp eax, 0x80000000    // check for infinity
    _asm jnz short trealxtouint0    // if NaN, return 0
    _asm test edx, edx
    _asm jnz short trealxtouint0    // if NaN, return 0
trealxtouint2:                  // come here if argument too big for 32-bit integer
    _asm mov eax, 0xFFFFFFFF
    _asm shr ecx, 17            // sign bit into carry flag
    _asm adc eax, 0             // eax=FFFFFFFF if +, 0 if -
    _asm ret                    // return saturated value
trealxtouint0:                  // come here if INT(argument)=0 or NaN
    _asm xor eax, eax           // return 0
    _asm ret
    }


// Convert a TRealX to a saturated signed 64-bit integer (used by
// operator TInt64).
__NAKED__ LOCAL_C void ConvertTRealXToTInt64(void)
    {
    // Convert TRealX in ecx,edx:ebx to TInt64 in edx:ebx
    _asm ror ecx, 16            // exponent into cx
    _asm cmp cx, 0xFFFF
    _asm jz short trealxtoint64a    // branch if exp=FFFF
    _asm mov ax, cx
    _asm mov cx, 0x803E
    _asm sub cx, ax             // cx=number of right shifts needed to convert mantissa to int
    _asm jbe short trealxtoint64b   // if exp>=803E, saturate result
    _asm cmp cx, 63             // more than 63 shifts needed?
    _asm ja short trealxtoint64z    // if so, underflow to zero
    _asm cmp cl, 31             // more than 31 shifts needed?
    _asm jbe short trealxtoint64d   // branch if not
    _asm sub cl, 32             // cl=shift count - 32
    _asm mov ebx, edx           // shift right by 32
    _asm xor edx, edx
trealxtoint64d:
    _asm shrd ebx, edx, cl      // shift edx:ebx right by cl to give ABS(result)
    _asm shr edx, cl
    _asm test ecx, 0x10000      // test sign
    _asm jz short trealxtoint64c    // skip if +
    _asm neg edx                // if -, negate
    _asm neg ebx
    _asm sbb edx, 0
trealxtoint64c:
    _asm ret
trealxtoint64a:                 // come here if exponent=FFFF
    _asm cmp edx, 0x80000000    // check for infinity
    _asm jnz short trealxtoint64z   // if NaN, return 0
    _asm test ebx, ebx
    _asm jnz short trealxtoint64z   // if NaN, return 0
trealxtoint64b:                 // come here if argument too big for 32-bit integer
    _asm mov edx, 0x7FFFFFFF
    _asm mov ebx, 0xFFFFFFFF
    _asm shr ecx, 17            // sign bit into carry flag
    _asm adc ebx, 0             // edx:ebx=7FFFFFFF FFFFFFFF if +,
    _asm adc edx, 0             // or 80000000 00000000 if -
    _asm ret                    // return saturated value
trealxtoint64z:                 // come here if INT(argument)=0 or NaN
    _asm xor edx, edx           // return 0
    _asm xor ebx, ebx
    _asm ret
    }


/**
Returns the extended precision value as a 64 bit integer value.

The operator returns:

1. zero, if the extended precision value is not a number

2. 0x7FFFFFFF FFFFFFFF, if the value is positive and too big to fit
   into a TInt64

3. 0x80000000 00000000, if the value is negative and too big to fit
   into a TInt64.
*/
__NAKED__ EXPORT_C TRealX::operator TInt64() const
    {
    // on entry, ecx=this, return value in edx:eax
    _asm push ebx
    _asm mov ebx, [ecx]         // get TRealX value into ecx,edx:ebx
    _asm mov edx, [ecx+4]
    _asm mov ecx, [ecx+8]
    _asm call ConvertTRealXToTInt64
    _asm mov eax, ebx           // store low result into eax
    _asm pop ebx
    _asm ret
    }


// Convert a TRealX to an IEEE single (TReal32) with round-to-nearest-even,
// producing denormals/zero on underflow and infinity on overflow.
__NAKED__ LOCAL_C void TRealXGetTReal32(void)
    {
    // Convert TRealX in ecx,edx:ebx to TReal32 in edx
    // Return error code in eax
    _asm cmp ecx, 0xFFFF0000    // check for infinity/NaN
    _asm jnc short trealxgettreal32a
    _asm xor eax, eax
    _asm ror ecx, 16            // exponent into cx
    _asm sub cx, 0x7F80         // cx=result exponent if normalised
    _asm jbe short trealxgettreal32b    // jump if denormal, zero or underflow
    _asm cmp cx, 0xFF           // check if overflow
    _asm jb short trealxgettreal32c // jump if not
trealxgettreal32d:              // come here if overflow
    _asm xor edx, edx           // set mantissa=0 to generate infinity
    _asm ror ecx, 16            // ecx back to normal format
trealxgettreal32a:              // come here if infinity or NaN
    _asm shr edx, 7
    _asm or edx, 0xFF000000     // set exponent to FF
    _asm shr ecx, 1             // sign bit -> carry
    _asm rcr edx, 1             // sign bit -> MSB of result
    _asm mov eax, edx
    _asm shl eax, 9             // test for infinity or NaN
    _asm mov eax, -9            // eax=KErrOverflow
    _asm jz short trealxgettreal32e
    _asm mov eax, -6            // if NaN, eax=KErrArgument
trealxgettreal32e:
    _asm ret
trealxgettreal32b:              // come here if exponent<=7F80
    _asm cmp cx, -24            // check for zero or total underflow
    _asm jle short trealxgettreal32z
    _asm neg cl
    _asm inc cl                 // cl=number of right shifts to form denormal mantissa
    _asm shrd eax, ebx, cl      // shift mantissa right into eax
    _asm shrd ebx, edx, cl
    _asm shr edx, cl
    _asm or edx, 0x80000000     // set top bit to ensure correct rounding up
    _asm xor cl, cl             // cl=result exponent=0
trealxgettreal32c:              // come here if result normalised
    _asm cmp dl, 0x80           // check rounding bits
    _asm ja short trealxgettreal32f // branch to round up
    _asm jb short trealxgettreal32g // branch to round down
    _asm test ebx, ebx
    _asm jnz short trealxgettreal32f    // branch to round up
    _asm test eax, eax
    _asm jnz short trealxgettreal32f    // branch to round up
    _asm test ecx, 0x01000000   // check rounded-down flag
    _asm jnz short trealxgettreal32f    // branch to round up
    _asm test ecx, 0x02000000   // check rounded-up flag
    _asm jnz short trealxgettreal32g    // branch to round down
    _asm test dh, 1             // else round to even
    _asm jz short trealxgettreal32g // branch to round down if LSB=0
trealxgettreal32f:              // come here to round up
    _asm add edx, 0x100         // increment mantissa
    _asm jnc short trealxgettreal32g
    _asm rcr edx, 1
    _asm inc cl                 // if carry, increment exponent
    _asm cmp cl, 0xFF           // and check for overflow
    _asm jz short trealxgettreal32d // branch out if overflow
trealxgettreal32g:              // come here to round down
    _asm xor dl, dl
    _asm add edx, edx           // shift out integer bit
    _asm mov dl, cl
    _asm ror edx, 8             // exponent->edx bits 24-31, mantissa in 23-1
    _asm test edx, edx          // check if underflow
    _asm jz short trealxgettreal32h // branch out if underflow
    _asm shr ecx, 17            // sign bit->carry
    _asm rcr edx, 1             // ->edx bit 31, exp->edx bits 23-30, mant->edx bits 22-0
    _asm xor eax, eax           // return KErrNone
    _asm ret
trealxgettreal32z:              // come here if zero or underflow
    _asm xor eax, eax
    _asm cmp cx, 0x8080         // check for zero
    _asm jz short trealxgettreal32y // if zero, return KErrNone
trealxgettreal32h:              // come here if underflow after rounding
    _asm mov eax, -10           // eax=KErrUnderflow
trealxgettreal32y:
	_asm xor edx, edx
	_asm shr ecx, 17
	_asm rcr edx, 1						// sign bit into edx bit 31, rest of edx=0 (signed zero)
	_asm ret
	}

__NAKED__ LOCAL_C void TRealXGetTReal64(void)
	{
	// Convert TRealX in ecx,edx:ebx to TReal64 (IEEE double) in edx:ebx
	// Return error code in eax (KErrNone/KErrOverflow/KErrUnderflow/KErrArgument)
	// edi, esi also modified
	_asm ror ecx, 16					// exponent into cx
	_asm cmp cx, 0xFFFF					// check for infinity/NaN
	_asm jnc short trealxgettreal64a
	_asm xor eax, eax
	_asm xor edi, edi
	_asm sub cx, 0x7C00					// rebias 7FFF->3FF: cx=result exponent if normalised
	_asm jbe short trealxgettreal64b	// jump if denormal, zero or underflow
	_asm cmp cx, 0x07FF					// check if overflow
	_asm jb short trealxgettreal64c		// jump if not
trealxgettreal64d:						// come here if overflow
	_asm xor edx, edx					// set mantissa=0 to generate infinity
	_asm xor ebx, ebx
trealxgettreal64a:						// come here if infinity or NaN
	_asm mov cl, 10
	_asm shrd ebx, edx, cl
	_asm shr edx, cl
	_asm or edx, 0xFFE00000				// set exponent to 7FF
	_asm shr ecx, 17					// sign bit -> carry
	_asm rcr edx, 1						// sign bit -> MSB of result
	_asm rcr ebx, 1
	_asm mov eax, edx
	_asm shl eax, 12					// test for infinity or NaN (all mantissa bits zero => infinity)
	_asm mov eax, -9					// eax=KErrOverflow
	_asm jnz short trealxgettreal64n
	_asm test ebx, ebx
	_asm jz short trealxgettreal64e
trealxgettreal64n:
	_asm mov eax, -6					// if NaN, eax=KErrArgument
trealxgettreal64e:
	_asm ret
trealxgettreal64b:						// come here if exponent<=7C00 (denormal or smaller)
	_asm cmp cx, -53					// check for zero or total underflow
	_asm jle trealxgettreal64z
	_asm neg cl
	_asm inc cl							// cl=number of right shifts to form denormal mantissa
	_asm cmp cl, 32
	_asm jb trealxgettreal64x
	_asm mov eax, ebx					// if >=32 shifts, do 32 shifts and decrement count by 32
	_asm mov ebx, edx
	_asm xor edx, edx
trealxgettreal64x:
	_asm shrd edi, eax, cl				// edi collects sticky bits shifted out
	_asm shrd eax, ebx, cl				// shift mantissa right into eax
	_asm shrd ebx, edx, cl
	_asm shr edx, cl
	_asm or edx, 0x80000000				// set top bit to ensure correct rounding up
	_asm xor cx, cx						// cx=result exponent=0
trealxgettreal64c:						// come here if result normalised
	_asm mov esi, ebx
	_asm and esi, 0x7FF					// esi=rounding bits (11 guard bits below the 53-bit mantissa)
	_asm cmp esi, 0x400					// check rounding bits (round-to-nearest, ties-to-even)
	_asm ja short trealxgettreal64f		// branch to round up
	_asm jb short trealxgettreal64g		// branch to round down
	_asm test eax, eax
	_asm jnz short trealxgettreal64f	// branch to round up
	_asm test edi, edi
	_asm jnz short trealxgettreal64f	// branch to round up
	_asm test ecx, 0x01000000			// check rounded-down flag
	_asm jnz short trealxgettreal64f	// branch to round up
	_asm test ecx, 0x02000000			// check rounded-up flag
	_asm jnz short trealxgettreal64g	// branch to round down
	_asm test ebx, 0x800				// else round to even
	_asm jz short trealxgettreal64g		// branch to round down if LSB=0
trealxgettreal64f:						// come here to round up
	_asm add ebx, 0x800					// increment mantissa
	_asm adc edx, 0
	_asm jnc short trealxgettreal64g
	_asm rcr edx, 1
	_asm inc cx							// if carry, increment exponent
	_asm cmp cx, 0x7FF					// and check for overflow
	_asm jz trealxgettreal64d			// branch out if overflow
trealxgettreal64g:						// come here to round down
	_asm xor bl, bl						// clear rounding bits
	_asm and bh, 0xF8
	_asm mov di, cx						// save exponent
	_asm mov cl, 10
	_asm and edx, 0x7FFFFFFF			// clear integer bit
	_asm shrd ebx, edx, cl				// shift mantissa right by 10
	_asm shr edx, cl
	_asm shl edi, 21					// exponent into edi bits 21-31
	_asm or edx, edi					// into edx bits 21-31
	_asm test edx, edx					// check if underflow
	_asm jnz short trealxgettreal64i
	_asm test ebx, ebx
	_asm jz short trealxgettreal64h		// branch out if underflow
trealxgettreal64i:
	_asm shr ecx, 17					// sign bit->carry
	_asm rcr edx, 1						// ->edx bit 31, exp->edx bits 20-30, mant->edx bits 20-0
	_asm rcr ebx, 1
	_asm xor eax, eax					// return KErrNone
	_asm ret
trealxgettreal64z:						// come here if zero or underflow
	_asm xor eax, eax
	_asm cmp cx, 0x8400					// check for zero (original exponent was 0)
	_asm jz short trealxgettreal64y		// if zero, return KErrNone
trealxgettreal64h:						// come here if underflow after rounding
	_asm mov eax, -10					// eax=KErrUnderflow
trealxgettreal64y:
	_asm xor edx, edx
	_asm xor ebx, ebx
	_asm shr ecx, 17
	_asm rcr edx, 1						// sign bit into edx bit 31, rest of edx=0, ebx=0
	_asm ret
	}

__NAKED__ EXPORT_C TRealX::operator TReal32() const
/**
Returns the extended precision value as
a single precision floating point value.
*/
	{
	// On entry, ecx=this
	// On exit, TReal32 value on top of FPU stack
	_asm push ebx
	_asm mov ebx, [ecx]					// *this into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXGetTReal32			// Convert to TReal32 in edx
	_asm push edx						// push TReal32 onto stack
	_asm fld dword ptr [esp]			// push TReal32 onto FPU stack
	_asm pop edx
	_asm pop ebx
	_asm ret
	}

__NAKED__ EXPORT_C TRealX::operator TReal64() const
/**
Returns the extended precision value as
a double precision floating point value.
*/
	{
	// On entry, ecx=this
	// On exit, TReal64 value on top of FPU stack
	_asm push ebx
	_asm push esi
	_asm push edi
	_asm mov ebx, [ecx]					// *this into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXGetTReal64			// Convert to TReal64 in edx:ebx
	_asm push edx						// push TReal64 onto stack
	_asm push ebx
	_asm fld qword ptr [esp]			// push TReal64 onto FPU stack
	_asm add esp, 8
	_asm pop edi
	_asm pop esi
	_asm pop ebx
	_asm ret
	}

__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal32& /*aVal*/) const
/**
Extracts the extended precision value as
a single precision floating point value.

@param aVal A reference to a single precision object which contains
            the result of the operation.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// On entry, ecx=this, [esp+4]=address of aVal
	// On exit, eax=return code
	_asm push ebx
	_asm mov ebx, [ecx]					// *this into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXGetTReal32
	_asm mov ecx, [esp+8]				// ecx=address of aVal ([esp+4] shifted by the ebx push)
	_asm mov [ecx], edx					// store result
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal64& /*aVal*/) const
/**
Extracts the extended precision value as
a double precision floating point value.

@param aVal A reference to a double precision object which
            contains the result of the operation.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// On entry, ecx=this, [esp+4]=address of aVal
	// On exit, eax=return code
	_asm push ebx
	_asm push esi
	_asm push edi
	_asm mov ebx, [ecx]					// *this into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXGetTReal64
	_asm mov ecx, [esp+16]				// ecx=address of aVal ([esp+4] shifted by 3 pushes)
	_asm mov [ecx], ebx					// store result
	_asm mov [ecx+4], edx
	_asm pop edi
	_asm pop esi
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C void TRealX::SetZero(TBool /*aNegative*/)
/**
Sets the value of this extended precision object to zero.

@param aNegative ETrue, the value is a negative zero;
                 EFalse, the value is a positive zero, this is the default.
*/
	{
	_asm mov edx, [esp+4]				// aNegative into edx
	_asm xor eax, eax					// eax=0
	_asm mov [ecx], eax
	_asm mov [ecx+4], eax
	_asm test edx, edx
	_asm jz short setzero1
	_asm inc eax						// eax=1 if aNegative!=0 (sign bit of exponent word)
setzero1:
	_asm mov [ecx+8], eax				// generate positive or negative zero
	_asm ret 4
	}

__NAKED__ EXPORT_C void TRealX::SetNaN()
/**
Sets the value of this extended precision object to 'not a number'.
*/
	{
	_asm xor eax, eax					// set *this to 'real indefinite'
	_asm mov [ecx], eax
	_asm mov eax, 0xC0000000			// mantissa C0000000 00000000
	_asm mov [ecx+4], eax
	_asm mov eax, 0xFFFF0001			// exponent FFFF, sign negative
	_asm mov [ecx+8], eax
	_asm ret
	}

__NAKED__ EXPORT_C void TRealX::SetInfinite(TBool /*aNegative*/)
/**
Sets the value of this extended precision object to infinity.

@param aNegative ETrue, the value is negative infinity;
                 EFalse, the value is positive infinity.
*/
	{
	_asm mov edx, [esp+4]				// aNegative into edx
	_asm mov eax, 0xFFFF0000			// exponent=FFFF, sign=0 initially
	_asm test edx, edx
	_asm jz short setinf1
	_asm inc eax						// sign=1 if aNegative!=0
setinf1:
	_asm mov [ecx+8], eax				// generate positive or negative infinity
	_asm mov eax, 0x80000000			// mantissa 80000000 00000000 denotes infinity
	_asm mov [ecx+4], eax
	_asm xor eax, eax
	_asm mov [ecx], eax
	_asm ret 4
	}

__NAKED__ EXPORT_C TBool TRealX::IsZero() const
/**
Determines whether the extended precision value is zero.

@return True, if the extended precision value is zero, false, otherwise.
*/
	{
	_asm mov eax, [ecx+8]				// check exponent
	_asm shr eax, 16					// move exponent into ax
	_asm jz short iszero1				// branch if zero
	_asm xor eax, eax					// else return 0
	_asm ret
iszero1:
	_asm inc eax						// if zero, return 1
	_asm ret
	}

__NAKED__ EXPORT_C TBool TRealX::IsNaN() const
/**
Determines whether the extended precision value is 'not a number'.

@return True, if the extended precision value is 'not a number',
        false, otherwise.
*/
	{
	// NaN = exponent FFFF with mantissa != 80000000 00000000 (which is infinity)
	_asm mov eax, [ecx+8]				// check exponent
	_asm cmp eax, 0xFFFF0000
	_asm jc short isnan0				// branch if not FFFF
	_asm mov eax, [ecx+4]
	_asm cmp eax, 0x80000000			// check for infinity
	_asm jne short isnan1
	_asm mov eax, [ecx]
	_asm test eax, eax
	_asm jne short isnan1
isnan0:
	_asm xor eax, eax					// return 0 if not NaN
	_asm ret
isnan1:
	_asm mov eax, 1						// return 1 if NaN
	_asm ret
	}

__NAKED__ EXPORT_C TBool TRealX::IsInfinite() const
/**
Determines whether the extended precision value is infinity.

@return True, if the extended precision value is infinite,
        false, otherwise.
*/
	{
	// infinity = exponent FFFF with mantissa exactly 80000000 00000000
	_asm mov eax, [ecx+8]				// check exponent
	_asm cmp eax, 0xFFFF0000
	_asm jc short isinf0				// branch if not FFFF
	_asm mov eax, [ecx+4]
	_asm cmp eax, 0x80000000			// check for infinity
	_asm jne short isinf0
	_asm mov eax, [ecx]
	_asm test eax, eax
	_asm jne short isinf0
	_asm inc eax						// return 1 if infinity (eax was 0 after test)
	_asm ret
isinf0:
	_asm xor eax, eax					// return 0 if not infinity
	_asm ret
	}

__NAKED__ EXPORT_C TBool TRealX::IsFinite() const
/**
Determines whether the extended precision value has a finite value.

@return True, if the extended precision value is finite;
        false, if the value is 'not a number' or is infinite.
*/
	{
	_asm mov eax, [ecx+8]				// check exponent
	_asm cmp eax, 0xFFFF0000			// check for NaN or infinity
	_asm jnc short isfinite0			// branch if NaN or infinity
	_asm mov eax, 1						// return 1 if finite
	_asm ret
isfinite0:
	_asm xor eax, eax					// return 0 if NaN or infinity
	_asm ret
	}

__NAKED__ EXPORT_C const TRealX& TRealX::operator+=(const TRealX& /*aVal*/)
/**
Adds an extended precision value to this extended precision number.

@param aVal The extended precision value to be added.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx ([esp+4] shifted by 4 pushes)
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXAdd					// do addition, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return this in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4
	}

__NAKED__ EXPORT_C const TRealX& TRealX::operator-=(const TRealX& /*aVal*/)
/**
Subtracts an extended precision value from this extended precision number.

@param aVal The extended precision value to be subtracted.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXSubtract			// do subtraction, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return this in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4
	}

__NAKED__ EXPORT_C const TRealX& TRealX::operator*=(const TRealX& /*aVal*/)
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXMultiply			// do multiplication, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return this in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4
	}

__NAKED__ EXPORT_C const TRealX& TRealX::operator/=(const TRealX& /*aVal*/)
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
@panic MATHX KErrDivideByZero if the divisor is zero.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXDivide				// do division, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return this in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4
	}

__NAKED__ EXPORT_C const TRealX& TRealX::operator%=(const TRealX& /*aVal*/)
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return A reference to this object.

@panic MATHX KErrTotalLossOfPrecision panic if precision is lost.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXModulo				// do modulo, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return this in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4
	}

__NAKED__ EXPORT_C TInt TRealX::AddEq(const TRealX& /*aVal*/)
/**
Adds an extended precision value to this extended precision number.
Unlike operator+=, errors are reported via the return code, not a panic.

@param aVal The extended precision value to be added.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXAdd					// do addition, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TInt TRealX::SubEq(const TRealX& /*aVal*/)
/**
Subtracts an extended precision value from this extended precision number.
Unlike operator-=, errors are reported via the return code, not a panic.

@param aVal The extended precision value to be subtracted.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx ([esp+4] shifted by 4 pushes)
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXSubtract			// do subtraction, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TInt TRealX::MultEq(const TRealX& /*aVal*/)
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXMultiply			// do multiplication, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TInt TRealX::DivEq(const TRealX& /*aVal*/)
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow;
        KErrDivideByZero, if the divisor is zero.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXDivide				// do division, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TInt TRealX::ModEq(const TRealX& /*aVal*/)
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrTotalLossOfPrecision, if precision is lost;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+20]				// address of aVal into ecx
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXModulo				// do modulo, result in ecx,edx:ebx, error code in eax
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 4							// return with error code in eax
	}

__NAKED__ EXPORT_C TRealX TRealX::operator+() const
/**
Returns this extended precision number unchanged.

Note that this may also be referred to as a unary plus operator.

@return The extended precision number.
*/
	{
	// struct return: [esp+4] is the caller-allocated return slot
	_asm mov eax, [esp+4]				// eax=address to write return value
	_asm mov edx, [ecx]
	_asm mov [eax], edx
	_asm mov edx, [ecx+4]
	_asm mov [eax+4], edx
	_asm mov edx, [ecx+8]
	_asm mov [eax+8], edx
	_asm ret 4							// return address of return value in eax
	}

__NAKED__ EXPORT_C TRealX TRealX::operator-() const
/**
Negates this extended precision number.

This may also be referred to as a unary minus operator.

@return The negative of the extended precision number.
*/
	{
	// struct return: [esp+4] is the caller-allocated return slot
	_asm mov eax, [esp+4]				// eax=address to write return value
	_asm mov edx, [ecx]
	_asm mov [eax], edx
	_asm mov edx, [ecx+4]
	_asm mov [eax+4], edx
	_asm mov edx, [ecx+8]
	_asm xor dl, 1						// change sign bit (bit 0 of exponent word)
	_asm mov [eax+8], edx
	_asm ret 4							// return address of return value in eax
	}

__NAKED__ EXPORT_C TRealX& TRealX::operator++()
/**
Increments this extended precision number by one,
and then returns a reference to it.

This is also referred to as a prefix operator.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// pre-increment
	// on entry ecx=this, return this in eax
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, 0x7FFF0000			// set ecx,edx:ebx to 1.0
	_asm mov edx, 0x80000000
	_asm xor ebx, ebx
	_asm call TRealXAdd					// add 1 to *this
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax					// check error code
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// else return this in eax
	_asm pop edi
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret
	}

__NAKED__ EXPORT_C TRealX TRealX::operator++(TInt)
/**
Returns this extended precision number before incrementing it by one.

This is also referred to as a postfix operator.

@return An extended precision object containing the value
        before the increment.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// post-increment
	// on entry ecx=this, [esp+4]=address of return value, [esp+8]=dummy int
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov edi, [esp+20]				// address of return value into edi
	_asm mov eax, [ecx]					// copy initial value of *this into [edi]
	_asm mov [edi], eax
	_asm mov eax, [ecx+4]
	_asm mov [edi+4], eax
	_asm mov eax, [ecx+8]
	_asm mov [edi+8], eax
	_asm mov ecx, 0x7FFF0000			// set ecx,edx:ebx to 1.0
	_asm mov edx, 0x80000000
	_asm xor ebx, ebx
	_asm call TRealXAdd					// add 1 to *this
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax					// check error code
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, [esp+20]				// address of return value into eax
	_asm pop edi
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 8
	}

__NAKED__ EXPORT_C TRealX& TRealX::operator--()
/**
Decrements this extended precision number by one,
and then returns a reference to it.

This is also referred to as a prefix operator.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// pre-decrement
	// on entry ecx=this, return this in eax
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, 0x7FFF0001			// set ecx,edx:ebx to -1.0 (sign bit set)
	_asm mov edx, 0x80000000
	_asm xor ebx, ebx
	_asm call TRealXAdd					// add -1 to *this
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax					// check error code
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// else return this in eax
	_asm pop edi
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret
	}

__NAKED__ EXPORT_C TRealX TRealX::operator--(TInt)
/**
Returns this extended precision number before decrementing it by one.

This is also referred to as a postfix operator.

@return An extended precision object containing the value
        before the decrement.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// post-decrement
	// on entry ecx=this, [esp+4]=address of return value, [esp+8]=dummy int
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov edi, [esp+20]				// address of return value into edi
	_asm mov eax, [ecx]					// copy initial value of *this into [edi]
	_asm mov [edi], eax
	_asm mov eax, [ecx+4]
	_asm mov [edi+4], eax
	_asm mov eax, [ecx+8]
	_asm mov [edi+8], eax
	_asm mov ecx, 0x7FFF0001			// set ecx,edx:ebx to -1.0
	_asm mov edx, 0x80000000
	_asm xor ebx, ebx
	_asm call TRealXAdd					// add -1 to *this
	_asm mov [esi], ebx					// store result in *this
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax					// check error code
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, [esp+20]				// address of return value into eax
	_asm pop edi
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 8
	}

__NAKED__ EXPORT_C TRealX TRealX::operator+(const TRealX& /*aVal*/) const
/**
Adds an extended precision value to this extended precision number.

@param aVal The extended precision value to be added.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal
	_asm push ebx						// save registers
	_asm push ebp
	_asm push esi
	_asm push edi
	_asm mov esi, ecx					// this into esi
	_asm mov ecx, [esp+24]				// address of aVal into ecx ([esp+8] shifted by 4 pushes)
	_asm mov ebx, [ecx]					// aVal into ecx,edx:ebx
	_asm mov edx, [ecx+4]
	_asm mov ecx, [ecx+8]
	_asm call TRealXAdd					// do addition, result in ecx,edx:ebx, error code in eax
	_asm mov esi, [esp+20]				// esi=address of return value
	_asm mov [esi], ebx					// store result
	_asm mov [esi+4], edx
	_asm mov [esi+8], ecx
	_asm test eax, eax
	_ASM_jn(z,TRealXPanicEax)			// panic if error
	_asm mov eax, esi					// return address of return value in eax
	_asm pop edi						// restore registers
	_asm pop esi
	_asm pop ebp
	_asm pop ebx
	_asm ret 8
	}

__NAKED__ EXPORT_C TRealX TRealX::operator-(const TRealX& /*aVal*/) const
/**
Subtracts an extended precision value from this extended precision number.

@param aVal The extended precision value to be subtracted.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXSubtract // do subtraction, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of return value sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm test eax, eax sl@0: _ASM_jn(z,TRealXPanicEax) // panic if error sl@0: _asm mov eax, esi // return address of return value in eax sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TRealX TRealX::operator*(const TRealX& /*aVal*/) const sl@0: /** sl@0: Multiplies this extended precision number by an extended precision value. sl@0: sl@0: @param aVal The extended precision value to be used as the multiplier. sl@0: sl@0: @return An extended precision object containing the result. sl@0: sl@0: @panic MATHX KErrOverflow if the operation results in overflow. sl@0: @panic MATHX KErrUnderflow if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXMultiply // do multiplication, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of return value sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm test eax, eax sl@0: _ASM_jn(z,TRealXPanicEax) // panic if error sl@0: _asm mov eax, esi // return address of return value in eax sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TRealX TRealX::operator/(const TRealX& /*aVal*/) const sl@0: /** sl@0: Divides this extended precision number by an extended precision value. sl@0: sl@0: @param aVal The extended precision value to be used as the divisor. sl@0: sl@0: @return An extended precision object containing the result. sl@0: sl@0: @panic MATHX KErrOverflow if the operation results in overflow. sl@0: @panic MATHX KErrUnderflow if the operation results in underflow. sl@0: @panic MATHX KErrDivideByZero if the divisor is zero. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXDivide // do division, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of return value sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm test eax, eax sl@0: _ASM_jn(z,TRealXPanicEax) // panic if error sl@0: _asm mov eax, esi // return address of return value in eax sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TRealX TRealX::operator%(const TRealX& /*aVal*/) const sl@0: /** sl@0: Modulo-divides this extended precision number by an extended precision value. sl@0: sl@0: @param aVal The extended precision value to be used as the divisor. sl@0: sl@0: @return An extended precision object containing the result. sl@0: sl@0: @panic MATHX KErrTotalLossOfPrecision if precision is lost. sl@0: @panic MATHX KErrUnderflow if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of return value, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXModulo // do modulo, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of return value sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm test eax, eax sl@0: _ASM_jn(z,TRealXPanicEax) // panic if error sl@0: _asm mov eax, esi // return address of return value in eax sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TInt TRealX::Add(TRealX& /*aResult*/, const TRealX& /*aVal*/) const sl@0: /** sl@0: Adds an extended precision value to this extended precision number. sl@0: sl@0: @param aResult On return, a reference to an extended precision object sl@0: containing the result of the operation. sl@0: @param aVal The extended precision value to be added. sl@0: sl@0: @return KErrNone, if the operation is successful; sl@0: KErrOverflow, if the operation results in overflow; sl@0: KErrUnderflow, if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXAdd // do addition, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of aResult sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 // return with error code in eax sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TInt TRealX::Sub(TRealX& /*aResult*/, const TRealX& /*aVal*/) const sl@0: /** sl@0: Subtracts an extended precision value from this extended precision number. sl@0: sl@0: @param aResult On return, a reference to an extended precision object sl@0: containing the result of the operation. sl@0: @param aVal The extended precision value to be subtracted. sl@0: sl@0: @return KErrNone, if the operation is successful; sl@0: KErrOverflow, if the operation results in overflow; sl@0: KErrUnderflow, if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXSubtract // do subtraction, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of aResult sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 // return with error code in eax sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TInt TRealX::Mult(TRealX& /*aResult*/, const TRealX& /*aVal*/) const sl@0: /** sl@0: Multiplies this extended precision number by an extended precision value. sl@0: sl@0: @param aResult On return, a reference to an extended precision object sl@0: containing the result of the operation. sl@0: @param aVal The extended precision value to be used as the multiplier. sl@0: sl@0: @return KErrNone, if the operation is successful; sl@0: KErrOverflow, if the operation results in overflow; sl@0: KErrUnderflow, if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXMultiply // do multiplication, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of aResult sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 // return with error code in eax sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TInt TRealX::Div(TRealX& /*aResult*/, const TRealX& /*aVal*/) const sl@0: /** sl@0: Divides this extended precision number by an extended precision value. sl@0: sl@0: @param aResult On return, a reference to an extended precision object sl@0: containing the result of the operation. sl@0: @param aVal The extended precision value to be used as the divisor. sl@0: sl@0: @return KErrNone, if the operation is successful; sl@0: KErrOverflow, if the operation results in overflow; sl@0: KErrUnderflow, if the operation results in underflow; sl@0: KErrDivideByZero, if the divisor is zero. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXDivide // do division, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of aResult sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 // return with error code in eax sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: __NAKED__ EXPORT_C TInt TRealX::Mod(TRealX& /*aResult*/, const TRealX& /*aVal*/) const sl@0: /** sl@0: Modulo-divides this extended precision number by an extended precision value. sl@0: sl@0: @param aResult On return, a reference to an extended precision object sl@0: containing the result of the operation. sl@0: sl@0: @param aVal The extended precision value to be used as the divisor. sl@0: sl@0: @return KErrNone, if the operation is successful; sl@0: KErrTotalLossOfPrecision, if precision is lost; sl@0: KErrUnderflow, if the operation results in underflow. 
sl@0: */ sl@0: { sl@0: // on entry ecx=this, [esp+4]=address of aResult, [esp+8]=address of aVal sl@0: _asm push ebx // save registers sl@0: _asm push ebp sl@0: _asm push esi sl@0: _asm push edi sl@0: _asm mov esi, ecx // this into esi sl@0: _asm mov ecx, [esp+24] // address of aVal into ecx sl@0: _asm mov ebx, [ecx] // aVal into ecx,edx:ebx sl@0: _asm mov edx, [ecx+4] sl@0: _asm mov ecx, [ecx+8] sl@0: _asm call TRealXModulo // do modulo, result in ecx,edx:ebx, error code in eax sl@0: _asm mov esi, [esp+20] // esi=address of aResult sl@0: _asm mov [esi], ebx // store result sl@0: _asm mov [esi+4], edx sl@0: _asm mov [esi+8], ecx sl@0: _asm pop edi // restore registers sl@0: _asm pop esi sl@0: _asm pop ebp sl@0: _asm pop ebx sl@0: _asm ret 8 // return with error code in eax sl@0: } sl@0: sl@0: // Compare TRealX in ecx,edx:ebx (op1) to TRealX at [esi] (op2) sl@0: // Return 1 if op1op2 sl@0: // Return 8 if unordered sl@0: // Return value in eax sl@0: __NAKED__ LOCAL_C void TRealXCompare(void) sl@0: { sl@0: _asm cmp ecx, 0xFFFF0000 // check if op1=NaN or infinity sl@0: _asm jc short fpcmp1 // branch if not sl@0: _asm cmp edx, 0x80000000 // check for infinity sl@0: _asm jnz short fpcmpunord // branch if NaN sl@0: _asm test ebx, ebx sl@0: _asm jz short fpcmp1 // if infinity, process normally sl@0: fpcmpunord: // come here if unordered sl@0: _asm mov eax, 8 // return 8 sl@0: _asm ret sl@0: fpcmp1: // op1 is not a NaN sl@0: _asm mov eax, [esi+8] // get op2 into eax,edi:ebp sl@0: _asm mov edi, [esi+4] sl@0: _asm mov ebp, [esi] sl@0: _asm cmp eax, 0xFFFF0000 // check for NaN or infinity sl@0: _asm jc short fpcmp2 // branch if neither sl@0: _asm cmp edi, 0x80000000 // check for infinity sl@0: _asm jnz short fpcmpunord // branch if NaN sl@0: _asm test ebp, ebp sl@0: _asm jnz short fpcmpunord sl@0: fpcmp2: // neither operand is a NaN sl@0: _asm cmp ecx, 0x10000 // check if op1=0 sl@0: _asm jc short fpcmpop1z // branch if it is sl@0: _asm cmp eax, 0x10000 // check if op2=0 sl@0: 
_asm jc short fpcmp4 // branch if it is sl@0: _asm xor al, cl // check if signs the same sl@0: _asm test al, 1 sl@0: _asm jnz short fpcmp4 // branch if different sl@0: _asm push ecx sl@0: _asm shr ecx, 16 // op1 exponent into cx sl@0: _asm shr eax, 16 // op2 exponent into ax sl@0: _asm cmp ecx, eax // compare exponents sl@0: _asm pop ecx sl@0: _asm ja short fpcmp4 // if op1 exp > op2 exp op1>op2 if +ve sl@0: _asm jb short fpcmp5 // if op1 exp < op2 exp op1ABS(op2) or if signs different sl@0: // or if op2 zero, op1 nonzero sl@0: _asm mov eax, 4 // return 4 if +ve sl@0: _asm test cl, 1 // check sign sl@0: _asm jz short fpcmp4a // skip if + sl@0: _asm mov al, 1 // return 1 if -ve sl@0: fpcmp4a: sl@0: _asm ret sl@0: fpcmp5: // come here if ABS(op1)