1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/euser/epoc/arm/uc_realx.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,4386 @@
1.4 +// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\euser\epoc\arm\uc_realx.cia
1.18 +//
1.19 +//
1.20 +
1.21 +#include <e32cia.h>
1.22 +#include <u32std.h>
1.23 +#include <e32math.h>
1.24 +#ifdef __USE_VFP_MATH
1.25 +#include <arm_vfp.h>
1.26 +#endif
1.27 +
1.28 +#if defined(__USE_VFP_MATH) && !defined(__CPU_HAS_VFP)
1.29 +#error __USE_VFP_MATH was defined but not __CPU_HAS_VFP - impossible combination, check variant.mmh
1.30 +#endif
1.31 +
1.32 +#ifndef __EABI_CTORS__
1.33 +__NAKED__ EXPORT_C TRealX::TRealX()
1.34 +/**
1.35 +Constructs a default extended precision object.
1.36 +
1.37 +This sets the value to zero.
1.38 +*/
1.39 + {
1.40 + asm("mov r1, #0 ");
1.41 + asm("str r1, [r0] ");
1.42 + asm("str r1, [r0, #4] ");
1.43 + asm("str r1, [r0, #8] ");
1.44 + __JUMP(,lr);
1.45 + }
1.46 +
1.47 +
1.48 +
1.49 +
1.50 +__NAKED__ EXPORT_C TRealX::TRealX(TUint /*anExp*/, TUint /*aMantHi*/, TUint /*aMantLo*/)
1.51 +/**
1.52 +Constructs an extended precision object from an explicit exponent and
1.53 +a 64 bit mantissa.
1.54 +
1.55 +@param anExp The exponent
1.56 +@param aMantHi The high order 32 bits of the 64 bit mantissa
1.57 +@param aMantLo The low order 32 bits of the 64 bit mantissa
1.58 +*/
1.59 + {
1.60 + asm("str r1, [r0, #8] ");
1.61 + asm("str r2, [r0, #4] ");
1.62 + asm("str r3, [r0, #0] ");
1.63 + __JUMP(,lr);
1.64 + }
1.65 +#endif
1.66 +
1.67 +
1.68 +
1.69 +
1.70 +
1.71 +__NAKED__ EXPORT_C TInt TRealX::Set(TInt /*anInt*/)
1.72 +/**
1.73 +Gives this extended precision object a new value taken
1.74 +from a signed integer.
1.75 +
1.76 +@param anInt The signed integer value.
1.77 +
1.78 +@return KErrNone, always.
1.79 +*/
1.80 + {
1.81 + asm("stmfd sp!, {lr} ");
1.82 + asm("mov r2, r1 ");
1.83 + asm("bl ConvertIntToTRealX ");
1.84 + asm("stmia r0, {r1,r2,r3} ");
1.85 + asm("mov r0, #0 "); // return KErrNone
1.86 + __POPRET("");
1.87 + }
1.88 +
1.89 +
1.90 +
1.91 +
1.92 +#ifndef __EABI_CTORS__
1.93 +__NAKED__ EXPORT_C TRealX::TRealX(TInt /*anInt*/)
1.94 +/**
1.95 +Constructs an extended precision object from a signed integer value.
1.96 +
1.97 +@param anInt The signed integer value.
1.98 +*/
1.99 + {
1.100 + // fall through
1.101 + }
1.102 +#endif
1.103 +
1.104 +
1.105 +
1.106 +
1.107 +__NAKED__ EXPORT_C TRealX& TRealX::operator=(TInt /*anInt*/)
1.108 +/**
1.109 +Assigns the specified signed integer value to this extended precision object.
1.110 +
1.111 +@param anInt The signed integer value.
1.112 +
1.113 +@return A reference to this extended precision object.
1.114 +*/
1.115 + {
1.116 + asm("stmfd sp!, {lr} ");
1.117 + asm("mov r2, r1 ");
1.118 + asm("bl ConvertIntToTRealX ");
1.119 + asm("stmia r0, {r1,r2,r3} ");
1.120 + __POPRET("");
1.121 +
1.122 + asm("ConvertIntToTRealX: ");
1.123 + asm("cmp r2, #0 ");
1.124 + asm("movpl r3, #0 "); // if int>0, r3=0
1.125 + asm("beq ConvertIntToTRealX0 "); // if int=0, return 0
1.126 + asm("movmi r3, #1 "); // if int<0, r3=1
1.127 + asm("rsbmi r2, r2, #0 "); // if int -ve, negate it
1.128 + asm("orr r3, r3, #0x001E0000 ");
1.129 + asm("orr r3, r3, #0x80000000 "); // r3=exponent 801E + sign bit
1.130 +#ifdef __CPU_ARM_HAS_CLZ
1.131 + CLZ(12,2);
1.132 + asm("mov r2, r2, lsl r12 ");
1.133 + asm("sub r3, r3, r12, lsl #16 ");
1.134 +#else
1.135 + asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
1.136 + asm("movcc r2, r2, lsl #16 ");
1.137 + asm("subcc r3, r3, #0x100000 ");
1.138 + asm("cmp r2, #0x1000000 ");
1.139 + asm("movcc r2, r2, lsl #8 ");
1.140 + asm("subcc r3, r3, #0x080000 ");
1.141 + asm("cmp r2, #0x10000000 ");
1.142 + asm("movcc r2, r2, lsl #4 ");
1.143 + asm("subcc r3, r3, #0x040000 ");
1.144 + asm("cmp r2, #0x40000000 ");
1.145 + asm("movcc r2, r2, lsl #2 ");
1.146 + asm("subcc r3, r3, #0x020000 ");
1.147 + asm("cmp r2, #0x80000000 ");
1.148 + asm("movcc r2, r2, lsl #1 ");
1.149 + asm("subcc r3, r3, #0x010000 ");
1.150 +#endif
1.151 + asm("ConvertIntToTRealX0: ");
1.152 + asm("mov r1, #0 "); // low order word of mantissa = 0
1.153 + __JUMP(,lr);
1.154 + }
1.155 +
1.156 +
1.157 +
1.158 +
1.159 +__NAKED__ EXPORT_C TInt TRealX::Set(const TInt64& /*anInt*/)
1.160 +/**
1.161 +Gives this extended precision object a new value taken from
1.162 +a 64 bit integer.
1.163 +
1.164 +@param anInt The 64 bit integer value.
1.165 +
1.166 +@return KErrNone, always.
1.167 +*/
1.168 + {
1.169 + asm("stmfd sp!, {lr} ");
1.170 + asm("ldmia r1, {r1,r2} ");
1.171 + asm("bl ConvertInt64ToTRealX ");
1.172 + asm("stmia r0, {r1,r2,r3} ");
1.173 + asm("mov r0, #0 "); // return KErrNone
1.174 + __POPRET("");
1.175 + }
1.176 +
1.177 +
1.178 +
1.179 +
1.180 +#ifndef __EABI_CTORS__
1.181 +__NAKED__ EXPORT_C TRealX::TRealX(const TInt64& /*anInt*/)
1.182 +/**
1.183 +Constructs an extended precision object from a 64 bit integer.
1.184 +
1.185 +@param anInt A reference to a 64 bit integer.
1.186 +*/
1.187 + {
1.188 + // fall through
1.189 + }
1.190 +#endif
1.191 +
1.192 +
1.193 +
1.194 +
1.195 +__NAKED__ EXPORT_C TRealX& TRealX::operator=(const TInt64& /*anInt*/)
1.196 +/**
1.197 +Assigns the specified 64 bit integer value to this extended precision object.
1.198 +
1.199 +@param anInt A reference to a 64 bit integer.
1.200 +
1.201 +@return A reference to this extended precision object.
1.202 +*/
1.203 + {
1.204 + asm("stmfd sp!, {lr} ");
1.205 + asm("ldmia r1, {r1,r2} ");
1.206 + asm("bl ConvertInt64ToTRealX ");
1.207 + asm("stmia r0, {r1,r2,r3} ");
1.208 + __POPRET("");
1.209 +
1.210 + asm("ConvertInt64ToTRealX: ");
1.211 + asm("movs r3, r2, lsr #31 "); // sign bit into r3 bit 0
1.212 + asm("beq ConvertInt64ToTRealX1 "); // skip if plus
1.213 + asm("rsbs r1, r1, #0 "); // take absolute value
1.214 + asm("rsc r2, r2, #0 ");
1.215 + asm("ConvertInt64ToTRealX1: ");
1.216 + asm("cmp r2, #0 "); // does it fit into 32 bits?
1.217 + asm("moveq r2, r1 "); // if it does, do 32 bit conversion
1.218 + asm("beq ConvertUintToTRealX1 ");
1.219 +#ifdef __CPU_ARM_HAS_CLZ
1.220 + CLZ(12,2);
1.221 + asm("mov r2, r2, lsl r12 ");
1.222 + asm("rsb r12, r12, #32 ");
1.223 + asm("orr r2, r2, r1, lsr r12 ");
1.224 + asm("rsb r12, r12, #32 ");
1.225 +#else
1.226 + asm("mov r12, #32 "); // 32-number of left-shifts needed to normalise
1.227 + asm("cmp r2, #0x10000 "); // calculate number required
1.228 + asm("movcc r2, r2, lsl #16 ");
1.229 + asm("subcc r12, r12, #16 ");
1.230 + asm("cmp r2, #0x1000000 ");
1.231 + asm("movcc r2, r2, lsl #8 ");
1.232 + asm("subcc r12, r12, #8 ");
1.233 + asm("cmp r2, #0x10000000 ");
1.234 + asm("movcc r2, r2, lsl #4 ");
1.235 + asm("subcc r12, r12, #4 ");
1.236 + asm("cmp r2, #0x40000000 ");
1.237 + asm("movcc r2, r2, lsl #2 ");
1.238 + asm("subcc r12, r12, #2 ");
1.239 + asm("cmp r2, #0x80000000 ");
1.240 + asm("movcc r2, r2, lsl #1 ");
1.241 + asm("subcc r12, r12, #1 "); // r2 is now normalised
1.242 + asm("orr r2, r2, r1, lsr r12 "); // shift r1 left into r2
1.243 + asm("rsb r12, r12, #32 ");
1.244 +#endif
1.245 + asm("mov r1, r1, lsl r12 ");
1.246 + asm("add r3, r3, #0x80000000 "); // exponent = 803E-r12
1.247 + asm("add r3, r3, #0x003E0000 ");
1.248 + asm("sub r3, r3, r12, lsl #16 ");
1.249 + __JUMP(,lr);
1.250 + }
1.251 +
1.252 +
1.253 +
1.254 +
1.255 +__NAKED__ EXPORT_C TInt TRealX::Set(TUint /*anInt*/)
1.256 +/**
1.257 +Gives this extended precision object a new value taken from
1.258 +an unsigned integer.
1.259 +
1.260 +@param anInt The unsigned integer value.
1.261 +
1.262 +@return KErrNone, always.
1.263 +*/
1.264 + {
1.265 + asm("stmfd sp!, {lr} ");
1.266 + asm("mov r2, r1 ");
1.267 + asm("bl ConvertUintToTRealX ");
1.268 + asm("stmia r0, {r1,r2,r3} ");
1.269 + asm("mov r0, #0 "); // return KErrNone
1.270 + __POPRET("");
1.271 + }
1.272 +
1.273 +
1.274 +
1.275 +
1.276 +#ifndef __EABI_CTORS__
1.277 +__NAKED__ EXPORT_C TRealX::TRealX(TUint /*anInt*/)
1.278 +/**
1.279 +Constructs an extended precision object from an unsigned integer value.
1.280 +
1.281 +@param anInt The unsigned integer value.
1.282 +*/
1.283 + {
1.284 + // fall through
1.285 + }
1.286 +#endif
1.287 +
1.288 +
1.289 +
1.290 +
1.291 +__NAKED__ EXPORT_C TRealX& TRealX::operator=(TUint /*anInt*/)
1.292 +/**
1.293 +Assigns the specified unsigned integer value to this extended precision object.
1.294 +
1.295 +@param anInt The unsigned integer value.
1.296 +
1.297 +@return A reference to this extended precision object.
1.298 +*/
1.299 + {
1.300 + asm("stmfd sp!, {lr} ");
1.301 + asm("mov r2, r1 ");
1.302 + asm("bl ConvertUintToTRealX ");
1.303 + asm("stmia r0, {r1,r2,r3} ");
1.304 + __POPRET("");
1.305 +
1.306 + asm("ConvertUintToTRealX: ");
1.307 + asm("mov r3, #0 ");
1.308 + asm("ConvertUintToTRealX1: ");
1.309 + asm("cmp r2, #0 "); // check for zero
1.310 + asm("beq ConvertUintToTRealX0 ");
1.311 + asm("orr r3, r3, #0x001E0000 ");
1.312 + asm("orr r3, r3, #0x80000000 "); // r3=exponent 801E
1.313 +#ifdef __CPU_ARM_HAS_CLZ
1.314 + CLZ(12,2);
1.315 + asm("mov r2, r2, lsl r12 ");
1.316 + asm("sub r3, r3, r12, lsl #16 ");
1.317 +#else
1.318 + asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
1.319 + asm("movcc r2, r2, lsl #16 ");
1.320 + asm("subcc r3, r3, #0x100000 ");
1.321 + asm("cmp r2, #0x1000000 ");
1.322 + asm("movcc r2, r2, lsl #8 ");
1.323 + asm("subcc r3, r3, #0x080000 ");
1.324 + asm("cmp r2, #0x10000000 ");
1.325 + asm("movcc r2, r2, lsl #4 ");
1.326 + asm("subcc r3, r3, #0x040000 ");
1.327 + asm("cmp r2, #0x40000000 ");
1.328 + asm("movcc r2, r2, lsl #2 ");
1.329 + asm("subcc r3, r3, #0x020000 ");
1.330 + asm("cmp r2, #0x80000000 ");
1.331 + asm("movcc r2, r2, lsl #1 ");
1.332 + asm("subcc r3, r3, #0x010000 ");
1.333 +#endif
1.334 + asm("ConvertUintToTRealX0: ");
1.335 + asm("mov r1, #0 "); // low order word of mantissa = 0
1.336 + __JUMP(,lr);
1.337 + }
1.338 +
1.339 +
1.340 +
1.341 +
1.342 +__NAKED__ EXPORT_C void TRealX::SetZero(TBool /*aNegative*/)
1.343 +/**
1.344 +Sets the value of this extended precision object to zero.
1.345 +
1.346 +@param aNegative ETrue, the value is a negative zero;
1.347 + EFalse, the value is a positive zero, this is the default.
1.348 +*/
1.349 + {
1.350 + asm("mov r3, #0 ");
1.351 + asm("cmp r1, #0 ");
1.352 + asm("movne r3, #1 ");
1.353 + asm("mov r2, #0 ");
1.354 + asm("mov r1, #0 ");
1.355 + asm("stmia r0, {r1,r2,r3} ");
1.356 + __JUMP(,lr);
1.357 + }
1.358 +
1.359 +
1.360 +
1.361 +
1.362 +__NAKED__ EXPORT_C void TRealX::SetNaN()
1.363 +/**
1.364 +Sets the value of this extended precision object to 'not a number'.
1.365 +*/
1.366 + {
1.367 + asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] ");
1.368 + asm("mov r2, #0xC0000000 ");
1.369 + asm("mov r1, #0 ");
1.370 + asm("stmia r0, {r1,r2,r3} ");
1.371 + __JUMP(,lr);
1.372 + asm("__RealIndefiniteExponent: ");
1.373 + asm(".word 0xFFFF0001 ");
1.374 + }
1.375 +
1.376 +
1.377 +
1.378 +
1.379 +
1.380 +__NAKED__ EXPORT_C void TRealX::SetInfinite(TBool /*aNegative*/)
1.381 +/**
1.382 +Sets the value of this extended precision object to infinity.
1.383 +
1.384 +@param aNegative ETrue, the value is a negative infinity;
1.385 + EFalse, the value is a positive infinity.
1.386 +*/
1.387 + {
1.388 + asm("ldr r3, [pc, #__InfiniteExponent-.-8] ");
1.389 + asm("cmp r1, #0 ");
1.390 + asm("orrne r3, r3, #1 ");
1.391 + asm("mov r2, #0x80000000 ");
1.392 + asm("mov r1, #0 ");
1.393 + asm("stmia r0, {r1,r2,r3} ");
1.394 + __JUMP(,lr);
1.395 + asm("__InfiniteExponent: ");
1.396 + asm(".word 0xFFFF0000 ");
1.397 + }
1.398 +
1.399 +
1.400 +
1.401 +
1.402 +__NAKED__ EXPORT_C TBool TRealX::IsZero() const
1.403 +/**
1.404 +Determines whether the extended precision value is zero.
1.405 +
1.406 +@return True, if the extended precision value is zero, false, otherwise.
1.407 +*/
1.408 + {
1.409 + asm("ldr r1, [r0, #8] "); // get exponent word
1.410 + asm("mov r0, #0 "); // default return value is 0
1.411 + asm("cmp r1, #0x10000 "); // is exponent=0 ?
1.412 + asm("movcc r0, #1 "); // if so return 1
1.413 + __JUMP(,lr);
1.414 + }
1.415 +
1.416 +
1.417 +
1.418 +
1.419 +__NAKED__ EXPORT_C TBool TRealX::IsNaN() const
1.420 +/**
1.421 +Determines whether the extended precision value is 'not a number'.
1.422 +
1.423 +@return True, if the extended precision value is 'not a number',
1.424 + false, otherwise.
1.425 +*/
1.426 + {
1.427 + asm("ldmia r0, {r1,r2,r3} ");
1.428 + asm("mov r0, #0 "); // default return value is 0
1.429 + asm("cmn r3, #0x10000 "); // check for exponent 65535
1.430 + asm("bcc 1f "); // branch if not
1.431 + asm("cmp r2, #0x80000000 "); // check if infinity
1.432 + asm("cmpeq r1, #0 ");
1.433 + asm("movne r0, #1 "); // if not, return 1
1.434 + asm("1: ");
1.435 + __JUMP(,lr);
1.436 + }
1.437 +
1.438 +
1.439 +
1.440 +
1.441 +__NAKED__ EXPORT_C TBool TRealX::IsInfinite() const
1.442 +/**
1.443 +Determines whether the extended precision value is infinite.
1.444 +
1.445 +@return True, if the extended precision value is infinite;
1.446 + false, otherwise.
1.447 +*/
1.448 + {
1.449 + asm("ldmia r0, {r1,r2,r3} ");
1.450 + asm("mov r0, #0 "); // default return value is 0
1.451 + asm("cmn r3, #0x10000 "); // check for exponent 65535
1.452 + asm("bcc 1f "); // branch if not
1.453 + asm("cmp r2, #0x80000000 "); // check if infinity
1.454 + asm("cmpeq r1, #0 ");
1.455 + asm("moveq r0, #1 "); // if it is, return 1
1.456 + asm("1: ");
1.457 + __JUMP(,lr);
1.458 + }
1.459 +
1.460 +
1.461 +
1.462 +
1.463 +__NAKED__ EXPORT_C TBool TRealX::IsFinite() const
1.464 +/**
1.465 +Determines whether the extended precision value has a finite value.
1.466 +
1.467 +@return True, if the extended precision value is finite,
1.468 + false, if the value is 'not a number' or is infinite.
1.469 +*/
1.470 + {
1.471 + asm("ldr r1, [r0, #8] "); // get exponent word
1.472 + asm("mov r0, #0 "); // default return value is 0
1.473 + asm("cmn r1, #0x10000 "); // is exponent=65535 (infinity or NaN) ?
1.474 + asm("movcc r0, #1 "); // if not return 1
1.475 + __JUMP(,lr);
1.476 + }
1.477 +
1.478 +
1.479 +
1.480 +
1.481 +#ifndef __EABI_CTORS__
1.482 +__NAKED__ EXPORT_C TRealX::TRealX(TReal32 /*aReal*/) __SOFTFP
1.483 +/**
1.484 +Constructs an extended precision object from
1.485 +a single precision floating point number.
1.486 +
1.487 +@param aReal The single precision floating point value.
1.488 +*/
1.489 + {
1.490 + // fall through
1.491 + }
1.492 +#endif
1.493 +
1.494 +
1.495 +
1.496 +
1.497 +__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal32 /*aReal*/) __SOFTFP
1.498 +/**
1.499 +Assigns the specified single precision floating point number to
1.500 +this extended precision object.
1.501 +
1.502 +@param aReal The single precision floating point value.
1.503 +
1.504 +@return A reference to this extended precision object.
1.505 +*/
1.506 + {
1.507 + asm("stmfd sp!, {lr} ");
1.508 + asm("bl ConvertTReal32ToTRealX ");
1.509 + asm("stmia r0, {r1,r2,r3} ");
1.510 + __POPRET("");
1.511 + }
1.512 +
1.513 +
1.514 +
1.515 +
1.516 +__NAKED__ EXPORT_C TInt TRealX::Set(TReal32 /*aReal*/) __SOFTFP
1.517 +/**
1.518 +Gives this extended precision object a new value taken from
1.519 +a single precision floating point number.
1.520 +
1.521 +@param aReal The single precision floating point value.
1.522 +
1.523 +@return KErrNone, if a valid number;
1.524 + KErrOverflow, if the number is infinite;
1.525 + KErrArgument, if not a number.
1.526 +*/
1.527 + {
1.528 + // aReal is in r1 on entry
1.529 + // sign in bit 31, exponent in 30-23, mantissa (non-integer bits) in 22-0
1.530 + asm("stmfd sp!, {lr} ");
1.531 + asm("bl ConvertTReal32ToTRealX ");
1.532 + asm("stmia r0, {r1,r2,r3} ");
1.533 + asm("cmn r3, #0x10000 "); // check for infinity or NaN
1.534 + asm("movcc r0, #0 "); // if neither, return KErrNone
1.535 + asm("bcc trealx_set_treal32_0 ");
1.536 + asm("cmp r2, #0x80000000 "); // check for infinity
1.537 + asm("mvneq r0, #8 "); // if so, return KErrOverflow
1.538 + asm("mvnne r0, #5 "); // else return KErrArgument
1.539 + asm("trealx_set_treal32_0: ");
1.540 + __POPRET("");
1.541 +
1.542 + // Convert 32-bit real in r1 to TRealX in r1,r2,r3
1.543 + // r0 unmodified, r1,r2,r3,r12 modified
1.544 + asm("ConvertTReal32ToTRealX: ");
1.545 + asm("mov r3, r1, lsr #7 "); // r3 bits 16-31 = TReal32 exponent
1.546 + asm("ands r3, r3, #0x00FF0000 ");
1.547 + asm("mov r2, r1, lsl #8 "); // r2 = TReal32 mantissa << 8, bit 31 not yet in
1.548 + asm("orrne r2, r2, #0x80000000 "); // if not zero/denormal, put in implied integer bit
1.549 + asm("orr r3, r3, r1, lsr #31 "); // r3 bit 0 = sign bit
1.550 + asm("mov r1, #0 "); // low word of mantissa = 0
1.551 + asm("beq ConvertTReal32ToTRealX0 "); // branch if zero/denormal
1.552 + asm("cmp r3, #0x00FF0000 "); // check for infinity or NaN
1.553 + asm("orrcs r3, r3, #0xFF000000 "); // if infinity or NaN, exponent = FFFF
1.554 + asm("addcc r3, r3, #0x7F000000 "); // else exponent = TReal32 exponent + 7F80
1.555 + asm("addcc r3, r3, #0x00800000 ");
1.556 + __JUMP(,lr);
1.557 + asm("ConvertTReal32ToTRealX0: "); // come here if zero or denormal
1.558 + asm("adds r2, r2, r2 "); // shift mantissa left one more and check if zero
1.559 + __JUMP(eq,lr);
1.560 + asm("add r3, r3, #0x7F000000 "); // else exponent = 7F80 (highest denormal exponent)
1.561 + asm("add r3, r3, #0x00800000 ");
1.562 +#ifdef __CPU_ARM_HAS_CLZ
1.563 + CLZ(12,2);
1.564 + asm("mov r2, r2, lsl r12 ");
1.565 + asm("sub r3, r3, r12, lsl #16 ");
1.566 +#else
1.567 + asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
1.568 + asm("movcc r2, r2, lsl #16 ");
1.569 + asm("subcc r3, r3, #0x100000 ");
1.570 + asm("cmp r2, #0x1000000 ");
1.571 + asm("movcc r2, r2, lsl #8 ");
1.572 + asm("subcc r3, r3, #0x080000 ");
1.573 + asm("cmp r2, #0x10000000 ");
1.574 + asm("movcc r2, r2, lsl #4 ");
1.575 + asm("subcc r3, r3, #0x040000 ");
1.576 + asm("cmp r2, #0x40000000 ");
1.577 + asm("movcc r2, r2, lsl #2 ");
1.578 + asm("subcc r3, r3, #0x020000 ");
1.579 + asm("cmp r2, #0x80000000 ");
1.580 + asm("movcc r2, r2, lsl #1 ");
1.581 + asm("subcc r3, r3, #0x010000 ");
1.582 +#endif
1.583 + __JUMP(,lr);
1.584 + }
1.585 +
1.586 +
1.587 +
1.588 +
1.589 +#ifndef __EABI_CTORS__
1.590 +__NAKED__ EXPORT_C TRealX::TRealX(TReal64 /*aReal*/) __SOFTFP
1.591 +/**
1.592 +Constructs an extended precision object from
1.593 +a double precision floating point number.
1.594 +
1.595 +@param aReal The double precision floating point value.
1.596 +*/
1.597 + {
1.598 + // fall through
1.599 + }
1.600 +#endif
1.601 +
1.602 +
1.603 +
1.604 +
1.605 +__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal64 /*aReal*/) __SOFTFP
1.606 +/**
1.607 +Assigns the specified double precision floating point number to
1.608 +this extended precision object.
1.609 +
1.610 +@param aReal The double precision floating point value.
1.611 +
1.612 +@return A reference to this extended precision object.
1.613 +*/
1.614 + {
1.615 + asm("stmfd sp!, {lr} ");
1.616 + asm("bl ConvertTReal64ToTRealX ");
1.617 + asm("stmia r0, {r1,r2,r3} ");
1.618 + __POPRET("");
1.619 + }
1.620 +
1.621 +
1.622 +
1.623 +
1.624 +__NAKED__ EXPORT_C TInt TRealX::Set(TReal64 /*aReal*/) __SOFTFP
1.625 +/**
1.626 +Gives this extended precision object a new value taken from
1.627 +a double precision floating point number.
1.628 +
1.629 +@param aReal The double precision floating point value.
1.630 +
1.631 +@return KErrNone, if a valid number;
1.632 + KErrOverflow, if the number is infinite;
1.633 + KErrArgument, if not a number.
1.634 +*/
1.635 + {
1.636 + // aReal is in r1,r2 on entry
1.637 + // sign in bit 31 of r1, exponent in 30-20 of r1
1.638 + // mantissa (non-integer bits) in 19-0 of r1 (high) and r2 (low)
1.639 + asm("stmfd sp!, {lr} ");
1.640 + asm("bl ConvertTReal64ToTRealX ");
1.641 + asm("stmia r0, {r1,r2,r3} ");
1.642 + asm("cmn r3, #0x10000 "); // check for infinity or NaN
1.643 + asm("movcc r0, #0 "); // if neither, return KErrNone
1.644 + asm("bcc trealx_set_treal64_0 ");
1.645 + asm("cmp r2, #0x80000000 "); // check for infinity
1.646 + asm("cmpeq r1, #0 ");
1.647 + asm("mvneq r0, #8 "); // if so, return KErrOverflow
1.648 + asm("mvnne r0, #5 "); // else return KErrArgument
1.649 + asm("trealx_set_treal64_0: ");
1.650 + __POPRET("");
1.651 +
1.652 + // convert TReal64 in r1,r2 in GCC and r2 and r3 in RVCT
1.653 + // if __DOUBLE_WORDS_SWAPPED__ r1=sign,exp,high mant, r2=low mant
1.654 + // else r1 unused , r2=low mant, r3=sign,exp,high mant (as a result of EABI alignment reqs)
1.655 + // into TRealX in r1,r2,r3 (r2,r1=mant high,low r3=exp,flag,sign)
1.656 + // r0 unmodified, r1,r2,r3,r12 modified
1.657 + asm("ConvertTReal64ToTRealX: ");
1.658 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.659 + asm("mov r12, r2 "); // ls word of mantissa into r12
1.660 +#else
1.661 + asm("mov r12, r2 "); // ls word of mantissa into r12
1.662 + asm("mov r1, r3 ");
1.663 +#endif
1.664 + asm("mov r3, r1, lsr #20 "); // sign and exp into bottom 12 bits of r3
1.665 + asm("mov r2, r1, lsl #11 "); // left justify mantissa in r2,r1
1.666 + asm("mov r3, r3, lsl #16 "); // and into bits 16-27
1.667 + asm("bics r3, r3, #0x08000000 "); // remove sign, leaving exponent in bits 16-26
1.668 + asm("orr r2, r2, r12, lsr #21 ");
1.669 + asm("orrne r2, r2, #0x80000000 "); // if not zero/denormal, put in implied integer bit
1.670 + asm("orr r3, r3, r1, lsr #31 "); // sign bit into bit 0 of r3
1.671 + asm("mov r1, r12, lsl #11 ");
1.672 + asm("beq ConvertTReal64ToTRealX0 "); // branch if zero or denormal
1.673 + asm("mov r12, r3, lsl #5 "); // exponent into bits 21-31 of r12
1.674 + asm("cmn r12, #0x00200000 "); // check if exponent=7FF (infinity or NaN)
1.675 + asm("addcs r3, r3, #0xF8000000 "); // if so, result exponent=FFFF
1.676 + asm("addcc r3, r3, #0x7C000000 "); // else result exponent = TReal64 exponent + 7C00
1.677 + __JUMP(,lr);
1.678 + asm("ConvertTReal64ToTRealX0: "); // come here if zero or denormal
1.679 + asm("adds r1, r1, r1 "); // shift mantissa left one more bit
1.680 + asm("adcs r2, r2, r2 ");
1.681 + asm("cmpeq r1, #0 "); // and test for zero
1.682 + __JUMP(eq,lr);
1.683 + asm("add r3, r3, #0x7C000000 "); // else exponent=7C00 (highest denormal exponent)
1.684 + asm("cmp r2, #0 "); // normalise - first check if r2=0
1.685 + asm("moveq r2, r1 "); // if so, shift up by 32
1.686 + asm("moveq r1, #0 ");
1.687 + asm("subeq r3, r3, #0x200000 "); // and subtract 32 from exponent
1.688 +#ifdef __CPU_ARM_HAS_CLZ
1.689 + CLZ(12,2);
1.690 + asm("mov r2, r2, lsl r12 ");
1.691 + asm("rsb r12, r12, #32 ");
1.692 + asm("orr r2, r2, r1, lsr r12 ");
1.693 + asm("rsb r12, r12, #32 ");
1.694 +#else
1.695 + asm("mov r12, #32 "); // 32-number of left-shifts needed to normalise
1.696 + asm("cmp r2, #0x10000 "); // calculate number required
1.697 + asm("movcc r2, r2, lsl #16 ");
1.698 + asm("subcc r12, r12, #16 ");
1.699 + asm("cmp r2, #0x1000000 ");
1.700 + asm("movcc r2, r2, lsl #8 ");
1.701 + asm("subcc r12, r12, #8 ");
1.702 + asm("cmp r2, #0x10000000 ");
1.703 + asm("movcc r2, r2, lsl #4 ");
1.704 + asm("subcc r12, r12, #4 ");
1.705 + asm("cmp r2, #0x40000000 ");
1.706 + asm("movcc r2, r2, lsl #2 ");
1.707 + asm("subcc r12, r12, #2 ");
1.708 + asm("cmp r2, #0x80000000 ");
1.709 + asm("movcc r2, r2, lsl #1 ");
1.710 + asm("subcc r12, r12, #1 "); // r2 is now normalised
1.711 + asm("orr r2, r2, r1, lsr r12 "); // shift r1 left into r2
1.712 + asm("rsb r12, r12, #32 ");
1.713 +#endif
1.714 + asm("mov r1, r1, lsl r12 ");
1.715 + asm("sub r3, r3, r12, lsl #16 "); // exponent -= number of left shifts
1.716 + __JUMP(,lr);
1.717 + }
1.718 +
1.719 +
1.720 +
1.721 +
1.722 +
1.723 +__NAKED__ EXPORT_C TRealX::operator TInt() const
1.724 +/**
1.725 +Gets the extended precision value as a signed integer value.
1.726 +
1.727 +The operator returns:
1.728 +
1.729 +1. zero , if the extended precision value is not a number
1.730 +
1.731 +2. 0x7FFFFFFF, if the value is positive and too big to fit into a TInt.
1.732 +
1.733 +3. 0x80000000, if the value is negative and too big to fit into a TInt.
1.734 +*/
1.735 + {
1.736 + asm("ldmia r0, {r1,r2,r3} "); // get value into r1,r2,r3
1.737 +
1.738 + asm("ConvertTRealXToInt: ");
1.739 + asm("mov r12, #0x8000 "); // r12=0x801E
1.740 + asm("orr r12, r12, #0x001E ");
1.741 + asm("subs r12, r12, r3, lsr #16 "); // r12=801E-exponent
1.742 + asm("bls ConvertTRealXToInt1 "); // branch if exponent>=801E
1.743 + asm("cmp r12, #31 "); // test if exponent<7FFF
1.744 + asm("movhi r0, #0 "); // if so, underflow result to zero
1.745 + __JUMP(hi,lr);
1.746 + asm("mov r0, r2, lsr r12 "); // shift mantissa right to form integer
1.747 + asm("tst r3, #1 "); // check sign bit
1.748 + asm("rsbne r0, r0, #0 "); // if negative, r0=-r0
1.749 + __JUMP(,lr);
1.750 + asm("ConvertTRealXToInt1: ");
1.751 + asm("cmn r3, #0x10000 "); // check for infinity or NaN
1.752 + asm("bcc ConvertTRealXToInt2 "); // branch if neither
1.753 + asm("cmp r2, #0x80000000 "); // check for infinity
1.754 + asm("cmpeq r1, #0 ");
1.755 + asm("movne r0, #0 "); // if NaN, return 0
1.756 + __JUMP(ne,lr);
1.757 + asm("ConvertTRealXToInt2: ");
1.758 + asm("mov r0, #0x80000000 "); // return 0x80000000 if -ve overflow, 0x7FFFFFFF if +ve
1.759 + asm("movs r3, r3, lsr #1 ");
1.760 + asm("sbc r0, r0, #0 ");
1.761 + __JUMP(,lr);
1.762 + }
1.763 +
1.764 +
1.765 +
1.766 +
1.767 +__NAKED__ EXPORT_C TRealX::operator TUint() const
1.768 +/**
1.769 +Returns the extended precision value as an unsigned integer value.
1.770 +
1.771 +The operator returns:
1.772 +
1.773 +1. zero, if the extended precision value is not a number
1.774 +
1.775 +2. 0xFFFFFFFF, if the value is positive and too big to fit into a TUint.
1.776 +
1.777 +3. zero, if the value is negative and too big to fit into a TUint.
1.778 +*/
1.779 + {
1.780 + asm("ldmia r0, {r1,r2,r3} "); // get value into r1,r2,r3
1.781 +
1.782 + asm("ConvertTRealXToUint: ");
1.783 + asm("mov r12, #0x8000 "); // r12=0x801E
1.784 + asm("orr r12, r12, #0x001E ");
1.785 + asm("subs r12, r12, r3, lsr #16 "); // r12=801E-exponent
1.786 + asm("bcc ConvertTRealXToUint1 "); // branch if exponent>801E
1.787 + asm("cmp r12, #31 "); // test if exponent<7FFF
1.788 + asm("movhi r0, #0 "); // if so, underflow result to zero
1.789 + __JUMP(hi,lr);
1.790 + asm("tst r3, #1 "); // check sign bit
1.791 + asm("moveq r0, r2, lsr r12 "); // if +ve, shift mantissa right to form integer
1.792 + asm("movne r0, #0 "); // if negative, r0=0
1.793 + __JUMP(,lr);
1.794 + asm("ConvertTRealXToUint1: ");
1.795 + asm("mov r0, #0 "); // r0=0 initially
1.796 + asm("cmn r3, #0x10000 "); // check for infinity or NaN
1.797 + asm("bcc ConvertTRealXToUint2 "); // branch if neither
1.798 + asm("cmp r2, #0x80000000 "); // check for infinity
1.799 + asm("cmpeq r1, #0 ");
1.800 + __JUMP(ne,lr);
1.801 + asm("ConvertTRealXToUint2: ");
1.802 + asm("movs r3, r3, lsr #1 "); // sign bit into carry
1.803 + asm("sbc r0, r0, #0 "); // r0=0 if -ve, 0xFFFFFFFF if +ve
1.804 + __JUMP(,lr);
1.805 + }
1.806 +
1.807 +
1.808 +
1.809 +
1.810 +__NAKED__ EXPORT_C TRealX::operator TInt64() const
1.811 +/**
1.812 +Returns the extended precision value as a 64 bit integer value.
1.813 +
1.814 +The operator returns:
1.815 +
1.816 +1. zero, if the extended precision value is not a number
1.817 +
1.818 +2. 0x7FFFFFFF FFFFFFFF, if the value is positive and too big to fit
1.819 + into a TInt64
1.820 +
1.821 +3. 0x80000000 00000000, if the value is negative and too big to fit
1.822 + into a TInt64.
1.823 +*/
1.824 + {
1.825 + // r0 = this, result in r1:r0
1.826 + asm("ldmia r0, {r0,r1,r2} "); // get value into r0,r1,r2
1.827 + asm("ConvertTRealXToInt64: ");
1.828 + asm("mov r3, #0x8000 "); // r3=0x803E
1.829 + asm("orr r3, r3, #0x003E ");
1.830 + asm("subs r3, r3, r2, lsr #16 "); // r3=803E-exponent
1.831 + asm("bls ConvertTRealXToInt64a "); // branch if exponent>=803E
1.832 + asm("cmp r3, #63 "); // test if exponent<7FFF
1.833 + asm("movhi r1, #0 "); // if so, underflow result to zero
1.834 + asm("movhi r0, #0 ");
1.835 + __JUMP(hi,lr);
1.836 + asm("cmp r3, #32 "); // >=32 shifts required?
1.837 + asm("subcs r3, r3, #32 "); // if so, r3-=32
1.838 + asm("movcs r0, r1, lsr r3 "); // r1:r0 >>= (r3+32)
1.839 + asm("movcs r1, #0 ");
1.840 + asm("movcc r0, r0, lsr r3 "); // else r1:r0>>=r3
1.841 + asm("rsbcc r3, r3, #32 ");
1.842 + asm("orrcc r0, r0, r1, lsl r3 ");
1.843 + asm("rsbcc r3, r3, #32 ");
1.844 + asm("movcc r1, r1, lsr r3 "); // r1:r0 = absolute integer
1.845 + asm("tst r2, #1 "); // check sign bit
1.846 + __JUMP(eq,lr);
1.847 + asm("rsbs r0, r0, #0 "); // else negate answer
1.848 + asm("rsc r1, r1, #0 ");
1.849 + __JUMP(,lr);
1.850 + asm("ConvertTRealXToInt64a: ");
1.851 + asm("cmn r2, #0x10000 "); // check for infinity or NaN
1.852 + asm("bcc ConvertTRealXToInt64b "); // branch if neither
1.853 + asm("cmp r1, #0x80000000 "); // check for infinity
1.854 + asm("cmpeq r0, #0 ");
1.855 + asm("movne r1, #0 "); // if NaN, return 0
1.856 + asm("movne r0, #0 ");
1.857 + __JUMP(ne,lr);
1.858 + asm("ConvertTRealXToInt64b: ");
1.859 + asm("mov r1, #0x80000000 "); // return KMaxTInt64/KMinTInt64 depending on sign
1.860 + asm("mov r0, #0 ");
1.861 + asm("movs r2, r2, lsr #1 ");
1.862 + asm("sbcs r0, r0, #0 ");
1.863 + asm("sbc r1, r1, #0 ");
1.864 + __JUMP(,lr);
1.865 + }
1.866 +
1.867 +
1.868 +
1.869 +
1.870 +__NAKED__ EXPORT_C TRealX::operator TReal32() const __SOFTFP
1.871 +/**
1.872 +Returns the extended precision value as
1.873 +a single precision floating point value.
1.874 +*/
1.875 + {
1.876 + asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3=input value
1.877 +
1.878 + // Convert TRealX in r1,r2,r3 to TReal32 in r0
1.879 + asm("ConvertTRealXToTReal32: ");
1.880 + asm("mov r12, #0x8000 ");
1.881 + asm("orr r12, r12, #0x007F "); // r12=0x807F
1.882 + asm("cmp r3, r12, lsl #16 "); // check if exponent>=807F
1.883 + asm("bcs ConvertTRealXToTReal32a "); // branch if it is
1.884 + asm("sub r12, r12, #0x00FF "); // r12=0x7F80
1.885 + asm("rsbs r12, r12, r3, lsr #16 "); // r12=exp in - 7F80 = result exponent if in range
1.886 + asm("bgt ConvertTRealXToTReal32b "); // branch if normalised result
1.887 + asm("cmn r12, #23 "); // check for total underflow or zero
1.888 + asm("movlt r0, r3, lsl #31 "); // in this case, return zero with appropriate sign
1.889 + __JUMP(lt,lr);
1.890 + asm("add r12, r12, #31 "); // r12=32-mantissa shift required = 32-(1-r12)
1.891 + asm("movs r0, r1, lsl r12 "); // r0=lost bits when r2:r1 is shifted
1.892 + asm("bicne r3, r3, #0x300 "); // if these are not zero, set rounded down flag
1.893 + asm("orrne r3, r3, #0x100 ");
1.894 + asm("rsb r0, r12, #32 ");
1.895 + asm("mov r1, r1, lsr r0 ");
1.896 + asm("orr r1, r1, r2, lsl r12 ");
1.897 + asm("mov r2, r2, lsr r0 "); // r2 top 24 bits now give unrounded result mantissa
1.898 + asm("mov r12, #0 "); // result exponent will be zero
1.899 + asm("ConvertTRealXToTReal32b: ");
1.900 + asm("movs r0, r2, lsl #24 "); // top 8 truncated bits into top byte of r0
1.901 + asm("bpl ConvertTRealXToTReal32c "); // if top bit clear, truncate
1.902 + asm("cmp r0, #0x80000000 ");
1.903 + asm("cmpeq r1, #0 "); // compare rounding bits to 1000...
1.904 + asm("bhi ConvertTRealXToTReal32d "); // if >, round up
1.905 + asm("movs r0, r3, lsl #23 "); // round up flag into C, round down flag into N
1.906 + asm("bcs ConvertTRealXToTReal32c "); // if rounded up, truncate
1.907 + asm("bmi ConvertTRealXToTReal32d "); // if rounded down, round up
1.908 + asm("tst r2, #0x100 "); // else round to even - test LSB of result mantissa
1.909 + asm("beq ConvertTRealXToTReal32c "); // if zero, truncate, else round up
1.910 + asm("ConvertTRealXToTReal32d: "); // come here to round up
1.911 + asm("adds r2, r2, #0x100 "); // increment the mantissa
1.912 + asm("movcs r2, #0x80000000 "); // if carry, mantissa=800000
1.913 + asm("addcs r12, r12, #1 "); // and increment exponent
1.914 + asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
1.915 + asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
1.916 + asm("ConvertTRealXToTReal32c: "); // come here to truncate
1.917 + asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1.918 + asm("orr r0, r0, r12, lsl #23 "); // exponent into r0 bits 23-30
1.919 + asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1.920 + asm("orr r0, r0, r2, lsr #8 "); // non-integer mantissa bits into r0 bits 0-22
1.921 + __JUMP(,lr);
1.922 + asm("ConvertTRealXToTReal32a: "); // come here if overflow, infinity or NaN
1.923 + asm("cmn r3, #0x10000 "); // check for infinity or NaN
1.924 + asm("movcc r2, #0 "); // if not, set mantissa to 0 for infinity result
1.925 + asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1.926 + asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1.927 + asm("orr r0, r0, #0x7F000000 "); // r0 bits 23-30 = FF = exponent
1.928 + asm("orr r0, r0, #0x00800000 ");
1.929 + asm("orr r0, r0, r2, lsr #8 "); // r0 bits 0-22 = result mantissa
1.930 + __JUMP(,lr);
1.931 + }
1.932 +
1.933 +
1.934 +
1.935 +
1.936 +__NAKED__ EXPORT_C TRealX::operator TReal64() const __SOFTFP
1.937 +/**
1.938 +Returns the extended precision value as a double precision (TReal64) value,
1.939 +rounding to nearest with ties rounded to even; denormals, infinities and NaNs are produced as appropriate.
1.940 +*/
1.941 +	{
1.942 +	asm("ldmia r0, {r1,r2,r3} ");	// r1,r2,r3=input value (r0=this; r1=mant low, r2=mant high, r3=exp/sign/flags)
1.943 +
1.944 +	// Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
1.945 +	// if __DOUBLE_WORDS_SWAPPED__ r0=sign,exp,high mant, r1=low mant
1.946 +	// else r0, r1 reversed
1.947 +	asm("ConvertTRealXToTReal64: ");
1.948 +	asm("mov r12, #0x8300 ");
1.949 +	asm("orr r12, r12, #0x00FF ");	// r12=0x83FF = TRealX exponent of 2^1024, first value too big for TReal64
1.950 +	asm("cmp r3, r12, lsl #16 ");	// check if exponent>=83FF
1.951 +	asm("bcs ConvertTRealXToTReal64a ");	// branch if it is
1.952 +	asm("mov r12, #0x7C00 ");
1.953 +	asm("rsbs r12, r12, r3, lsr #16 ");	// r12=exp in - 7C00 = result exponent if in range
1.954 +	asm("bgt ConvertTRealXToTReal64b ");	// branch if normalised result
1.955 +	asm("cmn r12, #52 ");	// check for total underflow or zero
1.956 +	asm("movlt r0, r3, lsl #31 ");	// in this case, return zero with appropriate sign
1.957 +	asm("movlt r1, #0 ");
1.958 +	asm("blt ConvertTRealXToTReal64_end ");
1.959 +
1.960 +	asm("adds r12, r12, #31 ");	// check if >=32 shifts needed, r12=32-shift count
1.961 +	asm("ble ConvertTRealXToTReal64e ");	// branch if >=32 shifts needed
1.962 +	asm("movs r0, r1, lsl r12 ");	// r0=lost bits when r2:r1 is shifted
1.963 +	asm("bicne r3, r3, #0x300 ");	// if these are not zero, set rounded down flag
1.964 +	asm("orrne r3, r3, #0x100 ");
1.965 +	asm("rsb r0, r12, #32 ");	// r0=shift count
1.966 +	asm("mov r1, r1, lsr r0 ");
1.967 +	asm("orr r1, r1, r2, lsl r12 ");
1.968 +	asm("mov r2, r2, lsr r0 ");	// r2:r1 top 53 bits = unrounded result mantissa
1.969 +	asm("b ConvertTRealXToTReal64f ");
1.970 +	asm("ConvertTRealXToTReal64e: ");
1.971 +	asm("add r12, r12, #32 ");	// r12=64-shift count
1.972 +	asm("cmp r1, #0 ");	// r1 bits are all lost - test them
1.973 +	asm("moveqs r0, r2, lsl r12 ");	// if zero, test lost bits from r2
1.974 +	asm("bicne r3, r3, #0x300 ");	// if lost bits not all zero, set rounded down flag
1.975 +	asm("orrne r3, r3, #0x100 ");
1.976 +	asm("rsb r0, r12, #32 ");	// r0=shift count-32
1.977 +	asm("mov r1, r2, lsr r0 ");	// shift r2:r1 right
1.978 +	asm("mov r2, #0 ");
1.979 +	asm("ConvertTRealXToTReal64f: ");
1.980 +	asm("mov r12, #0 ");	// result exponent will be zero for denormals
1.981 +	asm("ConvertTRealXToTReal64b: ");
1.982 +	asm("movs r0, r1, lsl #21 ");	// 11 rounding bits to top of r0
1.983 +	asm("bpl ConvertTRealXToTReal64c ");	// if top bit clear, truncate
1.984 +	asm("cmp r0, #0x80000000 ");	// compare rounding bits to 10000000000
1.985 +	asm("bhi ConvertTRealXToTReal64d ");	// if >, round up
1.986 +	asm("movs r0, r3, lsl #23 ");	// round up flag into C, round down flag into N
1.987 +	asm("bcs ConvertTRealXToTReal64c ");	// if rounded up, truncate
1.988 +	asm("bmi ConvertTRealXToTReal64d ");	// if rounded down, round up
1.989 +	asm("tst r1, #0x800 ");	// else round to even - test LSB of result mantissa
1.990 +	asm("beq ConvertTRealXToTReal64c ");	// if zero, truncate, else round up
1.991 +	asm("ConvertTRealXToTReal64d: ");	// come here to round up
1.992 +	asm("adds r1, r1, #0x800 ");	// increment the mantissa
1.993 +	asm("adcs r2, r2, #0 ");
1.994 +	asm("movcs r2, #0x80000000 ");	// if carry, mantissa=10000...0
1.995 +	asm("addcs r12, r12, #1 ");	// and increment exponent
1.996 +	asm("cmpmi r12, #1 ");	// if mantissa normalised, check exponent>0
1.997 +	asm("movmi r12, #1 ");	// if normalised and exponent=0, set exponent to 1
1.998 +	asm("ConvertTRealXToTReal64c: ");	// come here to truncate
1.999 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1000 +	asm("orr r0, r0, r12, lsl #20 ");	// exponent into r0 bits 20-30
1.1001 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1002 +	asm("orr r0, r0, r2, lsr #11 ");	// non-integer mantissa bits into r0 bits 0-19
1.1003 +	asm("mov r1, r1, lsr #11 ");	// and r1
1.1004 +	asm("orr r1, r1, r2, lsl #21 ");
1.1005 +	asm("b ConvertTRealXToTReal64_end ");
1.1006 +
1.1007 +	asm("ConvertTRealXToTReal64a: ");	// come here if overflow, infinity or NaN
1.1008 +	asm("cmn r3, #0x10000 ");	// check for infinity or NaN
1.1009 +	asm("movcc r2, #0 ");	// if not, set mantissa to 0 for infinity result
1.1010 +	asm("movcc r1, #0 ");
1.1011 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1012 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1013 +	asm("orr r0, r0, #0x7F000000 ");	// r0 bits 20-30 = 7FF = exponent
1.1014 +	asm("orr r0, r0, #0x00F00000 ");
1.1015 +	asm("orr r0, r0, r2, lsr #11 ");	// r0 bits 0-19 = result mantissa high bits
1.1016 +	asm("mov r1, r1, lsr #11 ");	// and r1=result mantissa low bits
1.1017 +	asm("orr r1, r1, r2, lsl #21 ");
1.1018 +	asm("ConvertTRealXToTReal64_end: ");
1.1019 +#ifndef __DOUBLE_WORDS_SWAPPED__
1.1020 +	asm("mov r2, r0 ");	// swap word order so the low mantissa word is returned first
1.1021 +	asm("mov r0, r1 ");
1.1022 +	asm("mov r1, r2 ");
1.1023 +#endif
1.1024 +	__JUMP(,lr);
1.1025 +	}
1.1026 +
1.1027 +
1.1028 +
1.1029 +
1.1030 +__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal32& /*aVal*/) const
1.1031 +/**
1.1032 +Extracts the extended precision value as
1.1033 +a single precision floating point value.
1.1034 +
1.1035 +@param aVal A reference to a single precision object which contains
1.1036 +            the result of the operation.
1.1037 +
1.1038 +@return KErrNone, if the operation is successful;
1.1039 +        KErrOverflow, if the operation results in overflow;
1.1040 +        KErrUnderflow, if the operation results in underflow; KErrArgument, if this value is a NaN.
1.1041 +*/
1.1042 +	{
1.1043 +	asm("stmfd sp!, {r4,lr} ");
1.1044 +	asm("mov r4, r1 ");	// r4 = address of aVal (preserved across the call below)
1.1045 +	asm("ldmia r0, {r1,r2,r3} ");	// r1,r2,r3=input value
1.1046 +	asm("bl TRealXGetTReal32 ");	// convert; result in r0, error code in r12
1.1047 +	asm("str r0, [r4] ");	// store converted TReal32
1.1048 +	asm("mov r0, r12 ");	// return value into r0
1.1049 +	__POPRET("r4,");
1.1050 +
1.1051 +	// Convert TRealX in r1,r2,r3 to TReal32 in r0
1.1052 +	// Return error code in r12
1.1053 +	// r0-r3, r12 modified
1.1054 +	asm("TRealXGetTReal32: ");
1.1055 +	asm("mov r12, #0x8000 ");
1.1056 +	asm("orr r12, r12, #0x007F ");	// r12=0x807F = TRealX exponent of 2^128, first value too big for TReal32
1.1057 +	asm("cmp r3, r12, lsl #16 ");	// check if exponent>=807F
1.1058 +	asm("bcs TRealXGetTReal32a ");	// branch if it is
1.1059 +	asm("sub r12, r12, #0x00FF ");	// r12=0x7F80
1.1060 +	asm("rsbs r12, r12, r3, lsr #16 ");	// r12=exp in - 7F80 = result exponent if in range
1.1061 +	asm("bgt TRealXGetTReal32b ");	// branch if normalised result
1.1062 +	asm("cmn r12, #23 ");	// check for total underflow or zero
1.1063 +	asm("bge TRealXGetTReal32e ");	// skip if not
1.1064 +	asm("mov r0, r3, lsl #31 ");	// else return zero with appropriate sign
1.1065 +	asm("mov r1, #0 ");
1.1066 +	asm("cmp r3, #0x10000 ");	// check for zero
1.1067 +	asm("movcc r12, #0 ");	// if zero return KErrNone
1.1068 +	asm("mvncs r12, #9 ");	// else return KErrUnderflow
1.1069 +	__JUMP(,lr);
1.1070 +	asm("TRealXGetTReal32e: ");
1.1071 +	asm("add r12, r12, #31 ");	// r12=32-mantissa shift required = 32-(1-r12)
1.1072 +	asm("movs r0, r1, lsl r12 ");	// r0=lost bits when r2:r1 is shifted
1.1073 +	asm("bicne r3, r3, #0x300 ");	// if these are not zero, set rounded down flag
1.1074 +	asm("orrne r3, r3, #0x100 ");
1.1075 +	asm("rsb r0, r12, #32 ");
1.1076 +	asm("mov r1, r1, lsr r0 ");
1.1077 +	asm("orr r1, r1, r2, lsl r12 ");
1.1078 +	asm("mov r2, r2, lsr r0 ");	// r2 top 24 bits now give unrounded result mantissa
1.1079 +	asm("mov r12, #0 ");	// result exponent will be zero
1.1080 +	asm("TRealXGetTReal32b: ");
1.1081 +	asm("movs r0, r2, lsl #24 ");	// top 8 truncated bits into top byte of r0
1.1082 +	asm("bpl TRealXGetTReal32c ");	// if top bit clear, truncate
1.1083 +	asm("cmp r0, #0x80000000 ");
1.1084 +	asm("cmpeq r1, #0 ");	// compare rounding bits to 1000...
1.1085 +	asm("bhi TRealXGetTReal32d ");	// if >, round up
1.1086 +	asm("movs r0, r3, lsl #23 ");	// round up flag into C, round down flag into N
1.1087 +	asm("bcs TRealXGetTReal32c ");	// if rounded up, truncate
1.1088 +	asm("bmi TRealXGetTReal32d ");	// if rounded down, round up
1.1089 +	asm("tst r2, #0x100 ");	// else round to even - test LSB of result mantissa
1.1090 +	asm("beq TRealXGetTReal32c ");	// if zero, truncate, else round up
1.1091 +	asm("TRealXGetTReal32d: ");	// come here to round up
1.1092 +	asm("adds r2, r2, #0x100 ");	// increment the mantissa
1.1093 +	asm("movcs r2, #0x80000000 ");	// if carry, mantissa=800000
1.1094 +	asm("addcs r12, r12, #1 ");	// and increment exponent
1.1095 +	asm("cmpmi r12, #1 ");	// if mantissa normalised, check exponent>0
1.1096 +	asm("movmi r12, #1 ");	// if normalised and exponent=0, set exponent to 1
1.1097 +	asm("TRealXGetTReal32c: ");	// come here to truncate
1.1098 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1099 +	asm("orr r0, r0, r12, lsl #23 ");	// exponent into r0 bits 23-30
1.1100 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1101 +	asm("orr r0, r0, r2, lsr #8 ");	// non-integer mantissa bits into r0 bits 0-22
1.1102 +	asm("cmp r12, #0xFF ");	// check for overflow (exponent FF would denote infinity/NaN)
1.1103 +	asm("mvneq r12, #8 ");	// if overflow, return KErrOverflow
1.1104 +	__JUMP(eq,lr);
1.1105 +	asm("bics r1, r0, #0x80000000 ");	// check for underflow (result zero apart from sign)
1.1106 +	asm("mvneq r12, #9 ");	// if underflow return KErrUnderflow
1.1107 +	asm("movne r12, #0 ");	// else return KErrNone
1.1108 +	__JUMP(,lr);
1.1109 +	asm("TRealXGetTReal32a: ");	// come here if overflow, infinity or NaN
1.1110 +	asm("cmn r3, #0x10000 ");	// check for infinity or NaN
1.1111 +	asm("movcc r2, #0 ");	// if not, set mantissa to 0 for infinity result
1.1112 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1113 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1114 +	asm("orr r0, r0, #0x7F000000 ");	// r0 bits 23-30 = FF = exponent
1.1115 +	asm("orr r0, r0, #0x00800000 ");
1.1116 +	asm("orr r0, r0, r2, lsr #8 ");	// r0 bits 0-22 = result mantissa
1.1117 +	asm("movs r12, r0, lsl #9 ");	// check if result is infinity or NaN (zero fraction = infinity)
1.1118 +	asm("mvneq r12, #8 ");	// if infinity return KErrOverflow
1.1119 +	asm("mvnne r12, #5 ");	// else return KErrArgument
1.1120 +	__JUMP(,lr);
1.1121 +	}
1.1122 +
1.1123 +
1.1124 +
1.1125 +
1.1126 +__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal64& /*aVal*/) const
1.1127 +/**
1.1128 +Extracts the extended precision value as
1.1129 +a double precision floating point value.
1.1130 +
1.1131 +@param aVal A reference to a double precision object which
1.1132 +            contains the result of the operation.
1.1133 +
1.1134 +@return KErrNone, if the operation is successful;
1.1135 +        KErrOverflow, if the operation results in overflow;
1.1136 +        KErrUnderflow, if the operation results in underflow; KErrArgument, if this value is a NaN.
1.1137 +*/
1.1138 +	{
1.1139 +	asm("stmfd sp!, {r4,lr} ");
1.1140 +	asm("mov r4, r1 ");	// r4 = address of aVal (preserved across the call below)
1.1141 +	asm("ldmia r0, {r1,r2,r3} ");	// r1,r2,r3=input value
1.1142 +	asm("bl TRealXGetTReal64 ");	// convert; result in r0,r1, error code in r12
1.1143 +	asm("stmia r4, {r0,r1} ");	// store converted TReal64
1.1144 +	asm("mov r0, r12 ");	// return value into r0
1.1145 +	__POPRET("r4,");
1.1146 +
1.1147 +	// Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
1.1148 +	// Return error code in r12
1.1149 +	// r0-r3, r12 modified
1.1150 +	asm("TRealXGetTReal64: ");
1.1151 +	asm("mov r12, #0x8300 ");
1.1152 +	asm("orr r12, r12, #0x00FF ");	// r12=0x83FF = TRealX exponent of 2^1024, first value too big for TReal64
1.1153 +	asm("cmp r3, r12, lsl #16 ");	// check if exponent>=83FF
1.1154 +	asm("bcs TRealXGetTReal64a ");	// branch if it is
1.1155 +	asm("mov r12, #0x7C00 ");
1.1156 +	asm("rsbs r12, r12, r3, lsr #16 ");	// r12=exp in - 7C00 = result exponent if in range
1.1157 +	asm("bgt TRealXGetTReal64b ");	// branch if normalised result
1.1158 +	asm("cmn r12, #52 ");	// check for total underflow or zero
1.1159 +	asm("bge TRealXGetTReal64g ");	// skip if not
1.1160 +	asm("mov r0, r3, lsl #31 ");	// else return zero with appropriate sign
1.1161 +	asm("mov r1, #0 ");
1.1162 +	asm("cmp r3, #0x10000 ");	// check for zero
1.1163 +	asm("movcc r12, #0 ");	// if zero return KErrNone
1.1164 +	asm("mvncs r12, #9 ");	// else return KErrUnderflow
1.1165 +	asm("b TRealXGetTReal64_end ");
1.1166 +
1.1167 +	asm("TRealXGetTReal64g: ");
1.1168 +	asm("adds r12, r12, #31 ");	// check if >=32 shifts needed, r12=32-shift count
1.1169 +	asm("ble TRealXGetTReal64e ");	// branch if >=32 shifts needed
1.1170 +	asm("movs r0, r1, lsl r12 ");	// r0=lost bits when r2:r1 is shifted
1.1171 +	asm("bicne r3, r3, #0x300 ");	// if these are not zero, set rounded down flag
1.1172 +	asm("orrne r3, r3, #0x100 ");
1.1173 +	asm("rsb r0, r12, #32 ");	// r0=shift count
1.1174 +	asm("mov r1, r1, lsr r0 ");
1.1175 +	asm("orr r1, r1, r2, lsl r12 ");
1.1176 +	asm("mov r2, r2, lsr r0 ");	// r2:r1 top 53 bits = unrounded result mantissa
1.1177 +	asm("b TRealXGetTReal64f ");
1.1178 +	asm("TRealXGetTReal64e: ");
1.1179 +	asm("add r12, r12, #32 ");	// r12=64-shift count
1.1180 +	asm("cmp r1, #0 ");	// r1 bits are all lost - test them
1.1181 +	asm("moveqs r0, r2, lsl r12 ");	// if zero, test lost bits from r2
1.1182 +	asm("bicne r3, r3, #0x300 ");	// if lost bits not all zero, set rounded down flag
1.1183 +	asm("orrne r3, r3, #0x100 ");
1.1184 +	asm("rsb r0, r12, #32 ");	// r0=shift count-32
1.1185 +	asm("mov r1, r2, lsr r0 ");	// shift r2:r1 right
1.1186 +	asm("mov r2, #0 ");
1.1187 +	asm("TRealXGetTReal64f: ");
1.1188 +	asm("mov r12, #0 ");	// result exponent will be zero for denormals
1.1189 +	asm("TRealXGetTReal64b: ");
1.1190 +	asm("movs r0, r1, lsl #21 ");	// 11 rounding bits to top of r0
1.1191 +	asm("bpl TRealXGetTReal64c ");	// if top bit clear, truncate
1.1192 +	asm("cmp r0, #0x80000000 ");	// compare rounding bits to 10000000000
1.1193 +	asm("bhi TRealXGetTReal64d ");	// if >, round up
1.1194 +	asm("movs r0, r3, lsl #23 ");	// round up flag into C, round down flag into N
1.1195 +	asm("bcs TRealXGetTReal64c ");	// if rounded up, truncate
1.1196 +	asm("bmi TRealXGetTReal64d ");	// if rounded down, round up
1.1197 +	asm("tst r1, #0x800 ");	// else round to even - test LSB of result mantissa
1.1198 +	asm("beq TRealXGetTReal64c ");	// if zero, truncate, else round up
1.1199 +	asm("TRealXGetTReal64d: ");	// come here to round up
1.1200 +	asm("adds r1, r1, #0x800 ");	// increment the mantissa
1.1201 +	asm("adcs r2, r2, #0 ");
1.1202 +	asm("movcs r2, #0x80000000 ");	// if carry, mantissa=10000...0
1.1203 +	asm("addcs r12, r12, #1 ");	// and increment exponent
1.1204 +	asm("cmpmi r12, #1 ");	// if mantissa normalised, check exponent>0
1.1205 +	asm("movmi r12, #1 ");	// if normalised and exponent=0, set exponent to 1
1.1206 +	asm("TRealXGetTReal64c: ");	// come here to truncate
1.1207 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1208 +	asm("orr r0, r0, r12, lsl #20 ");	// exponent into r0 bits 20-30
1.1209 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1210 +	asm("orr r0, r0, r2, lsr #11 ");	// non-integer mantissa bits into r0 bits 0-19
1.1211 +	asm("mov r1, r1, lsr #11 ");	// and r1
1.1212 +	asm("orr r1, r1, r2, lsl #21 ");
1.1213 +	asm("add r12, r12, #1 ");	// r12+1==0x800 iff result exponent was 7FF (infinity region)
1.1214 +	asm("cmp r12, #0x800 ");	// check for overflow
1.1215 +	asm("mvneq r12, #8 ");	// if overflow, return KErrOverflow
1.1216 +	asm("beq TRealXGetTReal64_end ");
1.1217 +
1.1218 +	asm("bics r12, r0, #0x80000000 ");	// check for underflow (result zero apart from sign)
1.1219 +	asm("cmpeq r1, #0 ");
1.1220 +	asm("mvneq r12, #9 ");	// if underflow return KErrUnderflow
1.1221 +	asm("movne r12, #0 ");	// else return KErrNone
1.1222 +	asm("b TRealXGetTReal64_end ");
1.1223 +
1.1224 +	asm("TRealXGetTReal64a: ");	// come here if overflow, infinity or NaN
1.1225 +	asm("cmn r3, #0x10000 ");	// check for infinity or NaN
1.1226 +	asm("movcc r2, #0 ");	// if not, set mantissa to 0 for infinity result
1.1227 +	asm("movcc r1, #0 ");
1.1228 +	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
1.1229 +	asm("mov r0, r3, lsl #31 ");	// r0 bit 31 = sign bit
1.1230 +	asm("orr r0, r0, #0x7F000000 ");	// r0 bits 20-30 = 7FF = exponent
1.1231 +	asm("orr r0, r0, #0x00F00000 ");
1.1232 +	asm("orr r0, r0, r2, lsr #11 ");	// r0 bits 0-19 = result mantissa high bits
1.1233 +	asm("mov r1, r1, lsr #11 ");	// and r1=result mantissa low bits
1.1234 +	asm("orr r1, r1, r2, lsl #21 ");
1.1235 +	asm("movs r12, r0, lsl #12 ");	// check if result is infinity or NaN (zero fraction = infinity)
1.1236 +	asm("cmpeq r1, #0 ");
1.1237 +	asm("mvneq r12, #8 ");	// if infinity return KErrOverflow
1.1238 +	asm("mvnne r12, #5 ");	// else return KErrArgument
1.1239 +	asm("TRealXGetTReal64_end: ");
1.1240 +#ifndef __DOUBLE_WORDS_SWAPPED__
1.1241 +	asm("mov r2, r0 ");	// swap word order so the low mantissa word is returned first
1.1242 +	asm("mov r0, r1 ");
1.1243 +	asm("mov r1, r2 ");
1.1244 +#endif
1.1245 +	__JUMP(,lr);
1.1246 +	}
1.1247 +
1.1248 +
1.1249 +
1.1250 +
1.1251 +__NAKED__ EXPORT_C TRealX TRealX::operator+() const
1.1252 +/**
1.1253 +Returns this extended precision number unchanged.
1.1254 +
1.1255 +Note that this may also be referred to as a unary plus operator.
1.1256 +
1.1257 +@return The extended precision number.
1.1258 +*/
1.1259 +	{
1.1260 +	asm("ldmia r1, {r2,r3,r12} ");	// load the 3 words of *this (r0=result ptr, r1=this)
1.1261 +	asm("stmia r0, {r2,r3,r12} ");	// copy unchanged into the returned TRealX
1.1262 +	__JUMP(,lr);
1.1263 +	}
1.1264 +
1.1265 +
1.1266 +
1.1267 +
1.1268 +__NAKED__ EXPORT_C TRealX TRealX::operator-() const
1.1269 +/**
1.1270 +Negates this extended precision number.
1.1271 +
1.1272 +This may also be referred to as a unary minus operator.
1.1273 +
1.1274 +@return The negative of the extended precision number.
1.1275 +*/
1.1276 +	{
1.1277 +	asm("ldmia r1, {r2,r3,r12} ");	// load the 3 words of *this (r0=result ptr, r1=this)
1.1278 +	asm("eor r12, r12, #1 ");	// unary - changes sign bit (bit 0 of the exponent/flags word)
1.1279 +	asm("stmia r0, {r2,r3,r12} ");	// store the negated value into the returned TRealX
1.1280 +	__JUMP(,lr);
1.1281 +	}
1.1282 +
1.1283 +
1.1284 +
1.1285 +
1.1286 +__NAKED__ EXPORT_C TRealX::TRealXOrder TRealX::Compare(const TRealX& /*aVal*/) const
1.1287 +/**
1.1288 +Compares this value with aVal and returns a TRealXOrder: 1 if this is the smaller, 2 if equal, 4 if this is the larger, 8 if either operand is a NaN (unordered). */
1.1289 +	{
1.1290 +	asm("stmfd sp!, {r4,r5,r6,lr} ");
1.1291 +	asm("ldmia r1, {r4,r5,r6} ");	// r4,r5,r6 = aVal
1.1292 +	asm("ldmia r0, {r1,r2,r3} ");	// r1,r2,r3 = *this
1.1293 +	asm("bl TRealXCompare ");
1.1294 +	__POPRET("r4-r6,");
1.1295 +
1.1296 +	// Compare TRealX in r1,r2,r3 to TRealX in r4,r5,r6
1.1297 +	// Return TRealXOrder result in r0
1.1298 +	asm("TRealXCompare: ");
1.1299 +	asm("cmn r3, #0x10000 ");	// check for NaNs/infinity
1.1300 +	asm("bcs TRealXCompare1 ");
1.1301 +	asm("TRealXCompare6: ");	// will come back here if infinity
1.1302 +	asm("cmn r6, #0x10000 ");
1.1303 +	asm("bcs TRealXCompare2 ");
1.1304 +	asm("TRealXCompare7: ");	// will come back here if infinity
1.1305 +	asm("cmp r3, #0x10000 ");	// check for zeros
1.1306 +	asm("bcc TRealXCompare3 ");
1.1307 +	asm("cmp r6, #0x10000 ");
1.1308 +	asm("bcc TRealXCompare4 ");
1.1309 +	asm("mov r12, r6, lsl #31 ");
1.1310 +	asm("cmp r12, r3, lsl #31 ");	// compare signs
1.1311 +	asm("movne r0, #4 ");
1.1312 +	asm("bne TRealXCompare5 ");	// branch if signs different
1.1313 +	asm("mov r12, r3, lsr #16 ");	// r12=first exponent
1.1314 +	asm("cmp r12, r6, lsr #16 ");	// compare exponents
1.1315 +	asm("cmpeq r2, r5 ");	// if equal compare high words of mantissa
1.1316 +	asm("cmpeq r1, r4 ");	// if equal compare low words of mantissa
1.1317 +	asm("moveq r0, #2 ");	// if equal return 2
1.1318 +	__JUMP(eq,lr);
1.1319 +	asm("movhi r0, #4 ");	// r0=4 if first exp bigger
1.1320 +	asm("movcc r0, #1 ");	// else r0=1
1.1321 +	asm("TRealXCompare5: ");
1.1322 +	asm("tst r3, #1 ");	// if signs negative
1.1323 +	asm("eorne r0, r0, #5 ");	// then switch 1 and 4 (ordering reverses for negatives)
1.1324 +	__JUMP(,lr);
1.1325 +	asm("TRealXCompare3: ");	// first operand zero
1.1326 +	asm("cmp r6, #0x10000 ");	// check if second also zero
1.1327 +	asm("movcc r0, #2 ");	// if so, return 2 (+0 and -0 compare equal)
1.1328 +	__JUMP(cc,lr);
1.1329 +	asm("tst r6, #1 ");	// else check sign of operand 2
1.1330 +	asm("moveq r0, #1 ");	// if +, return 1
1.1331 +	asm("movne r0, #4 ");	// else return 4
1.1332 +	__JUMP(,lr);
1.1333 +	asm("TRealXCompare4: ");	// second operand zero, first nonzero
1.1334 +	asm("tst r3, #1 ");	// check sign of operand 1
1.1335 +	asm("moveq r0, #4 ");	// if +, return 4
1.1336 +	asm("movne r0, #1 ");	// else return 1
1.1337 +	__JUMP(,lr);
1.1338 +	asm("TRealXCompare1: ");	// first operand NaN or infinity
1.1339 +	asm("cmp r2, #0x80000000 ");	// check for infinity (mantissa 80000000 00000000)
1.1340 +	asm("cmpeq r1, #0 ");
1.1341 +	asm("beq TRealXCompare6 ");	// if infinity, can handle normally
1.1342 +	asm("mov r0, #8 ");	// if NaN, return 8 (unordered)
1.1343 +	__JUMP(,lr);
1.1344 +	asm("TRealXCompare2: ");	// second operand NaN or infinity
1.1345 +	asm("cmp r5, #0x80000000 ");	// check for infinity (mantissa 80000000 00000000)
1.1346 +	asm("cmpeq r4, #0 ");
1.1347 +	asm("beq TRealXCompare7 ");	// if infinity, can handle normally
1.1348 +	asm("mov r0, #8 ");	// if NaN, return 8 (unordered)
1.1349 +	__JUMP(,lr);
1.1350 +	}
1.1351 +
1.1352 +
1.1353 +
1.1354 +
1.1355 +__NAKED__ EXPORT_C TInt TRealX::SubEq(const TRealX& /*aVal*/)
1.1356 +/**
1.1357 +Subtracts an extended precision value from this extended precision number.
1.1358 +
1.1359 +@param aVal The extended precision value to be subtracted.
1.1360 +
1.1361 +@return KErrNone, if the operation is successful;
1.1362 +        KErrOverflow, if the operation results in overflow;
1.1363 +        KErrUnderflow, if the operation results in underflow.
1.1364 +*/
1.1365 +	{
1.1366 +	asm("stmfd sp!, {r0,r4-r8,lr} ");	// save this pointer and the registers the helper clobbers
1.1367 +	asm("ldmia r1, {r4,r5,r6} ");	// r4,r5,r6 = aVal
1.1368 +	asm("ldmia r0, {r1,r2,r3} ");	// r1,r2,r3 = *this
1.1369 +	asm("bl TRealXSubtract ");	// result in r1,r2,r3, error code in r12
1.1370 +	asm("ldmfd sp!, {r0,r4-r8,lr} ");	// restore this pointer and saved registers
1.1371 +	asm("stmia r0, {r1,r2,r3} ");	// store the result back into *this
1.1372 +	asm("mov r0, r12 ");	// return the error code
1.1373 +	__JUMP(,lr);
1.1374 +	}
1.1375 +
1.1376 +
1.1377 +
1.1378 +
1.1379 +__NAKED__ EXPORT_C TInt TRealX::AddEq(const TRealX& /*aVal*/)
1.1380 +/**
1.1381 +Adds an extended precision value to this extended precision number.
1.1382 +
1.1383 +@param aVal The extended precision value to be added.
1.1384 +
1.1385 +@return KErrNone, if the operation is successful;
1.1386 + KErrOverflow,if the operation results in overflow;
1.1387 + KErrUnderflow, if the operation results in underflow.
1.1388 +*/
1.1389 + {
1.1390 + asm("stmfd sp!, {r0,r4-r8,lr} ");
1.1391 + asm("ldmia r1, {r4,r5,r6} ");
1.1392 + asm("ldmia r0, {r1,r2,r3} ");
1.1393 + asm("bl TRealXAdd ");
1.1394 + asm("ldmfd sp!, {r0,r4-r8,lr} ");
1.1395 + asm("stmia r0, {r1,r2,r3} ");
1.1396 + asm("mov r0, r12 ");
1.1397 + __JUMP(,lr);
1.1398 +
1.1399 + // TRealX subtraction r1,r2,r3 - r4,r5,r6 result in r1,r2,r3
1.1400 + // Error code returned in r12
1.1401 + // Registers r0-r8,r12 modified
1.1402 + // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
1.1403 + asm("TRealXSubtract: ");
1.1404 + asm("eor r6, r6, #1 "); // negate second operand and add
1.1405 +
1.1406 + // TRealX addition r1,r2,r3 + r4,r5,r6 result in r1,r2,r3
1.1407 + // Error code returned in r12
1.1408 + // Registers r0-r8,r12 modified
1.1409 + // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
1.1410 + // Note: +0 + +0 = +0, -0 + -0 = -0, +0 + -0 = -0 + +0 = +0,
1.1411 + // +/-0 + X = X + +/-0 = X, X + -X = -X + X = +0
1.1412 + asm("TRealXAdd: ");
1.1413 + asm("mov r12, #0 "); // initialise return value to KErrNone
1.1414 + asm("bic r3, r3, #0x300 "); // clear rounding flags
1.1415 + asm("bic r6, r6, #0x300 "); // clear rounding flags
1.1416 + asm("cmn r3, #0x10000 "); // check if first operand is NaN or infinity
1.1417 + asm("bcs TRealXAdd1 "); // branch if it is
1.1418 + asm("cmn r6, #0x10000 "); // check if second operand is NaN or infinity
1.1419 + asm("bcs TRealXAdd2 "); // branch if it is
1.1420 + asm("cmp r6, #0x10000 "); // check if second operand zero
1.1421 + asm("bcc TRealXAdd3a "); // branch if it is
1.1422 + asm("cmp r3, #0x10000 "); // check if first operand zero
1.1423 + asm("bcc TRealXAdd3 "); // branch if it is
1.1424 + asm("mov r7, #0 "); // r7 will be rounding word
1.1425 + asm("mov r0, r3, lsr #16 "); // r0 = first operand exponent
1.1426 + asm("subs r0, r0, r6, lsr #16 "); // r0 = first exponent - second exponent
1.1427 + asm("beq TRealXAdd8 "); // if equal, no mantissa shifting needed
1.1428 + asm("bhi TRealXAdd4 "); // skip if first exponent bigger
1.1429 + asm("rsb r0, r0, #0 "); // need to shift first mantissa right by r0 to align
1.1430 + asm("mov r8, r1 "); // swap the numbers to the one to be shifted is 2nd
1.1431 + asm("mov r1, r4 ");
1.1432 + asm("mov r4, r8 ");
1.1433 + asm("mov r8, r2 ");
1.1434 + asm("mov r2, r5 ");
1.1435 + asm("mov r5, r8 ");
1.1436 + asm("mov r8, r3 ");
1.1437 + asm("mov r3, r6 ");
1.1438 + asm("mov r6, r8 ");
1.1439 + asm("TRealXAdd4: "); // need to shift 2nd mantissa right by r0 to align
1.1440 + asm("cmp r0, #64 "); // more than 64 shifts needed?
1.1441 + asm("bhi TRealXAdd6 "); // if so, smaller number cannot affect larger
1.1442 + asm("cmp r0, #32 ");
1.1443 + asm("bhi TRealXAdd7 "); // branch if shift count>32
1.1444 + asm("rsb r8, r0, #32 ");
1.1445 + asm("mov r7, r4, lsl r8 "); // shift r5:r4 right into r7
1.1446 + asm("mov r4, r4, lsr r0 ");
1.1447 + asm("orr r4, r4, r5, lsl r8 ");
1.1448 + asm("mov r5, r5, lsr r0 ");
1.1449 + asm("b TRealXAdd8 ");
1.1450 + asm("TRealXAdd7: "); // 64 >= shift count > 32
1.1451 + asm("sub r0, r0, #32 ");
1.1452 + asm("rsb r8, r0, #32 ");
1.1453 + asm("movs r7, r4, lsl r8 "); // test bits lost in shift
1.1454 + asm("orrne r6, r6, #0x100 "); // if not all zero, flag 2nd mantissa rounded down
1.1455 + asm("mov r7, r4, lsr r0 "); // shift r5:r4 right into r7 by 32+r0
1.1456 + asm("orr r7, r7, r5, lsl r8 ");
1.1457 + asm("mov r4, r5, lsr r0 ");
1.1458 + asm("mov r5, #0 ");
1.1459 + asm("TRealXAdd8: "); // mantissas are now aligned
1.1460 + asm("mov r8, r3, lsl #31 "); // r8=sign of first operand
1.1461 + asm("cmp r8, r6, lsl #31 "); // compare signs
1.1462 + asm("bne TRealXSub1 "); // if different, need to do a subtraction
1.1463 + asm("adds r1, r1, r4 "); // signs the same - add mantissas
1.1464 + asm("adcs r2, r2, r5 ");
1.1465 + asm("bcc TRealXAdd9 "); // skip if no carry
1.1466 + asm(".word 0xE1B02062 "); // movs r2, r2, rrx shift carry into mantissa
1.1467 + asm(".word 0xE1B01061 "); // movs r1, r1, rrx
1.1468 + asm(".word 0xE1B07067 "); // movs r7, r7, rrx
1.1469 + asm("orrcs r6, r6, #0x100 "); // if 1 shifted out, flag 2nd mantissa rounded down
1.1470 + asm("add r3, r3, #0x10000 "); // increment exponent
1.1471 + asm("TRealXAdd9: ");
1.1472 + asm("cmp r7, #0x80000000 "); // check rounding word
1.1473 + asm("bcc TRealXAdd10 "); // if <0x80000000 round down
1.1474 + asm("bhi TRealXAdd11 "); // if >0x80000000 round up
1.1475 + asm("tst r6, #0x100 "); // if =0x80000000 check if 2nd mantissa rounded down
1.1476 + asm("bne TRealXAdd11 "); // if so, round up
1.1477 + asm("tst r6, #0x200 "); // if =0x80000000 check if 2nd mantissa rounded up
1.1478 + asm("bne TRealXAdd10 "); // if so, round down
1.1479 + asm("tst r1, #1 "); // else round to even - check LSB
1.1480 + asm("beq TRealXAdd10 "); // if zero, round down
1.1481 + asm("TRealXAdd11: "); // come here to round up
1.1482 + asm("adds r1, r1, #1 "); // increment mantissa
1.1483 + asm("adcs r2, r2, #0 ");
1.1484 + asm("movcs r2, #0x80000000 "); // if carry, mantissa = 80000000 00000000
1.1485 + asm("addcs r3, r3, #0x10000 "); // and increment exponent
1.1486 + asm("cmn r3, #0x10000 "); // check overflow
1.1487 + asm("orrcc r3, r3, #0x200 "); // if no overflow, set rounded-up flag ...
1.1488 + __JUMP(cc,lr);
1.1489 + asm("b TRealXAdd12 "); // if overflow, return infinity
1.1490 + asm("TRealXAdd10: "); // come here to round down
1.1491 + asm("cmn r3, #0x10000 "); // check overflow
1.1492 + asm("bcs TRealXAdd12 "); // if overflow, return infinity
1.1493 + asm("cmp r7, #0 "); // if no overflow check if rounding word is zero
1.1494 + asm("orrne r3, r3, #0x100 "); // if not, set rounded-down flag ...
1.1495 + __JUMP(ne,lr);
1.1496 + asm("and r6, r6, #0x300 "); // else transfer 2nd mantissa rounding flags
1.1497 + asm("orr r3, r3, r6 "); // to result
1.1498 + __JUMP(,lr);
1.1499 +
1.1500 + asm("TRealXAdd12: "); // come here if overflow - return infinity
1.1501 + asm("mov r2, #0x80000000 ");
1.1502 + asm("mov r1, #0 ");
1.1503 + asm("mvn r12, #8 "); // and return KErrOverflow
1.1504 + __JUMP(,lr);
1.1505 +
1.1506 + asm("TRealXSub1: "); // come here if operand signs differ
1.1507 + asm("tst r6, #0x300 "); // check if 2nd mantissa rounded
1.1508 + asm("eorne r6, r6, #0x300 "); // if so, change rounding
1.1509 + asm("rsbs r7, r7, #0 "); // subtract mantissas r2:r1:0 -= r5:r4:r7
1.1510 + asm("sbcs r1, r1, r4 ");
1.1511 + asm("sbcs r2, r2, r5 ");
1.1512 + asm("bcs TRealXSub2 "); // skip if no borrow
1.1513 + asm("tst r6, #0x300 "); // check if 2nd mantissa rounded
1.1514 + asm("eorne r6, r6, #0x300 "); // if so, change rounding
1.1515 + asm("rsbs r7, r7, #0 "); // negate result
1.1516 + asm("rscs r1, r1, #0 ");
1.1517 + asm("rscs r2, r2, #0 ");
1.1518 + asm("eor r3, r3, #1 "); // and change result sign
1.1519 + asm("TRealXSub2: ");
1.1520 + asm("bne TRealXSub3 "); // skip if mantissa top word is not zero
1.1521 + asm("movs r2, r1 "); // else shift up by 32
1.1522 + asm("mov r1, r7 ");
1.1523 + asm("mov r7, #0 ");
1.1524 + asm("bne TRealXSub3a "); // skip if mantissa top word is not zero now
1.1525 + asm("movs r2, r1 "); // else shift up by 32 again
1.1526 + asm("mov r1, #0 ");
1.1527 + asm("moveq r3, #0 "); // if r2 still zero, result is zero - return +0
1.1528 + __JUMP(eq,lr);
1.1529 + asm("subs r3, r3, #0x00400000 "); // else, decrement exponent by 64
1.1530 + asm("bcs TRealXSub3 "); // if no borrow, proceed
1.1531 + asm("b TRealXSub4 "); // if borrow, underflow
1.1532 + asm("TRealXSub3a: "); // needed one 32-bit shift
1.1533 + asm("subs r3, r3, #0x00200000 "); // so decrement exponent by 32
1.1534 + asm("bcc TRealXSub4 "); // if borrow, underflow
1.1535 + asm("TRealXSub3: "); // r2 is now non-zero; still may need up to 31 shifts
1.1536 +#ifdef __CPU_ARM_HAS_CLZ
1.1537 + CLZ(0,2);
1.1538 + asm("mov r2, r2, lsl r0 ");
1.1539 +#else
1.1540 + asm("mov r0, #0 "); // r0 will be shift count
1.1541 + asm("cmp r2, #0x00010000 ");
1.1542 + asm("movcc r2, r2, lsl #16 ");
1.1543 + asm("addcc r0, r0, #16 ");
1.1544 + asm("cmp r2, #0x01000000 ");
1.1545 + asm("movcc r2, r2, lsl #8 ");
1.1546 + asm("addcc r0, r0, #8 ");
1.1547 + asm("cmp r2, #0x10000000 ");
1.1548 + asm("movcc r2, r2, lsl #4 ");
1.1549 + asm("addcc r0, r0, #4 ");
1.1550 + asm("cmp r2, #0x40000000 ");
1.1551 + asm("movcc r2, r2, lsl #2 ");
1.1552 + asm("addcc r0, r0, #2 ");
1.1553 + asm("cmp r2, #0x80000000 ");
1.1554 + asm("movcc r2, r2, lsl #1 ");
1.1555 + asm("addcc r0, r0, #1 ");
1.1556 +#endif
1.1557 + asm("rsb r8, r0, #32 ");
1.1558 + asm("subs r3, r3, r0, lsl #16 "); // subtract shift count from exponent
1.1559 + asm("bcc TRealXSub4 "); // if borrow, underflow
1.1560 + asm("orr r2, r2, r1, lsr r8 "); // else shift mantissa up
1.1561 + asm("mov r1, r1, lsl r0 ");
1.1562 + asm("orr r1, r1, r7, lsr r8 ");
1.1563 + asm("mov r7, r7, lsl r0 ");
1.1564 + asm("cmp r3, #0x10000 "); // check for underflow
1.1565 + asm("bcs TRealXAdd9 "); // if no underflow, branch to round result
1.1566 +
1.1567 + asm("TRealXSub4: "); // come here if underflow
1.1568 + asm("and r3, r3, #1 "); // set exponent to zero, leave sign
1.1569 + asm("mov r2, #0 ");
1.1570 + asm("mov r1, #0 ");
1.1571 + asm("mvn r12, #9 "); // return KErrUnderflow
1.1572 + __JUMP(,lr);
1.1573 +
1.1574 + asm("TRealXAdd6: "); // come here if exponents differ by more than 64
1.1575 + asm("mov r8, r3, lsl #31 "); // r8=sign of first operand
1.1576 + asm("cmp r8, r6, lsl #31 "); // compare signs
1.1577 + asm("orreq r3, r3, #0x100 "); // if same, result has been rounded down
1.1578 + asm("orrne r3, r3, #0x200 "); // else result has been rounded up
1.1579 + __JUMP(,lr);
1.1580 +
1.1581 + asm("TRealXAdd3a: "); // come here if second operand zero
1.1582 + asm("cmp r3, #0x10000 "); // check if first operand also zero
1.1583 + asm("andcc r3, r3, r6 "); // if so, result is negative iff both zeros negative
1.1584 + asm("andcc r3, r3, #1 ");
1.1585 + __JUMP(,lr);
1.1586 +
1.1587 + asm("TRealXAdd3: "); // come here if first operand zero, second nonzero
1.1588 + asm("mov r1, r4 "); // return second operand unchanged
1.1589 + asm("mov r2, r5 ");
1.1590 + asm("mov r3, r6 ");
1.1591 + __JUMP(,lr);
1.1592 +
1.1593 + asm("TRealXAdd1: "); // come here if first operand NaN or infinity
1.1594 + asm("cmp r2, #0x80000000 "); // check for infinity
1.1595 + asm("cmpeq r1, #0 ");
1.1596 + asm("bne TRealXBinOpNan "); // branch if NaN
1.1597 + asm("cmn r6, #0x10000 "); // check 2nd operand for NaN/infinity
1.1598 + asm("mvncc r12, #8 "); // if neither, return KErrOverflow
1.1599 + __JUMP(cc,lr);
1.1600 + asm("cmp r5, #0x80000000 "); // check 2nd operand for infinity
1.1601 + asm("cmpeq r4, #0 ");
1.1602 + asm("bne TRealXBinOpNan "); // branch if NaN
1.1603 + asm("mov r0, r3, lsl #31 "); // both operands are infinity - check signs
1.1604 + asm("cmp r0, r6, lsl #31 ");
1.1605 + asm("mvneq r12, #8 "); // if same, return KErrOverflow
1.1606 + __JUMP(eq,lr);
1.1607 +
1.1608 + // Return 'real indefinite'
1.1609 + asm("TRealXRealIndefinite: ");
1.1610 + asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] ");
1.1611 + asm("mov r2, #0xC0000000 ");
1.1612 + asm("mov r1, #0 ");
1.1613 + asm("mvn r12, #5 "); // return KErrArgument
1.1614 + __JUMP(,lr);
1.1615 +
1.1616 + asm("TRealXAdd2: "); // come here if 2nd operand NaN/infinity, first finite
1.1617 + asm("cmp r5, #0x80000000 "); // check for infinity
1.1618 + asm("cmpeq r4, #0 ");
1.1619 + asm("bne TRealXBinOpNan "); // branch if NaN
1.1620 + asm("mov r1, r4 "); // else return 2nd operand (infinity)
1.1621 + asm("mov r2, r5 ");
1.1622 + asm("mov r3, r6 ");
1.1623 + asm("mvn r12, #8 "); // return KErrOverflow
1.1624 + __JUMP(,lr);
1.1625 +
1.1626 + asm("TRealXBinOpNan: "); // generic routine to process NaNs in binary
1.1627 + // operations
1.1628 + asm("cmn r3, #0x10000 "); // check if first operand is NaN
1.1629 + asm("movcc r0, r1 "); // if not, swap the operands
1.1630 + asm("movcc r1, r4 ");
1.1631 + asm("movcc r4, r0 ");
1.1632 + asm("movcc r0, r2 ");
1.1633 + asm("movcc r2, r5 ");
1.1634 + asm("movcc r5, r0 ");
1.1635 + asm("movcc r0, r3 ");
1.1636 + asm("movcc r3, r6 ");
1.1637 + asm("movcc r6, r0 ");
1.1638 + asm("cmn r6, #0x10000 "); // both operands NaNs?
1.1639 + asm("bcc TRealXBinOpNan1 "); // skip if not
1.1640 + asm("cmp r2, r5 "); // if so, compare the significands
1.1641 + asm("cmpeq r1, r4 ");
1.1642 + asm("movcc r1, r4 "); // r1,r2,r3 will get NaN with larger significand
1.1643 + asm("movcc r2, r5 ");
1.1644 + asm("movcc r3, r6 ");
1.1645 + asm("TRealXBinOpNan1: ");
1.1646 + asm("orr r2, r2, #0x40000000 "); // convert an SNaN to a QNaN
1.1647 + asm("mvn r12, #5 "); // return KErrArgument
1.1648 + __JUMP(,lr);
1.1649 + }
1.1650 +
1.1651 +
1.1652 +
1.1653 +
__NAKED__ EXPORT_C TInt TRealX::MultEq(const TRealX& /*aVal*/)
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow
*/
	{
	// Version for ARM 3M or later
	// Uses umull/umlal
	//
	// TRealX in-memory layout (see TRealX(TUint,TUint,TUint) constructor):
	// word 0 = mantissa low, word 1 = mantissa high,
	// word 2 = sign (bit 0), rounding flags (bits 8-9), exponent (bits 16-31).
	asm("stmfd sp!, {r0,r4-r7,lr} ");	// save this, callee-saved regs and return address
	asm("ldmia r1, {r4,r5,r6} ");		// r4,r5,r6 = aVal (mant.low, mant.high, sign/flags/exp)
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this (mant.low, mant.high, sign/flags/exp)
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");	// restore this into r0
	asm("stmia r0, {r1,r2,r3} ");		// write result back into *this
	asm("mov r0, r12 ");				// error code is returned in r12
	__JUMP(,lr);

	// TRealX multiplication r1,r2,r3 * r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r7,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	// (Hence the "bic pc, lr, #3" returns, which clear the result-sign bit that
	// this routine temporarily stashes in bit 0 of lr.)
	asm("TRealXMultiply: ");
	asm("mov r12, #0 ");				// initialise return value to KErrNone
	asm("bic r3, r3, #0x300 ");			// clear rounding flags
	asm("tst r6, #1 ");
	asm("eorne r3, r3, #1 ");			// Exclusive-OR signs
	asm("cmn r3, #0x10000 ");			// check if first operand is NaN or infinity
	asm("bcs TRealXMultiply1 ");		// branch if it is
	asm("cmn r6, #0x10000 ");			// check if second operand is NaN or infinity
	asm("bcs TRealXMultiply2 ");		// branch if it is
	asm("cmp r3, #0x10000 ");			// check if first operand zero
	__JUMP(cc,lr);						// if so, exit

	// Multiply mantissas in r2:r1 and r5:r4, result in r2:r1:r12:r7
	asm("umull r7, r12, r1, r4 ");		// r7:r12=m1.low*m2.low
	asm("movs r0, r6, lsr #16 ");		// r0=2nd operand exponent
	asm("beq TRealXMultiply3 ");		// if zero, return zero
	asm("mov r6, #0 ");					// clear r6 initially
	asm("umlal r12, r6, r1, r5 ");		// r6:r12:r7=m1.low*m2, r1 no longer needed
	asm("add r0, r0, r3, lsr #16 ");	// r0=sum of exponents
	asm("tst r3, #1 ");
	asm("mov r3, #0 ");					// clear r3 initially
	asm("umlal r6, r3, r2, r5 ");		// r3:r6:r12:r7=m2.low*m1+m2.high*m1.high<<64
										// r1,r5 no longer required
	asm("orrne lr, lr, #1 ");			// save sign in bottom bit of lr
	asm("sub r0, r0, #0x7F00 ");
	asm("sub r0, r0, #0x00FE ");		// r0 now contains result exponent (bias removed)
	asm("umull r1, r5, r2, r4 ");		// r5:r1=m2.high*m1.low
	asm("adds r12, r12, r1 ");			// shift left by 32 and add to give final result
	asm("adcs r1, r6, r5 ");
	asm("adcs r2, r3, #0 ");			// final result now in r2:r1:r12:r7
										// set flags on final value of r2 (ms word of result)

	// normalise the result mantissa
	asm("bmi TRealXMultiply4 ");		// skip if already normalised
	asm("adds r7, r7, r7 ");			// else shift left (will only ever need one shift)
	asm("adcs r12, r12, r12 ");
	asm("adcs r1, r1, r1 ");
	asm("adcs r2, r2, r2 ");
	asm("sub r0, r0, #1 ");				// and decrement exponent by one

	// round the result mantissa (round to nearest, ties to even)
	asm("TRealXMultiply4: ");
	asm("and r3, lr, #1 ");				// result sign bit back into r3
	asm("orrs r4, r7, r12 ");			// check for exact result
	asm("beq TRealXMultiply5 ");		// skip if exact
	asm("cmp r12, #0x80000000 ");		// compare bottom 64 bits to 80000000 00000000
	asm("cmpeq r7, #0 ");
	asm("moveqs r4, r1, lsr #1 ");		// if exactly equal, set carry=lsb of result
										// so we round up if lsb=1
	asm("orrcc r3, r3, #0x100 ");		// if rounding down, set rounded-down flag
	asm("orrcs r3, r3, #0x200 ");		// if rounding up, set rounded-up flag
	asm("adcs r1, r1, #0 ");			// increment mantissa if necessary
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");		// if carry, set mantissa to 80000000 00000000
	asm("addcs r0, r0, #1 ");			// and increment result exponent

	// check for overflow or underflow and assemble final result
	asm("TRealXMultiply5: ");
	asm("add r4, r0, #1 ");				// need to add 1 to get usable threshold
	asm("cmp r4, #0x10000 ");			// check if exponent >= 0xFFFF
	asm("bge TRealXMultiply6 ");		// if so, overflow
	asm("cmp r0, #0 ");					// check for underflow
	asm("orrgt r3, r3, r0, lsl #16 ");	// if no underflow, result exponent into r3, ...
	asm("movgt r12, #0 ");				// ... return KErrNone ...
	asm("bicgt pc, lr, #3 ");			// ... clearing the sign bit stashed in lr

	// underflow
	asm("mvn r12, #9 ");				// return KErrUnderflow
	asm("bic pc, lr, #3 ");

	// overflow
	asm("TRealXMultiply6: ");
	asm("bic r3, r3, #0x0000FF00 ");	// clear rounding flags
	asm("orr r3, r3, #0xFF000000 ");	// make exponent FFFF for infinity
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");		// mantissa = 80000000 00000000
	asm("mov r1, #0 ");
	asm("mvn r12, #8 ");				// return KErrOverflow
	asm("bic pc, lr, #3 ");

	// come here if second operand zero
	asm("TRealXMultiply3: ");
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");				// zero exponent, keep xor sign
	asm("mov r12, #0 ");				// return KErrNone
	asm("bic pc, lr, #3 ");

	// First operand NaN or infinity
	asm("TRealXMultiply1: ");
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("cmn r6, #0x10000 ");			// check 2nd operand for NaN/infinity
	asm("bcs TRealXMultiply1a ");		// branch if it is
	asm("cmp r6, #0x10000 ");			// else check if second operand zero
	asm("mvncs r12, #8 ");				// if not, return infinity and KErrOverflow
	asm("biccs pc, lr, #3 ");
	asm("b TRealXRealIndefinite ");		// else inf*0: return 'real indefinite'

	asm("TRealXMultiply1a: ");
	asm("cmp r5, #0x80000000 ");		// check 2nd operand for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("mvn r12, #8 ");				// else (infinity), return KErrOverflow
	asm("bic pc, lr, #3 ");

	// Second operand NaN or infinity, first operand finite
	asm("TRealXMultiply2: ");
	asm("cmp r5, #0x80000000 ");		// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("cmp r3, #0x10000 ");			// if infinity, check if first operand zero
	asm("bcc TRealXRealIndefinite ");	// if it is, return 'real indefinite'
	asm("orr r3, r3, #0xFF000000 ");	// else return infinity with xor sign
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #8 ");				// return KErrOverflow
	asm("bic pc, lr, #3 ");
	}
1.1801 +
1.1802 +
1.1803 +
1.1804 +
__NAKED__ EXPORT_C TInt TRealX::DivEq(const TRealX& /*aVal*/)
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow;
        KErrDivideByZero, if the divisor is zero.
*/
	{
	// TRealX words: 0 = mantissa low, 1 = mantissa high,
	// 2 = sign (bit 0), rounding flags (bits 8-9), exponent (bits 16-31).
	asm("stmfd sp!, {r0,r4-r9,lr} ");	// save this, callee-saved regs and return address
	asm("ldmia r1, {r4,r5,r6} ");		// r4,r5,r6 = aVal (the divisor)
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this (the dividend)
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {r0,r4-r9,lr} ");	// restore this into r0
	asm("stmia r0, {r1,r2,r3} ");		// write quotient back into *this
	asm("mov r0, r12 ");				// error code is returned in r12
	__JUMP(,lr);

	// TRealX division r1,r2,r3 / r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r9,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	// Algorithm: restoring shift-and-subtract long division producing one
	// quotient bit per shift; the "bic pc, lr, #3" returns clear the sign bit
	// this routine temporarily stashes in bit 0 of lr.
	asm("TRealXDivide: ");
	asm("mov r12, #0 ");				// initialise return value to KErrNone
	asm("bic r3, r3, #0x300 ");			// clear rounding flags
	asm("tst r6, #1 ");
	asm("eorne r3, r3, #1 ");			// Exclusive-OR signs
	asm("cmn r3, #0x10000 ");			// check if dividend is NaN or infinity
	asm("bcs TRealXDivide1 ");			// branch if it is
	asm("cmn r6, #0x10000 ");			// check if divisor is NaN or infinity
	asm("bcs TRealXDivide2 ");			// branch if it is
	asm("cmp r6, #0x10000 ");			// check if divisor zero
	asm("bcc TRealXDivide3 ");			// branch if it is
	asm("cmp r3, #0x10000 ");			// check if dividend zero
	__JUMP(cc,lr);						// if zero, exit
	asm("tst r3, #1 ");
	asm("orrne lr, lr, #1 ");			// save sign in bottom bit of lr

	// calculate result exponent
	asm("mov r0, r3, lsr #16 ");		// r0=dividend exponent
	asm("sub r0, r0, r6, lsr #16 ");	// r0=dividend exponent - divisor exponent
	asm("add r0, r0, #0x7F00 ");
	asm("add r0, r0, #0x00FF ");		// r0 now contains result exponent (bias restored)
	asm("mov r6, r1 ");					// move dividend into r6,r7,r8
	asm("mov r7, r2 ");
	asm("mov r8, #0 ");					// use r8 to hold extra bit shifted up
										// r2:r1 will hold result mantissa
	asm("mov r2, #1 ");					// we will make sure first bit is 1
	asm("cmp r7, r5 ");					// compare dividend mantissa to divisor mantissa
	asm("cmpeq r6, r4 ");
	asm("bcs TRealXDivide4 ");			// branch if dividend >= divisor
	asm("adds r6, r6, r6 ");			// else shift dividend left one
	asm("adcs r7, r7, r7 ");			// ignore carry here
	asm("sub r0, r0, #1 ");				// decrement result exponent by one
	asm("TRealXDivide4: ");
	asm("subs r6, r6, r4 ");			// subtract divisor from dividend
	asm("sbcs r7, r7, r5 ");

	// Main mantissa division code
	// First calculate the top 32 bits of the result
	// Top bit is 1, do 10 lots of 3 bits then one more bit
	asm("mov r12, #10 ");
	asm("TRealXDivide5: ");				// loop body is unrolled 3x per iteration
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");			// shift in new result bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");			// shift in new result bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");			// shift in new result bit
	asm("subs r12, r12, #1 ");
	asm("bne TRealXDivide5 ");			// iterate the loop
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");			// shift in new result bit - now have 32 bits

	// Now calculate the bottom 32 bits of the result
	// Do 8 lots of 4 bits
	asm("mov r12, #8 ");
	asm("TRealXDivide5a: ");			// loop body is unrolled 4x per iteration
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");			// shift in new result bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");			// shift in new result bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");			// shift in new result bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");			// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("movcs r6, r9 ");				// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");			// shift in new result bit
	asm("subs r12, r12, #1 ");
	asm("bne TRealXDivide5a ");			// iterate the loop

	// r2:r1 now contains a 64-bit normalised mantissa
	// need to do rounding now (round to nearest, ties to even)
	asm("and r3, lr, #1 ");				// result sign back into r3
	asm("orrs r9, r6, r7 ");			// check if accumulator zero
	asm("beq TRealXDivide6 ");			// if it is, result is exact, else generate next bit
	asm("adds r6, r6, r6 ");			// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r6, r6, r4 ");			// subtract divisor from accumulator
	asm("sbcs r7, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");		// if borrow, check for carry from shift
	asm("orrcc r3, r3, #0x100 ");		// if borrow, round down and set round-down flag
	asm("bcc TRealXDivide6 ");
	asm("orrs r9, r6, r7 ");			// if no borrow, check if exactly half-way
	asm("moveqs r9, r1, lsr #1 ");		// if exactly half-way, round to even
	asm("orrcc r3, r3, #0x100 ");		// if C=0, round result down and set round-down flag
	asm("bcc TRealXDivide6 ");
	asm("orr r3, r3, #0x200 ");			// else set round-up flag
	asm("adds r1, r1, #1 ");			// and round mantissa up
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");		// if carry, mantissa = 80000000 00000000
	asm("addcs r0, r0, #1 ");			// and increment exponent

	// check for overflow or underflow and assemble final result
	asm("TRealXDivide6: ");
	asm("add r4, r0, #1 ");				// need to add 1 to get usable threshold
	asm("cmp r4, #0x10000 ");			// check if exponent >= 0xFFFF
	asm("bge TRealXMultiply6 ");		// if so, overflow (shares multiply's overflow exit)
	asm("cmp r0, #0 ");					// check for underflow
	asm("orrgt r3, r3, r0, lsl #16 ");	// if no underflow, result exponent into r3, ...
	asm("movgt r12, #0 ");				// ... return KErrNone ...
	asm("bicgt pc, lr, #3 ");

	// underflow
	asm("and r3, r3, #1 ");				// set exponent=0, keep sign
	asm("mvn r12, #9 ");				// return KErrUnderflow
	asm("bic pc, lr, #3 ");

	// come here if divisor is zero, dividend finite
	asm("TRealXDivide3: ");
	asm("cmp r3, #0x10000 ");			// check if dividend also zero
	asm("bcc TRealXRealIndefinite ");	// if so (0/0), return 'real indefinite'
	asm("orr r3, r3, #0xFF000000 ");	// else return infinity with xor sign
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #40 ");				// return KErrDivideByZero
	asm("bic pc, lr, #3 ");

	// Dividend is NaN or infinity
	asm("TRealXDivide1: ");
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("cmn r6, #0x10000 ");			// check 2nd operand for NaN/infinity
	asm("mvncc r12, #8 ");				// if not (inf/finite), return KErrOverflow
	asm("biccc pc, lr, #3 ");

	// Dividend=infinity, divisor=NaN or infinity
	asm("cmp r5, #0x80000000 ");		// check 2nd operand for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("b TRealXRealIndefinite ");		// else inf/inf: return 'real indefinite'

	// Divisor is NaN or infinity, dividend finite
	asm("TRealXDivide2: ");
	asm("cmp r5, #0x80000000 ");		// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("and r3, r3, #1 ");				// else finite/inf: return zero with xor sign
	asm("bic pc, lr, #3 ");
	}
1.2025 +
1.2026 +
1.2027 +
1.2028 +
__NAKED__ EXPORT_C TInt TRealX::ModEq(const TRealX& /*aVal*/)
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrTotalLossOfPrecision, if precision is lost;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// TRealX words: 0 = mantissa low, 1 = mantissa high,
	// 2 = sign (bit 0), rounding flags (bits 8-9), exponent (bits 16-31).
	asm("stmfd sp!, {r0,r4-r7,lr} ");	// save this, callee-saved regs and return address
	asm("ldmia r1, {r4,r5,r6} ");		// r4,r5,r6 = aVal (the divisor)
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this (the dividend)
	asm("bl TRealXModulo ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");	// restore this into r0
	asm("stmia r0, {r1,r2,r3} ");		// write remainder back into *this
	asm("mov r0, r12 ");				// error code is returned in r12
	__JUMP(,lr);

	// TRealX remainder r1,r2,r3 % r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r7,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	// Algorithm: repeated conditional subtraction of the divisor mantissa from the
	// dividend mantissa, one bit position per iteration, then renormalisation.
	// The result always carries the sign of the dividend.
	asm("TRealXModulo: ");
	asm("mov r12, #0 ");				// initialise return value to KErrNone
	asm("cmn r3, #0x10000 ");			// check if dividend is NaN or infinity
	asm("bcs TRealXModulo1 ");			// branch if it is
	asm("cmn r6, #0x10000 ");			// check if divisor is NaN or infinity
	asm("bcs TRealXModulo2 ");			// branch if it is
	asm("cmp r6, #0x10000 ");			// check if divisor zero
	asm("bcc TRealXRealIndefinite ");	// if it is, return 'real indefinite'
	asm("mov r0, r3, lsr #16 ");		// r0=dividend exponent
	asm("subs r0, r0, r6, lsr #16 ");	// r0=dividend exponent-divisor exponent
	__JUMP(lt,lr);						// |dividend|<|divisor|: remainder is the dividend, unchanged
	asm("cmp r0, #64 ");				// check if difference >= 64 bits
	asm("bcs TRealXModuloLp ");			// if so, all precision would be lost
	asm("b TRealXModulo4 ");			// skip left shift on first iteration

	asm("TRealXModulo3: ");
	asm("adds r1, r1, r1 ");			// shift dividend mantissa left one bit
	asm("adcs r2, r2, r2 ");
	asm("bcs TRealXModulo5 ");			// if one shifted out, override comparison
	asm("TRealXModulo4: ");
	asm("cmp r2, r5 ");					// compare dividend to divisor
	asm("cmpeq r1, r4 ");
	asm("bcc TRealXModulo6 ");			// if dividend<divisor, skip
	asm("TRealXModulo5: ");
	asm("subs r1, r1, r4 ");			// if dividend>=divisor, dividend-=divisor
	asm("sbcs r2, r2, r5 ");
	asm("TRealXModulo6: ");
	asm("subs r0, r0, #1 ");			// decrement loop count
	asm("bpl TRealXModulo3 ");			// if more bits to do, loop

	asm("orrs r0, r1, r2 ");			// test for exact zero result
	asm("andeq r3, r3, #1 ");			// if so, return zero with same sign as dividend
	__JUMP(eq,lr);
	asm("and r7, r3, #1 ");				// dividend sign bit into r7
	asm("mov r3, r6, lsr #16 ");		// r3 lower 16 bits=result exponent=divisor exponent
	asm("cmp r2, #0 ");					// test if upper 32 bits zero
	asm("moveq r2, r1 ");				// if so, shift left by 32
	asm("moveq r1, #0 ");
	asm("subeqs r3, r3, #32 ");			// and subtract 32 from exponent
	asm("bls TRealXModuloUnderflow ");	// if borrow from exponent or exponent 0, underflow
	asm("mov r0, #32 ");				// r0 will hold 32-number of shifts to normalise
	asm("cmp r2, #0x00010000 ");		// normalise (binary-search shift count: 16,8,4,2,1)
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r0, r0, #16 ");
	asm("cmp r2, #0x01000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r0, r0, #8 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r0, r0, #4 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r0, r0, #2 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");		// top bit of r2 is now set
	asm("subcc r0, r0, #1 ");
	asm("orr r2, r2, r1, lsr r0 ");		// top bits of r1 into bottom bits of r2
	asm("rsb r0, r0, #32 ");			// r0=number of shifts to normalise
	asm("mov r1, r1, lsl r0 ");			// shift r1 left - mantissa now normalised
	asm("subs r3, r3, r0 ");			// subtract r0 from exponent
	asm("bls TRealXModuloUnderflow ");	// if borrow from exponent or exponent 0, underflow
	asm("orr r3, r7, r3, lsl #16 ");	// else r3=result exponent and sign
	__JUMP(,lr);

	// dividend=NaN or infinity
	asm("TRealXModulo1: ");
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("cmn r6, #0x10000 ");			// check 2nd operand for NaN/infinity
	asm("bcc TRealXRealIndefinite ");	// infinity%finite - return 'real indefinite'
	asm("cmp r5, #0x80000000 ");		// check if divisor=infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	asm("b TRealXRealIndefinite ");		// else infinity%infinity - return 'real indefinite'

	// divisor=NaN or infinity, dividend finite
	asm("TRealXModulo2: ");
	asm("cmp r5, #0x80000000 ");		// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");			// branch if NaN
	__JUMP(,lr);						// finite%infinity: dividend is returned unchanged

	// exponent difference >= 64: result would have no significant bits
	asm("TRealXModuloLp: ");
	asm("mvn r12, #%a0" : : "i" ((TInt)~KErrTotalLossOfPrecision));
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");				// return signed zero
	__JUMP(,lr);

	// remainder too small to represent
	asm("TRealXModuloUnderflow: ");
	asm("mvn r12, #%a0" : : "i" ((TInt)~KErrUnderflow));
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");				// return signed zero
	__JUMP(,lr);
	}
1.2150 +
1.2151 +
1.2152 +
1.2153 +
__NAKED__ EXPORT_C TInt TRealX::Add(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Adds an extended precision value to this extended precision number.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal The extended precision value to be added.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	// Thin wrapper: loads both operands into registers and delegates to the
	// internal TRealXAdd routine (defined earlier in this file), which takes
	// r1,r2,r3 + r4,r5,r6 and returns the result in r1,r2,r3 with the error
	// code in r12.
	asm("stmfd sp!, {r1,r4-r8,lr} ");	// save &aResult, callee-saved regs and return address
	asm("ldmia r2, {r4,r5,r6} ");		// r4,r5,r6 = aVal
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {lr} ");			// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");		// store result into aResult
	asm("mov r0, r12 ");				// return value into r0
	__POPRET("r4-r8,");
	}
1.2177 +
1.2178 +
1.2179 +
1.2180 +
__NAKED__ EXPORT_C TInt TRealX::Sub(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Subtracts an extended precision value from this extended precision number.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal The extended precision value to be subtracted.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	// Thin wrapper around the internal TRealXSubtract routine (defined earlier
	// in this file): operands in r1,r2,r3 and r4,r5,r6, result in r1,r2,r3,
	// error code in r12.
	asm("stmfd sp!, {r1,r4-r8,lr} ");	// save &aResult, callee-saved regs and return address
	asm("ldmia r2, {r4,r5,r6} ");		// r4,r5,r6 = aVal
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {lr} ");			// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");		// store result into aResult
	asm("mov r0, r12 ");				// return value into r0
	__POPRET("r4-r8,");
	}
1.2204 +
1.2205 +
1.2206 +
1.2207 +
__NAKED__ EXPORT_C TInt TRealX::Mult(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Multiplies this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal The extended precision value to be used as the multiplier.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	// Thin wrapper around the internal TRealXMultiply routine (see MultEq):
	// operands in r1,r2,r3 and r4,r5,r6, result in r1,r2,r3, error code in r12.
	asm("stmfd sp!, {r1,r4-r7,lr} ");	// save &aResult, callee-saved regs and return address
	asm("ldmia r2, {r4,r5,r6} ");		// r4,r5,r6 = aVal
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {lr} ");			// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");		// store result into aResult
	asm("mov r0, r12 ");				// return value into r0
	__POPRET("r4-r7,");
	}
1.2231 +
1.2232 +
1.2233 +
1.2234 +
__NAKED__ EXPORT_C TInt TRealX::Div(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Divides this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow;
        KErrDivideByZero, if the divisor is zero.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	// Thin wrapper around the internal TRealXDivide routine (see DivEq):
	// dividend in r1,r2,r3, divisor in r4,r5,r6, quotient in r1,r2,r3,
	// error code in r12. TRealXDivide also uses r8,r9, hence the wider save.
	asm("stmfd sp!, {r1,r4-r9,lr} ");	// save &aResult, callee-saved regs and return address
	asm("ldmia r2, {r4,r5,r6} ");		// r4,r5,r6 = aVal (divisor)
	asm("ldmia r0, {r1,r2,r3} ");		// r1,r2,r3 = *this (dividend)
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {lr} ");			// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");		// store quotient into aResult
	asm("mov r0, r12 ");				// return value into r0
	__POPRET("r4-r9,");
	}
1.2259 +
1.2260 +
1.2261 +
1.2262 +
1.2263 +__NAKED__ EXPORT_C TInt TRealX::Mod(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
1.2264 +/**
1.2265 +Modulo-divides this extended precision number by an extended precision value.
1.2266 +
1.2267 +@param aResult On return, a reference to an extended precision object
1.2268 + containing the result of the operation.
1.2269 +
1.2270 +@param aVal The extended precision value to be used as the divisor.
1.2271 +
1.2272 +@return KErrNone, if the operation is successful;
1.2273 + KErrTotalLossOfPrecision, if precision is lost;
1.2274 + KErrUnderflow, if the operation results in underflow.
1.2275 +*/
1.2276 + {
1.2277 + // r0=this, r1=&aResult, r2=&aVal
1.2278 + asm("stmfd sp!, {r1,r4-r7,lr} "); // save &aResult and callee-saved registers
1.2279 + asm("ldmia r2, {r4,r5,r6} "); // aVal (divisor) into r4,r5,r6 (mant low, mant high, exp/sign)
1.2280 + asm("ldmia r0, {r1,r2,r3} "); // *this (dividend) into r1,r2,r3
1.2281 + asm("bl TRealXModulo "); // result in r1,r2,r3, error code in r12
1.2282 + asm("ldmfd sp!, {lr} "); // lr=&aResult
1.2283 + asm("stmia lr, {r1,r2,r3} "); // store result into aResult
1.2284 + asm("mov r0, r12 "); // return value into r0
1.2285 + __POPRET("r4-r7,");
1.2286 + }
1.2287 +
1.2288 +extern void PanicOverUnderflowDividebyZero(const TInt aErr);
1.2289 +
1.2290 +
1.2291 +
1.2292 +
1.2293 +__NAKED__ EXPORT_C const TRealX& TRealX::operator+=(const TRealX& /*aVal*/)
1.2294 +/**
1.2295 +Adds an extended precision value to this extended precision number.
1.2296 +
1.2297 +@param aVal The extended precision value to be added.
1.2298 +
1.2299 +@return A reference to this object.
1.2300 +
1.2301 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2302 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2303 +*/
1.2304 + {
1.2305 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save this and callee-saved registers
1.2306 + asm("ldmia r1, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2307 + asm("ldmia r0, {r1,r2,r3} "); // *this into r1,r2,r3
1.2308 + asm("bl TRealXAdd "); // result in r1,r2,r3, error code in r12
1.2309 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0=this and saved registers
1.2310 + asm("stmia r0, {r1,r2,r3} "); // store result back into *this
1.2311 + asm("cmp r12, #0 "); // check the error code
1.2312 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2313 + asm("mov r0, r12 "); // error code is the panic reason
1.2314 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2315 + }
1.2316 +
1.2317 +
1.2318 +
1.2319 +
1.2320 +__NAKED__ EXPORT_C const TRealX& TRealX::operator-=(const TRealX& /*aVal*/)
1.2321 +/**
1.2322 +Subtracts an extended precision value from this extended precision number.
1.2323 +
1.2324 +@param aVal The extended precision value to be subtracted.
1.2325 +
1.2326 +@return A reference to this object.
1.2327 +
1.2328 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2329 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2330 +*/
1.2331 + {
1.2332 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save this and callee-saved registers
1.2333 + asm("ldmia r1, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2334 + asm("ldmia r0, {r1,r2,r3} "); // *this into r1,r2,r3
1.2335 + asm("bl TRealXSubtract "); // result in r1,r2,r3, error code in r12
1.2336 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0=this and saved registers
1.2337 + asm("stmia r0, {r1,r2,r3} "); // store result back into *this
1.2338 + asm("cmp r12, #0 "); // check the error code
1.2339 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2340 + asm("mov r0, r12 "); // error code is the panic reason
1.2341 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2342 + }
1.2343 +
1.2344 +
1.2345 +
1.2346 +
1.2347 +__NAKED__ EXPORT_C const TRealX& TRealX::operator*=(const TRealX& /*aVal*/)
1.2348 +/**
1.2349 +Multiplies this extended precision number by an extended precision value.
1.2350 +
1.2351 +@param aVal The extended precision value to be used as the multiplier.
1.2352 +
1.2353 +@return A reference to this object.
1.2354 +
1.2355 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2356 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2357 +*/
1.2358 + {
1.2359 + asm("stmfd sp!, {r0,r4-r7,lr} "); // save this and callee-saved registers
1.2360 + asm("ldmia r1, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2361 + asm("ldmia r0, {r1,r2,r3} "); // *this into r1,r2,r3
1.2362 + asm("bl TRealXMultiply "); // result in r1,r2,r3, error code in r12
1.2363 + asm("ldmfd sp!, {r0,r4-r7,lr} "); // restore r0=this and saved registers
1.2364 + asm("stmia r0, {r1,r2,r3} "); // store result back into *this
1.2365 + asm("cmp r12, #0 "); // check the error code
1.2366 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2367 + asm("mov r0, r12 "); // error code is the panic reason
1.2368 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2369 + }
1.2370 +
1.2371 +
1.2372 +
1.2373 +
1.2374 +__NAKED__ EXPORT_C const TRealX& TRealX::operator/=(const TRealX& /*aVal*/)
1.2375 +/**
1.2376 +Divides this extended precision number by an extended precision value.
1.2377 +
1.2378 +@param aVal The extended precision value to be used as the divisor.
1.2379 +
1.2380 +@return A reference to this object.
1.2381 +
1.2382 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2383 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2384 +@panic MATHX KErrDivideByZero if the divisor is zero.
1.2385 +*/
1.2386 + {
1.2387 + asm("stmfd sp!, {r0,r4-r9,lr} "); // save this and callee-saved registers
1.2388 + asm("ldmia r1, {r4,r5,r6} "); // aVal (divisor) into r4,r5,r6 (mant low, mant high, exp/sign)
1.2389 + asm("ldmia r0, {r1,r2,r3} "); // *this (dividend) into r1,r2,r3
1.2390 + asm("bl TRealXDivide "); // result in r1,r2,r3, error code in r12
1.2391 + asm("ldmfd sp!, {r0,r4-r9,lr} "); // restore r0=this and saved registers
1.2392 + asm("stmia r0, {r1,r2,r3} "); // store result back into *this
1.2393 + asm("cmp r12, #0 "); // check the error code
1.2394 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2395 + asm("mov r0, r12 "); // error code is the panic reason
1.2396 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2397 + }
1.2398 +
1.2399 +
1.2400 +
1.2401 +
1.2402 +__NAKED__ EXPORT_C const TRealX& TRealX::operator%=(const TRealX& /*aVal*/)
1.2403 +/**
1.2404 +Modulo-divides this extended precision number by an extended precision value.
1.2405 +
1.2406 +@param aVal The extended precision value to be used as the divisor.
1.2407 +
1.2408 +@return A reference to this object.
1.2409 +
1.2410 +@panic MATHX KErrTotalLossOfPrecision if precision is lost.
1.2411 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2412 +*/
1.2413 + {
1.2414 + asm("stmfd sp!, {r0,r4-r7,lr} "); // save this and callee-saved registers
1.2415 + asm("ldmia r1, {r4,r5,r6} "); // aVal (divisor) into r4,r5,r6 (mant low, mant high, exp/sign)
1.2416 + asm("ldmia r0, {r1,r2,r3} "); // *this (dividend) into r1,r2,r3
1.2417 + asm("bl TRealXModulo "); // result in r1,r2,r3, error code in r12
1.2418 + asm("ldmfd sp!, {r0,r4-r7,lr} "); // restore r0=this and saved registers
1.2419 + asm("stmia r0, {r1,r2,r3} "); // store result back into *this
1.2420 + asm("cmp r12, #0 "); // check the error code
1.2421 + asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision)); // NOTE(review): KErrTotalLossOfPrecision appears tolerated here despite the @panic doc above — confirm intended
1.2422 + __JUMP(eq,lr); // no error (or total precision loss): return *this
1.2423 + asm("mov r0, r12 "); // error code is the panic reason
1.2424 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2425 + }
1.2426 +
1.2427 +
1.2428 +
1.2429 +
1.2430 +__NAKED__ EXPORT_C TRealX& TRealX::operator++()
1.2431 +/**
1.2432 +Increments this extended precision number by one,
1.2433 +and then returns a reference to it.
1.2434 +
1.2435 +This is also referred to as a prefix operator.
1.2436 +
1.2437 +@return A reference to this object.
1.2438 +
1.2439 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2440 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2441 +*/
1.2442 + {
1.2443 + // pre-increment
1.2444 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save this and callee-saved registers
1.2445 + asm("ldmia r0, {r1,r2,r3} "); // *this into r1,r2,r3
1.2446 + asm("add r4, pc, #__TRealXOne-.-8 "); // r4=address of TRealX constant 1.0 below
1.2447 + asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
1.2448 + asm("bl TRealXAdd "); // result in r1,r2,r3, error code in r12
1.2449 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0=this and saved registers
1.2450 + asm("stmia r0, {r1,r2,r3} "); // store incremented value back into *this
1.2451 + asm("cmp r12, #0 "); // check the error code
1.2452 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2453 + asm("mov r0, r12 "); // error code is the panic reason
1.2454 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2455 +
1.2456 + asm("__TRealXOne: "); // TRealX representation of 1.0:
1.2457 + asm(".word 0x00000000 "); // mantissa low word
1.2458 + asm(".word 0x80000000 "); // mantissa high word
1.2459 + asm(".word 0x7FFF0000 "); // exponent/sign word
1.2460 + }
1.2461 +
1.2462 +
1.2463 +
1.2464 +
1.2465 +__NAKED__ EXPORT_C TRealX TRealX::operator++(TInt)
1.2466 +/**
1.2467 +Returns this extended precision number before incrementing it by one.
1.2468 +
1.2469 +This is also referred to as a postfix operator.
1.2470 +
1.2471 +@return An extended precision object containing the value before the increment.
1.2472 +
1.2473 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2474 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2475 +*/
1.2476 + {
1.2477 + // post-increment
1.2478 + // r0=address of return value, r1=this
1.2479 + asm("stmfd sp!, {r0,r1,r4-r8,lr} "); // save return value address, this and callee-saved regs
1.2480 + asm("ldmia r1, {r1,r2,r3} "); // *this into r1,r2,r3
1.2481 + asm("stmia r0, {r1,r2,r3} "); // store old value
1.2482 + asm("add r4, pc, #__TRealXOne-.-8 "); // r4=address of TRealX constant 1.0
1.2483 + asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
1.2484 + asm("bl TRealXAdd "); // result in r1,r2,r3, error code in r12
1.2485 + asm("ldmfd sp!, {r0,lr} "); // restore r0, lr=this
1.2486 + asm("stmia lr, {r1,r2,r3} "); // store incremented value
1.2487 + asm("ldmfd sp!, {r4-r8,lr} "); // restore remaining registers
1.2488 + asm("cmp r12, #0 "); // check the error code
1.2489 + __JUMP(eq,lr); // no error: return old value (address already in r0)
1.2490 + asm("mov r0, r12 "); // error code is the panic reason
1.2491 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2492 + }
1.2493 +
1.2494 +
1.2495 +
1.2496 +
1.2497 +__NAKED__ EXPORT_C TRealX& TRealX::operator--()
1.2498 +/**
1.2499 +Decrements this extended precision number by one,
1.2500 +and then returns a reference to it.
1.2501 +
1.2502 +This is also referred to as a prefix operator.
1.2503 +
1.2504 +@return A reference to this object.
1.2505 +
1.2506 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2507 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2508 +*/
1.2509 + {
1.2510 + // pre-decrement
1.2511 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save this and callee-saved registers
1.2512 + asm("ldmia r0, {r1,r2,r3} "); // *this into r1,r2,r3
1.2513 + asm("add r4, pc, #__TRealXOne-.-8 "); // r4=address of TRealX constant 1.0
1.2514 + asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
1.2515 + asm("bl TRealXSubtract "); // result in r1,r2,r3, error code in r12
1.2516 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0=this and saved registers
1.2517 + asm("stmia r0, {r1,r2,r3} "); // store decremented value back into *this
1.2518 + asm("cmp r12, #0 "); // check the error code
1.2519 + __JUMP(eq,lr); // no error: return *this (already in r0)
1.2520 + asm("mov r0, r12 "); // error code is the panic reason
1.2521 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2522 + }
1.2523 +
1.2524 +
1.2525 +
1.2526 +
1.2527 +__NAKED__ EXPORT_C TRealX TRealX::operator--(TInt)
1.2528 +/**
1.2529 +Returns this extended precision number before decrementing it by one.
1.2530 +
1.2531 +This is also referred to as a postfix operator.
1.2532 +
1.2533 +@return An extended precision object containing the value before the decrement.
1.2534 +
1.2535 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2536 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2537 +*/
1.2538 + {
1.2539 + // post-decrement
1.2540 + // r0=address of return value, r1=this
1.2541 + asm("stmfd sp!, {r0,r1,r4-r8,lr} "); // save return value address, this and callee-saved regs
1.2542 + asm("ldmia r1, {r1,r2,r3} "); // *this into r1,r2,r3
1.2543 + asm("stmia r0, {r1,r2,r3} "); // store old value
1.2544 + asm("add r4, pc, #__TRealXOne-.-8 "); // r4=address of TRealX constant 1.0
1.2545 + asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
1.2546 + asm("bl TRealXSubtract "); // result in r1,r2,r3, error code in r12
1.2547 + asm("ldmfd sp!, {r0,lr} "); // restore r0, lr=this
1.2548 + asm("stmia lr, {r1,r2,r3} "); // store decremented value
1.2549 + asm("ldmfd sp!, {r4-r8,lr} "); // restore remaining registers
1.2550 + asm("cmp r12, #0 "); // check the error code
1.2551 + __JUMP(eq,lr); // no error: return old value (address already in r0)
1.2552 + asm("mov r0, r12 "); // error code is the panic reason
1.2553 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2554 + }
1.2555 +
1.2556 +
1.2557 +
1.2558 +
1.2559 +__NAKED__ EXPORT_C TRealX TRealX::operator+(const TRealX& /*aVal*/) const
1.2560 +/**
1.2561 +Adds an extended precision value to this extended precision number.
1.2562 +
1.2563 +@param aVal The extended precision value to be added.
1.2564 +
1.2565 +@return An extended precision object containing the result.
1.2566 +
1.2567 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2568 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2569 +*/
1.2570 + {
1.2571 + // r0=address of return value, r1=this, r2=&aVal
1.2572 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save return value address and callee-saved registers
1.2573 + asm("ldmia r2, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2574 + asm("ldmia r1, {r1,r2,r3} "); // *this into r1,r2,r3
1.2575 + asm("bl TRealXAdd "); // result in r1,r2,r3, error code in r12
1.2576 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore return value address and registers
1.2577 + asm("stmia r0, {r1,r2,r3} "); // store result into return value
1.2578 + asm("cmp r12, #0 "); // check the error code
1.2579 + __JUMP(eq,lr); // no error: return (result address already in r0)
1.2580 + asm("mov r0, r12 "); // error code is the panic reason
1.2581 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2582 + }
1.2583 +
1.2584 +
1.2585 +
1.2586 +
1.2587 +__NAKED__ EXPORT_C TRealX TRealX::operator-(const TRealX& /*aVal*/) const
1.2588 +/**
1.2589 +Subtracts an extended precision value from this extended precision number.
1.2590 +
1.2591 +@param aVal The extended precision value to be subtracted.
1.2592 +
1.2593 +@return An extended precision object containing the result.
1.2594 +
1.2595 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2596 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2597 +*/
1.2598 + {
1.2599 + // r0=address of return value, r1=this, r2=&aVal
1.2600 + asm("stmfd sp!, {r0,r4-r8,lr} "); // save return value address and callee-saved registers
1.2601 + asm("ldmia r2, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2602 + asm("ldmia r1, {r1,r2,r3} "); // *this into r1,r2,r3
1.2603 + asm("bl TRealXSubtract "); // result in r1,r2,r3, error code in r12
1.2604 + asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore return value address and registers
1.2605 + asm("stmia r0, {r1,r2,r3} "); // store result into return value
1.2606 + asm("cmp r12, #0 "); // check the error code
1.2607 + __JUMP(eq,lr); // no error: return (result address already in r0)
1.2608 + asm("mov r0, r12 "); // error code is the panic reason
1.2609 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2610 + }
1.2611 +
1.2612 +
1.2613 +
1.2614 +
1.2615 +__NAKED__ EXPORT_C TRealX TRealX::operator*(const TRealX& /*aVal*/) const
1.2616 +/**
1.2617 +Multiplies this extended precision number by an extended precision value.
1.2618 +
1.2619 +@param aVal The extended precision value to be used as the multiplier.
1.2620 +
1.2621 +@return An extended precision object containing the result.
1.2622 +
1.2623 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2624 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2625 +*/
1.2626 + {
1.2627 + // r0=address of return value, r1=this, r2=&aVal
1.2628 + asm("stmfd sp!, {r0,r4-r7,lr} "); // save return value address and callee-saved registers
1.2629 + asm("ldmia r2, {r4,r5,r6} "); // aVal into r4,r5,r6 (mant low, mant high, exp/sign)
1.2630 + asm("ldmia r1, {r1,r2,r3} "); // *this into r1,r2,r3
1.2631 + asm("bl TRealXMultiply "); // result in r1,r2,r3, error code in r12
1.2632 + asm("ldmfd sp!, {r0,r4-r7,lr} "); // restore return value address and registers
1.2633 + asm("stmia r0, {r1,r2,r3} "); // store result into return value
1.2634 + asm("cmp r12, #0 "); // check the error code
1.2635 + __JUMP(eq,lr); // no error: return (result address already in r0)
1.2636 + asm("mov r0, r12 "); // error code is the panic reason
1.2637 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2638 + }
1.2639 +
1.2640 +
1.2641 +
1.2642 +
1.2643 +__NAKED__ EXPORT_C TRealX TRealX::operator/(const TRealX& /*aVal*/) const
1.2644 +/**
1.2645 +Divides this extended precision number by an extended precision value.
1.2646 +
1.2647 +@param aVal The extended precision value to be used as the divisor.
1.2648 +
1.2649 +@return An extended precision object containing the result.
1.2650 +
1.2651 +@panic MATHX KErrOverflow if the operation results in overflow.
1.2652 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2653 +@panic MATHX KErrDivideByZero if the divisor is zero.
1.2654 +*/
1.2655 + {
1.2656 + // r0=address of return value, r1=this, r2=&aVal
1.2657 + asm("stmfd sp!, {r0,r4-r9,lr} "); // save return value address and callee-saved registers
1.2658 + asm("ldmia r2, {r4,r5,r6} "); // aVal (divisor) into r4,r5,r6 (mant low, mant high, exp/sign)
1.2659 + asm("ldmia r1, {r1,r2,r3} "); // *this (dividend) into r1,r2,r3
1.2660 + asm("bl TRealXDivide "); // result in r1,r2,r3, error code in r12
1.2661 + asm("ldmfd sp!, {r0,r4-r9,lr} "); // restore return value address and registers
1.2662 + asm("stmia r0, {r1,r2,r3} "); // store result into return value
1.2663 + asm("cmp r12, #0 "); // check the error code
1.2664 + __JUMP(eq,lr); // no error: return (result address already in r0)
1.2665 + asm("mov r0, r12 "); // error code is the panic reason
1.2666 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2667 + }
1.2668 +
1.2669 +
1.2670 +
1.2671 +
1.2672 +__NAKED__ EXPORT_C TRealX TRealX::operator%(const TRealX& /*aVal*/) const
1.2673 +/**
1.2674 +Modulo-divides this extended precision number by an extended precision value.
1.2675 +
1.2676 +@param aVal The extended precision value to be used as the divisor.
1.2677 +
1.2678 +@return An extended precision object containing the result.
1.2679 +
1.2680 +@panic MATHX KErrTotalLossOfPrecision if precision is lost.
1.2681 +@panic MATHX KErrUnderflow if the operation results in underflow.
1.2682 +*/
1.2683 + {
1.2684 + // r0=address of return value, r1=this, r2=&aVal
1.2685 + asm("stmfd sp!, {r0,r4-r7,lr} "); // save return value address and callee-saved registers
1.2686 + asm("ldmia r2, {r4,r5,r6} "); // aVal (divisor) into r4,r5,r6 (mant low, mant high, exp/sign)
1.2687 + asm("ldmia r1, {r1,r2,r3} "); // *this (dividend) into r1,r2,r3
1.2688 + asm("bl TRealXModulo "); // result in r1,r2,r3, error code in r12
1.2689 + asm("ldmfd sp!, {r0,r4-r7,lr} "); // restore return value address and registers
1.2690 + asm("stmia r0, {r1,r2,r3} "); // store result into return value
1.2691 + asm("cmp r12, #0 "); // check the error code
1.2692 + asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision)); // NOTE(review): KErrTotalLossOfPrecision appears tolerated here despite the @panic doc above — confirm intended
1.2693 + __JUMP(eq,lr); // no error (or total precision loss): return
1.2694 + asm("mov r0, r12 "); // error code is the panic reason
1.2695 + asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
1.2696 + }
1.2697 +
1.2698 +
1.2699 +
1.2700 +
1.2701 +#ifdef __REALS_MACHINE_CODED__
1.2702 +__NAKED__ EXPORT_C TInt Math::Sqrt( TReal &/*aDest*/, const TReal &/*aSrc*/ )
1.2703 +/**
1.2704 +Calculates the square root of a number.
1.2705 +
1.2706 +@param aDest A reference containing the result.
1.2707 +@param aSrc The number whose square-root is required.
1.2708 +
1.2709 +@return KErrNone if successful, otherwise another of
1.2710 + the system-wide error codes.
1.2711 +*/
1.2712 + {
1.2713 + // r0=address of aDest, r1=address of aSrc
1.2714 +
1.2715 +
1.2716 +#ifdef __USE_VFP_MATH
1.2717 + VFP_FLDD(CC_AL,0,1,0); // load aSrc into d0
1.2718 + VFP_FSQRTD(,0,0); // d0=sqrt(d0) in hardware
1.2719 + VFP_FMRRD(CC_AL,3,2,0); // result to ARM regs: r2=sign/exp/mant high, r3=mant low
1.2720 + asm("bic r1, r2, #0x80000000 "); // remove sign bit
1.2721 + asm("cmn r1, #0x00100000 "); // check if exp=7FF
1.2722 + asm("movpl r1, #0 "); // if not return KErrNone
1.2723 + asm("bpl donesqrt ");
1.2724 + asm("movs r1, r1, lsl #12 "); // if exp=7FF, check mantissa
1.2725 + asm("cmpeq r3, #0 ");
1.2726 + asm("moveq r1, #-9 "); // if exp=7FF, mant=0, return KErrOverflow
1.2727 + asm("mvnne r2, #0x80000000 "); // else set NaN
1.2728 + asm("mvnne r3, #0 ");
1.2729 + asm("movne r1, #-6 "); // and return KErrArgument
1.2730 + asm("donesqrt: ");
1.2731 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.2732 + asm("stmia r0, {r2,r3} "); // store the result
1.2733 +#else
1.2734 + asm("str r2, [r0, #4] ");
1.2735 + asm("str r3, [r0, #0] ");
1.2736 +#endif
1.2737 + asm("mov r0, r1 "); // error code into return register
1.2738 + __JUMP(,lr);
1.2739 +#else // __USE_VFP_MATH
1.2740 + asm("stmfd sp!, {r4-r10,lr} "); // software square root: save working registers
1.2741 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.2742 + asm("ldmia r1, {r3,r4} "); // low mant into r4, sign:exp:high mant into r3
1.2743 +#else
1.2744 + asm("ldr r3, [r1, #4] ");
1.2745 + asm("ldr r4, [r1, #0] ");
1.2746 +#endif
1.2747 + asm("bic r5, r3, #0xFF000000 ");
1.2748 + asm("bic r5, r5, #0x00F00000 "); // high word of mantissa into r5
1.2749 + asm("mov r2, r3, lsr #20 ");
1.2750 + asm("bics r2, r2, #0x800 "); // exponent now in r2
1.2751 + asm("beq fastsqrt1 "); // branch if exponent zero (zero or denormal)
1.2752 + asm("mov r6, #0xFF ");
1.2753 + asm("orr r6, r6, #0x700 "); // r6=0x7FF = exponent of infinity/NaN
1.2754 + asm("cmp r2, r6 "); // check for infinity or NaN
1.2755 + asm("beq fastsqrt2 "); // branch if infinity or NaN
1.2756 + asm("movs r3, r3 "); // test sign
1.2757 + asm("bmi fastsqrtn "); // branch if negative
1.2758 + asm("sub r2, r2, #0xFF "); // unbias the exponent
1.2759 + asm("sub r2, r2, #0x300 "); // (bias is 0x3FF)
1.2760 + asm("fastsqrtd1: ");
1.2761 + asm("mov r1, #0x40000000 "); // value for comparison
1.2762 + asm("mov r3, #27 "); // loop counter (number of bits/2)
1.2763 + asm("movs r2, r2, asr #1 "); // divide exponent by 2, LSB into CF
1.2764 + asm("movcs r7, r5, lsl #11 "); // mantissa into r6,r7 with MSB in MSB of r7
1.2765 + asm("orrcs r7, r7, r4, lsr #21 ");
1.2766 + asm("movcs r6, r4, lsl #11 ");
1.2767 + asm("movcs r4, #0 "); // r4, r5 will hold result mantissa
1.2768 + asm("orrcs r7, r7, #0x80000000 "); // if exponent odd, restore MSB of mantissa
1.2769 + asm("movcc r7, r5, lsl #12 "); // mantissa into r6,r7 with MSB in MSB of r7
1.2770 + asm("orrcc r7, r7, r4, lsr #20 "); // if exponent even, shift mantissa left an extra
1.2771 + asm("movcc r6, r4, lsl #12 "); // place, lose top bit, and
1.2772 + asm("movcc r4, #1 "); // set MSB of result, and
1.2773 + asm("mov r5, #0 "); // r4, r5 will hold result mantissa
1.2774 + asm("mov r8, #0 "); // r8, r9 will be comparison accumulator
1.2775 + asm("mov r9, #0 ");
1.2776 + asm("bcc fastsqrt4 "); // if exponent even, calculate one less bit
1.2777 + // as result MSB already known
1.2778 +
1.2779 + // Main mantissa square-root loop
1.2780 + asm("fastsqrt3: "); // START OF MAIN LOOP
1.2781 + asm("subs r10, r7, r1 "); // subtract result:01 from acc:mant
1.2782 + asm("sbcs r12, r8, r4 "); // result into r14:r12:r10
1.2783 + asm("sbcs r14, r9, r5 ");
1.2784 + asm("movcs r7, r10 "); // if no borrow replace accumulator with result
1.2785 + asm("movcs r8, r12 ");
1.2786 + asm("movcs r9, r14 ");
1.2787 + asm("adcs r4, r4, r4 "); // shift result left one, putting in next bit
1.2788 + asm("adcs r5, r5, r5 ");
1.2789 + asm("mov r9, r9, lsl #2 "); // shift acc:mant left by 2 bits
1.2790 + asm("orr r9, r9, r8, lsr #30 ");
1.2791 + asm("mov r8, r8, lsl #2 ");
1.2792 + asm("orr r8, r8, r7, lsr #30 ");
1.2793 + asm("mov r7, r7, lsl #2 ");
1.2794 + asm("orr r7, r7, r6, lsr #30 ");
1.2795 + asm("mov r6, r6, lsl #2 ");
1.2796 + asm("fastsqrt4: "); // Come in here if we need to do one less iteration
1.2797 + asm("subs r10, r7, r1 "); // subtract result:01 from acc:mant
1.2798 + asm("sbcs r12, r8, r4 "); // result into r14:r12:r10
1.2799 + asm("sbcs r14, r9, r5 ");
1.2800 + asm("movcs r7, r10 "); // if no borrow replace accumulator with result
1.2801 + asm("movcs r8, r12 ");
1.2802 + asm("movcs r9, r14 ");
1.2803 + asm("adcs r4, r4, r4 "); // shift result left one, putting in next bit
1.2804 + asm("adcs r5, r5, r5 ");
1.2805 + asm("mov r9, r9, lsl #2 "); // shift acc:mant left by 2 bits
1.2806 + asm("orr r9, r9, r8, lsr #30 ");
1.2807 + asm("mov r8, r8, lsl #2 ");
1.2808 + asm("orr r8, r8, r7, lsr #30 ");
1.2809 + asm("mov r7, r7, lsl #2 ");
1.2810 + asm("orr r7, r7, r6, lsr #30 ");
1.2811 + asm("mov r6, r6, lsl #2 ");
1.2812 + asm("subs r3, r3, #1 "); // decrement loop counter
1.2813 + asm("bne fastsqrt3 "); // do necessary number of iterations
1.2814 +
1.2815 + asm("movs r4, r4, lsr #1 "); // shift result mantissa right 1 place
1.2816 + asm("orr r4, r4, r5, lsl #31 "); // LSB (=rounding bit) into carry
1.2817 + asm("mov r5, r5, lsr #1 ");
1.2818 + asm("adcs r4, r4, #0 "); // round the mantissa to 53 bits
1.2819 + asm("adcs r5, r5, #0 ");
1.2820 + asm("cmp r5, #0x00200000 "); // check for mantissa overflow
1.2821 + asm("addeq r2, r2, #1 "); // if so, increment exponent - can never overflow
1.2822 + asm("bic r5, r5, #0x00300000 "); // remove top bit of mantissa - it is implicit
1.2823 + asm("add r2, r2, #0xFF "); // re-bias the exponent
1.2824 + asm("add r3, r2, #0x300 "); // and move into r3
1.2825 + asm("orr r3, r5, r3, lsl #20 "); // r3 now contains exponent + top of mantissa
1.2826 + asm("fastsqrt_ok: ");
1.2827 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.2828 + asm("stmia r0, {r3,r4} "); // store the result
1.2829 +#else
1.2830 + asm("str r3, [r0, #4] ");
1.2831 + asm("str r4, [r0, #0] ");
1.2832 +#endif
1.2833 + asm("mov r0, #0 "); // error code KErrNone
1.2834 + __POPRET("r4-r10,");
1.2835 +
1.2836 + asm("fastsqrt1: ");
1.2837 + asm("orrs r6, r5, r4 "); // exponent zero - test mantissa
1.2838 + asm("beq fastsqrt_ok "); // if zero, return 0
1.2839 +
1.2840 + asm("movs r3, r3 "); // denormal - test sign
1.2841 + asm("bmi fastsqrtn "); // branch out if negative
1.2842 + asm("sub r2, r2, #0xFE "); // unbias the exponent
1.2843 + asm("sub r2, r2, #0x300 "); // (denormal exponent bias is 0x3FE)
1.2844 + asm("fastsqrtd: "); // normalise the denormal mantissa
1.2845 + asm("adds r4, r4, r4 "); // shift mantissa left
1.2846 + asm("adcs r5, r5, r5 ");
1.2847 + asm("sub r2, r2, #1 "); // and decrement exponent
1.2848 + asm("tst r5, #0x00100000 "); // test if normalised
1.2849 + asm("beq fastsqrtd "); // loop until normalised
1.2850 + asm("b fastsqrtd1 "); // now treat as a normalised number
1.2851 + asm("fastsqrt2: "); // get here if infinity or NaN
1.2852 + asm("orrs r6, r5, r4 "); // if mantissa zero, infinity
1.2853 + asm("bne fastsqrtnan "); // branch if not - must be NaN
1.2854 + asm("movs r3, r3 "); // test sign of infinity
1.2855 + asm("bmi fastsqrtn "); // branch if -ve
1.2856 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.2857 + asm("stmia r0, {r3,r4} "); // store the result
1.2858 +#else
1.2859 + asm("str r3, [r0, #4] ");
1.2860 + asm("str r4, [r0, #0] ");
1.2861 +#endif
1.2862 + asm("mov r0, #-9 "); // return KErrOverflow
1.2863 + asm("b fastsqrt_end ");
1.2864 +
1.2865 + asm("fastsqrtn: "); // get here if negative or QNaN operand
1.2866 + asm("mov r3, #0xFF000000 "); // generate "real indefinite" QNaN
1.2867 + asm("orr r3, r3, #0x00F80000 "); // sign=1, exp=7FF, mantissa = 1000...0
1.2868 + asm("mov r4, #0 ");
1.2869 + asm("fastsqrtxa: "); // store NaN result and return KErrArgument
1.2870 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.2871 + asm("stmia r0, {r3,r4} "); // store the result
1.2872 +#else
1.2873 + asm("str r3, [r0, #4] ");
1.2874 + asm("str r4, [r0, #0] ");
1.2875 +#endif
1.2876 + asm("mov r0, #-6 "); // return KErrArgument
1.2877 + asm("fastsqrt_end: ");
1.2878 + __POPRET("r4-r10,");
1.2879 +
1.2880 + asm("fastsqrtnan: "); // operand is a NaN
1.2881 + asm("tst r5, #0x00080000 "); // test MSB of mantissa
1.2882 + asm("bne fastsqrtn "); // if set it is a QNaN - so return "real indefinite"
1.2883 + asm("bic r3, r3, #0x00080000 "); // else convert SNaN to QNaN
1.2884 + asm("b fastsqrtxa "); // and return KErrArgument
1.2885 +#endif // __USE_VFP_MATH
1.2886 + }
1.2887 +
1.2888 +
1.2889 +
1.2890 +
1.2891 +__NAKED__ EXPORT_C TReal Math::Poly(TReal /*aX*/,const SPoly* /*aPoly*/) __SOFTFP
1.2892 +/**
1.2893 +Evaluates the polynomial:
1.2894 +{a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.
1.2895 +
1.2896 +
1.2897 +@param aX The value of the x-variable
1.2898 +@param aPoly A pointer to the structure containing the set of coefficients
1.2899 + in the order: a[0], a[1], ..., a[n-1], a[n].
1.2900 +
1.2901 +@return The result of the evaluation.
1.2902 +*/
1.2903 +//
1.2904 +// Evaluate a power series in x for a P_POLY coefficient table.
1.2905 +// Changed to use TRealX throughout the calculation
1.2906 +//
1.2907 + {
1.2908 + // On entry r0,r1=aX, r2=aPoly
1.2909 + asm("stmfd sp!, {r4-r11,lr} "); // save callee-saved registers
1.2910 + asm("mov r11, r2 "); // r11=aPoly
1.2911 + asm("ldr r10, [r11], #4 "); // r10=number of coefficients, r11=first coeff addr
1.2912 + asm("add r11, r11, r10, lsl #3 "); // r11=address of last coefficient+8 (TReal64 coeffs)
1.2913 + asm("mov r2, r1 "); // aX into r1,r2
1.2914 + asm("mov r1, r0 ");
1.2915 + asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
1.2916 + asm("mov r4, r1 "); // move into r4,r5,r6
1.2917 + asm("mov r5, r2 ");
1.2918 + asm("mov r6, r3 ");
1.2919 + asm("ldmdb r11!, {r1,r2} "); // last coefficient into r1,r2
1.2920 + asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
1.2921 + asm("subs r10, r10, #1 "); // one coefficient consumed
1.2922 + asm("beq polynomial0 "); // if no more coefficients, exit
1.2923 +
1.2924 + asm("polynomial1: "); // Horner iteration: r = r*aX + next coefficient
1.2925 + asm("stmfd sp!, {r4,r5,r6} "); // save value of aX
1.2926 + asm("bl TRealXMultiply "); // r *= aX
1.2927 + asm("mov r4, r1 "); // move result into r4,r5,r6
1.2928 + asm("mov r5, r2 ");
1.2929 + asm("mov r6, r3 ");
1.2930 + asm("ldmdb r11!, {r1,r2} "); // next coefficient into r1,r2
1.2931 + asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
1.2932 + asm("bl TRealXAdd "); // r += *--pR
1.2933 + asm("ldmfd sp!, {r4,r5,r6} "); // aX back into r4,r5,r6
1.2934 + asm("subs r10, r10, #1 "); // iterate until all coefficients processed
1.2935 + asm("bne polynomial1 ");
1.2936 +
1.2937 + asm("polynomial0: "); // result now in r1,r2,r3
1.2938 + asm("bl ConvertTRealXToTReal64 "); // convert back to TReal64
1.2939 + __POPRET("r4-r11,");
1.2940 + }
1.2941 +
1.2942 +
1.2943 +
1.2944 +
1.2945 +__NAKED__ EXPORT_C void Math::PolyX(TRealX& /*aY*/,const TRealX& /*aX*/,TInt /*aDeg*/,const TRealX* /*aCoef*/)
1.2946 +/**
1.2947 +Evaluates the polynomial:
1.2948 +{a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.
1.2949 +
1.2950 +@param aY A reference containing the result.
1.2951 +@param aX The value of the x-variable.
1.2952 +@param aDeg The degree of the polynomial (the highest power of x
1.2953 + which is present).
1.2954 +@param aCoef A pointer to a contiguous set of TRealX values containing
1.2955 + the coefficients.
1.2956 + They must be in the order: a[0], a[1], ..., a[n-1], a[n].
1.2957 +*/
1.2958 +//
1.2959 +// Evaluate a polynomial with TRealX argument, coefficients and result
1.2960 +//
1.2961 + {
1.2962 + // On entry r0=&aY, r1=&aX, r2=aDeg, r3=aCoef
1.2963 + asm("stmfd sp!, {r0,r4-r11,lr} "); // save &aY and callee-saved registers
1.2964 + asm("add r11, r3, r2, lsl #3 "); // r11=address of last coefficient
1.2965 + asm("add r11, r11, r2, lsl #2 "); // (aDeg*12: each TRealX is 12 bytes)
1.2966 + asm("mov r9, r1 "); // r9=address of argument
1.2967 + asm("movs r10, r2 "); // r10=number of coefficients-1
1.2968 + asm("ldmia r11, {r1,r2,r3} "); // last coefficient into r1,r2,r3
1.2969 + asm("beq polyx0 "); // if no more coefficients, exit
1.2970 +
1.2971 + asm("polyx1: "); // Horner iteration: result = result*aX + next coeff
1.2972 + asm("ldmia r9, {r4,r5,r6} "); // aX into r4,r5,r6
1.2973 + asm("bl TRealXMultiply "); // result *= aX
1.2974 + asm("ldmdb r11!, {r4,r5,r6} "); // next coefficient into r4,r5,r6
1.2975 + asm("bl TRealXAdd "); // result += next coeff
1.2976 + asm("subs r10, r10, #1 "); // iterate until all coefficients processed
1.2977 + asm("bne polyx1 ");
1.2978 +
1.2979 + asm("polyx0: "); // result now in r1,r2,r3
1.2980 + asm("ldmfd sp!, {r0,r4-r11,lr} "); // restore registers, including destination address in r0
1.2981 + asm("stmia r0, {r1,r2,r3} "); // store result
1.2982 + __JUMP(,lr);
1.2983 + }
1.2984 +
1.2985 +
1.2986 +
1.2987 +
1.2988 +#ifndef __USE_VFP_MATH
1.2989 +__NAKED__ EXPORT_C TInt Math::Int(TReal& /*aTrg*/, const TReal& /*aSrc*/)
1.2990 +/**
1.2991 +Calculates the integer part of a number.
1.2992 +
1.2993 +The integer part is that before a decimal point.
1.2994 +Truncation is toward zero, so that
1.2995 +int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.
1.2996 +
1.2997 +
1.2998 +@param aTrg A reference containing the result.
1.2999 +@param aSrc The number whose integer part is required.
1.3000 +
1.3001 +@return KErrNone if successful, otherwise another of
1.3002 + the system-wide error codes.
1.3003 +*/
1.3004 +//
1.3005 +// Write the integer part of aSrc to the TReal at aTrg
1.3006 +// Negative numbers are rounded towards zero.
1.3007 +//
1.3008 + {
1.3009 + // r0=&aTrg, r1=&aSrc, return value in r0
1.3010 + asm("stmfd sp!, {lr} "); // save return address
1.3011 + asm("mov r12, r0 "); // r12=&aTrg
1.3012 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.3013 + asm("ldmia r1, {r0,r1} "); // input value into r0,r1
1.3014 +#else
1.3015 + asm("ldr r0, [r1, #4] ");
1.3016 + asm("ldr r1, [r1, #0] ");
1.3017 +#endif
1.3018 + asm("bl TReal64Int "); // truncate TReal64 in r0,r1 towards zero
1.3019 +#ifdef __DOUBLE_WORDS_SWAPPED__
1.3020 + asm("stmia r12, {r0,r1} "); // store result
1.3021 +#else
1.3022 + asm("str r0, [r12, #4] ");
1.3023 + asm("str r1, [r12, #0] ");
1.3024 +#endif
1.3025 + asm("bic r0, r0, #0x80000000 "); // remove sign bit
1.3026 + asm("cmn r0, #0x00100000 "); // check for NaN or infinity
1.3027 + asm("movpl r0, #0 "); // if neither, return KErrNone
1.3028 + asm("bpl math_int_0 ");
1.3029 + asm("movs r0, r0, lsl #12 "); // check for infinity
1.3030 + asm("cmpeq r1, #0 ");
1.3031 + asm("mvneq r0, #8 "); // if infinity return KErrOverflow
1.3032 + asm("mvnne r0, #5 "); // else return KErrArgument
1.3033 + asm("math_int_0: ");
1.3034 + __POPRET("");
1.3035 +
1.3036 + // Take integer part of TReal64 in r0,r1
1.3037 + // Infinity and NaNs are unaffected
1.3038 + // r0-r3 modified
1.3039 + asm("TReal64Int: ");
1.3040 + asm("mov r2, r0, lsr #20 ");
1.3041 + asm("bic r2, r2, #0x800 "); // r2=exponent
1.3042 + asm("mov r3, #0x300 ");
1.3043 + asm("orr r3, r3, #0xFF "); // r3=0x3FF
1.3044 + asm("subs r2, r2, r3 "); // r2=exponent-3FF=number of integer bits-1
1.3045 + asm("ble TReal64Int1 "); // branch if <=1 integer bits
1.3046 + asm("cmp r2, #52 ");
1.3047 + __JUMP(ge,lr); // >=53 integer bits: value already integral (or Inf/NaN)
1.3048 + asm("cmp r2, #20 ");
1.3049 + asm("bgt TReal64Int2 "); // jump if >21 integer bits (r0 will be unaffected)
1.3050 + asm("rsb r2, r2, #20 "); // r2=number of bits to clear at bottom end of r0
1.3051 + asm("mov r0, r0, lsr r2 "); // clear them
1.3052 + asm("mov r0, r0, lsl r2 ");
1.3053 + asm("mov r1, #0 "); // clear r1
1.3054 + __JUMP(,lr);
1.3055 + asm("TReal64Int2: ");
1.3056 + asm("rsb r2, r2, #52 "); // r2=number of bits to clear at bottom end of r1
1.3057 + asm("mov r1, r1, lsr r2 "); // clear them
1.3058 + asm("mov r1, r1, lsl r2 ");
1.3059 + __JUMP(,lr);
1.3060 + asm("TReal64Int1: "); // result is either 0 or 1
1.3061 + asm("mov r1, #0 "); // lower mantissa bits of result will be zero
1.3062 + asm("moveq r0, r0, lsr #20 "); // if result is 1, clear mantissa but leave exponent
1.3063 + asm("moveq r0, r0, lsl #20 ");
1.3064 + asm("andlt r0, r0, #0x80000000 "); // if result is 0, clear mantissa and exponent
1.3065 + __JUMP(,lr);
1.3066 + }
1.3067 +
1.3068 +
1.3069 +
1.3070 +
__NAKED__ EXPORT_C TInt Math::Int(TInt16& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the integer part of a number.

The integer part is that before a decimal point.
Truncation is toward zero, so that:
int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.

This function is suitable when the result is known to be small enough
for a 16-bit signed integer.

@param aTrg A reference containing the result.
@param aSrc The number whose integer part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// If the integer part of aSrc is in the range -32768 to +32767
// inclusive, write the integer part to the TInt16 at aTrg
// Negative numbers are rounded towards zero.
// If an overflow or underflow occurs, aTrg is set to the max/min value
// (+32767/-32768) and KErrOverflow/KErrUnderflow is returned.
//
// __NAKED__: no compiler prologue/epilogue - body is pure hand-written ARM.
//
	{
	// r0=&aTrg, r1=&aSrc
	asm("stmfd sp!, {lr} ");
	asm("mov r3, r0 ");				// r3=&aTrg (r0 is needed for the conversion result)
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r0,r1} ");		// input value into r0,r1 (r0=sign/exponent/high mantissa)
#else
	asm("ldr r0, [r1, #4] ");		// r0=sign/exponent/high mantissa word
	asm("ldr r1, [r1, #0] ");		// r1=low mantissa word
#endif
	asm("bl TReal64GetTInt ");		// do the conversion: TInt32 result in r0, error code in r12
	asm("cmp r0, #0x8000 ");		// limit answer to TInt16 range
	asm("movge r0, #0x7F00 ");		// if >= +0x8000, saturate to +0x7FFF (built in two instructions)
	asm("orrge r0, r0, #0xFF ");
	asm("mvnge r12, #8 ");			// set error code KErrOverflow (-9) if limiting occurred
	asm("cmn r0, #0x8000 ");
	asm("movlt r0, #0x8000 ");		// if < -0x8000, saturate to -0x8000 (low 16 bits)
	asm("mvnlt r12, #9 ");			// set error code KErrUnderflow (-10) if limiting occurred
	asm("mov r1, r0, lsr #8 ");		// top byte of answer into r1
	asm("strb r0, [r3] ");			// store result in aTrg byte-wise (aTrg need not be word aligned)
	asm("strb r1, [r3, #1] ");
	asm("mov r0, r12 ");			// return error code in r0
	__POPRET("");
	}
1.3118 +
1.3119 +
1.3120 +
__NAKED__ EXPORT_C TInt Math::Int(TInt32& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the integer part of a number.

The integer part is that before a decimal point.
Truncation is toward zero, so that
int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.

This function is suitable when the result is known to be small enough
for a 32-bit signed integer.

@param aTrg A reference containing the result.
@param aSrc The number whose integer part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// If the integer part of the float is in the range -2147483648 to +2147483647
// inclusive, write the integer part to the TInt32 at aTrg
// Negative numbers are rounded towards zero.
// If an overflow or underflow occurs, aTrg is set to the max/min value
// and KErrOverflow/KErrUnderflow is returned.
//
	{
	// r0=&aTrg, r1=&aSrc
	asm("stmfd sp!, {lr} ");
	asm("mov r3, r0 ");				// r3=&aTrg
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r0,r1} ");		// input value into r0,r1 (r0=sign/exponent/high mantissa)
#else
	asm("ldr r0, [r1, #4] ");		// r0=sign/exponent/high mantissa word
	asm("ldr r1, [r1, #0] ");		// r1=low mantissa word
#endif
	asm("bl TReal64GetTInt ");		// do the conversion
	asm("str r0, [r3] ");			// store result in aTrg
	asm("mov r0, r12 ");			// return error code in r0
	__POPRET("");

	// Convert double in r0,r1 to int in r0, truncating towards zero.
	// Overflow/underflow saturate to 0x7FFFFFFF/0x80000000; a NaN yields 0.
	// Return error code in r12 (KErrNone, KErrArgument, KErrOverflow or KErrUnderflow)
	// Registers r0,r1,r2,r12 modified
	asm("TReal64GetTInt: ");
	asm("mov r2, r0, lsr #20 ");
	asm("bic r2, r2, #0x800 ");		// r2=exponent (sign bit masked off)
	asm("add r12, r2, #1 ");
	asm("cmp r12, #0x800 ");		// check for exponent 7FF (NaN or infinity)
	asm("bne TReal64GetTInt1 ");
	asm("movs r12, r0, lsl #12 ");	// exponent=7FF, check mantissa
	asm("cmpeq r1, #0 ");
	asm("movne r0, #0 ");			// if non-zero, input is a NaN so return 0
	asm("mvnne r12, #5 ");			// and return KErrArgument (-6)
	__JUMP(ne,lr);
	asm("TReal64GetTInt1: ");		// here for any non-NaN (infinities saturate below)
	asm("mov r12, #0x400 ");
	asm("orr r12, r12, #0x1E ");	// r12=0x41E (exponent of 2^31)
	asm("subs r2, r12, r2 ");		// r2=number of shifts to produce integer
	asm("mov r12, #0 ");			// set return code to KErrNone
	asm("ble TReal64GetTInt2 ");	// if <=0, |value|>=2^31 so saturate result
	asm("cmp r2, #31 ");			// check if more than 31 shifts needed
	asm("movhi r0, #0 ");			// if so, |value|<1 so result truncates to 0 (no error)
	__JUMP(hi,lr);
	asm("cmp r0, #0 ");				// check sign bit (N flag consumed by rsbmi below)
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");	// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("mov r0, r0, lsr r2 ");		// r0=absolute integer
	asm("rsbmi r0, r0, #0 ");		// if negative, negate
	__JUMP(,lr);
	asm("TReal64GetTInt2: ");
	asm("blt TReal64GetTInt3 ");	// if exponent>0x41E, definitely an overflow
	asm("cmp r0, #0 ");				// exponent=0x41E exactly: check sign bit
	asm("bpl TReal64GetTInt3 ");	// if positive, definitely an overflow
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");	// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("cmp r0, #0x80000000 ");	// check if value is = -2^31 (representable: return it, KErrNone)
	__JUMP(eq,lr);
	asm("TReal64GetTInt3: ");
	asm("cmp r0, #0 ");				// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");		// if -ve return 80000000, if +ve return 7FFFFFFF
	asm("mvnpl r12, #8 ");			// if +ve return KErrOverflow (-9)
	asm("mvnmi r12, #9 ");			// if -ve return KErrUnderflow (-10)
	__JUMP(,lr);
	}
1.3206 +#endif // __USE_VFP_MATH
1.3207 +
1.3208 +
1.3209 +
1.3210 +
__NAKED__ EXPORT_C TBool Math::IsZero(const TReal& /*aVal*/)
/**
Determines whether a value is zero.

Both +0.0 and -0.0 are reported as zero (the sign bit is ignored).

@param aVal A reference to the value to be checked.

@return True, if aVal is zero; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");		// input value into r1,r2 (r1=sign/exponent/high mantissa)
#else
	asm("ldr r2, [r0, #0] ");		// r2=low mantissa word
	asm("ldr r1, [r0, #4] ");		// r1=sign/exponent/high mantissa word
#endif
	asm("TReal64IsZero: ");
	asm("mov r0, #0 ");				// default return value is 0
	asm("bics r1, r1, #0x80000000 ");	// remove sign bit (so -0 is treated like +0)
	asm("cmpeq r2, #0 ");			// and check both exponent and mantissa are zero
	asm("moveq r0, #1 ");			// return 1 if zero
	__JUMP(,lr);
	}
1.3233 +
1.3234 +
1.3235 +
1.3236 +
__NAKED__ EXPORT_C TBool Math::IsNaN(const TReal& /*aVal*/)
/**
Determines whether a value is not a number.

A NaN has exponent 7FF and a non-zero mantissa; infinities
(exponent 7FF, zero mantissa) return false.

@param aVal A reference to the value to be checked.

@return True, if aVal is not a number; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");		// input value into r1,r2 (r1=sign/exponent/high mantissa)
#else
	asm("ldr r2, [r0, #0] ");		// r2=low mantissa word
	asm("ldr r1, [r0, #4] ");		// r1=sign/exponent/high mantissa word
#endif
	asm("TReal64IsNaN: ");
	asm("mov r0, #0 ");				// default return value is 0
	asm("bic r1, r1, #0x80000000 ");	// remove sign bit
	asm("cmn r1, #0x00100000 ");	// N set iff exponent=7FF (r1+0x00100000 carries into bit 31)
	__JUMP(pl,lr);					// exponent<7FF: finite value, return 0
	asm("movs r1, r1, lsl #12 ");	// exponent=7FF, check mantissa
	asm("cmpeq r2, #0 ");
	asm("movne r0, #1 ");			// if mantissa nonzero it is a NaN: return 1 (infinity returns 0)
	__JUMP(,lr);
	}
1.3262 +
1.3263 +
1.3264 +
1.3265 +
__NAKED__ EXPORT_C TBool Math::IsInfinite(const TReal& /*aVal*/)
/**
Determines whether a value is infinite.

True only for exact +/- infinity (exponent 7FF, zero mantissa);
NaNs and finite values return false.

@param aVal A reference to the value to be checked.

@return True, if aVal is infinite; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");		// input value into r1,r2 (r1=sign/exponent/high mantissa)
#else
	asm("ldr r2, [r0, #0] ");		// r2=low mantissa word
	asm("ldr r1, [r0, #4] ");		// r1=sign/exponent/high mantissa word
#endif
	asm("TReal64IsInfinite: ");
	asm("mov r0, #0 ");				// default return value is 0
	asm("mov r3, #0x00200000 ");	// r3 == - (0x7ff00000 << 1) modulo 2^32
	asm("cmp r2, #0 ");				// low mantissa word must be zero
	asm("cmneq r3, r1, lsl #1 ");	// (r1 lsl 1) drops the sign bit; sum==0 iff exp=7FF && mant=0
	asm("moveq r0, #1 ");			// if so, return 1
	__JUMP(,lr);
	}
1.3289 +
1.3290 +
1.3291 +
1.3292 +
__NAKED__ EXPORT_C TBool Math::IsFinite(const TReal& /*aVal*/)
/**
Determines whether a value is finite.

In this context, a value is finite if it is a valid number and
is not infinite, i.e. its exponent field is less than 7FF.

@param aVal A reference to the value to be checked.

@return True, if aVal is finite; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldr r1, [r0, #0] "); // only need the sign/exponent word - get it into r1
#else
	asm("ldr r1, [r0, #4] "); // only need the sign/exponent word - get it into r1
#endif
	asm("TReal64IsFinite: ");
	asm("mov r0, #0 ");				// default return value is 0
	asm("bic r1, r1, #0x80000000 ");	// remove sign bit
	asm("cmn r1, #0x00100000 ");	// N set iff exponent=7FF (infinity or NaN)
	asm("movpl r0, #1 ");			// exponent<7FF: finite, return 1
	__JUMP(,lr);
	}
1.3317 +
1.3318 +
1.3319 +
1.3320 +
__NAKED__ EXPORT_C void Math::SetZero(TReal& /*aVal*/, TInt /*aSign*/)
//
// Constructs zeros, assuming default sign is positive
//
// Writes +0.0 to aVal if aSign is zero, -0.0 if aSign is non-zero.
//
	{
	asm("cmp r1, #0 ");				// test aSign
	asm("movne r1, #0x80000000 ");	// if nonzero, set sign bit (r1 becomes the high word)
	asm("mov r2, #0 ");				// mantissa=0 (r2 is the low word)
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");
#else
	asm("str r2, [r0, #0] ");		// low mantissa word
	asm("str r1, [r0, #4] ");		// sign/exponent word
#endif
	__JUMP(,lr);
	}
1.3337 +
1.3338 +
1.3339 +
1.3340 +
__NAKED__ EXPORT_C void Math::SetNaN(TReal& /*aVal*/)
//
// Constructs NaN (+ve sign for Java)
//
// Writes the NaN bit pattern 7FFFFFFF:FFFFFFFF (exponent 7FF,
// all mantissa bits set, sign clear) to aVal.
//
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("mvn r1, #0x80000000 ");	// r1=7FFFFFFF (sign/exponent word, stored first)
	asm("mvn r2, #0 ");				// r2=FFFFFFFF (low mantissa word)
#else
	asm("mvn r2, #0x80000000 ");	// r2=7FFFFFFF (sign/exponent word, stored second)
	asm("mvn r1, #0 ");				// r1=FFFFFFFF (low mantissa word)
#endif
	asm("stmia r0, {r1,r2} ");
	__JUMP(,lr);
	}
1.3356 +
1.3357 +
1.3358 +
1.3359 +
__NAKED__ EXPORT_C void Math::SetInfinite(TReal& /*aVal*/, TInt /*aSign*/)
//
// Constructs infinities
//
// Writes +infinity to aVal if aSign is zero, -infinity if aSign is non-zero.
//
	{
	asm("cmp r1, #0 ");				// test aSign
	asm("movne r1, #0x80000000 ");	// if nonzero, set sign bit
	asm("orr r1, r1, #0x70000000 ");	// set exponent to 7FF in two ORRs
	asm("orr r1, r1, #0x0FF00000 ");	// (0x70000000|0x0FF00000 = 0x7FF00000)
	asm("mov r2, #0 ");				// mantissa=0
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");
#else
	asm("str r2, [r0, #0] ");		// low mantissa word
	asm("str r1, [r0, #4] ");		// sign/exponent word
#endif
	__JUMP(,lr);
	}
1.3378 +
1.3379 +
1.3380 +
1.3381 +#ifndef __USE_VFP_MATH
__NAKED__ EXPORT_C TInt Math::Frac(TReal& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the fractional part of a number.

The fractional part is that after a decimal point.
Truncation is toward zero, so that
Frac(2.4)=0.4, Frac(2)=0, Frac(-1)=0, Frac(-1.4)=0.4.

The result keeps the sign of aSrc. Infinity yields a signed zero and
KErrOverflow; a NaN is stored unchanged and KErrArgument is returned.

@param aTrg A reference containing the result.
@param aSrc The number whose fractional part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// Algorithm: compute the number of integer bits from the exponent;
// shift them out of the left-justified mantissa, renormalise what
// remains and rebuild a TReal64 with the adjusted exponent.
//
	{
	// on entry r0=aTrg, r1=&Src
	// on exit r0=return code
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r1,r2} ");		// r1,r2=aSrc (r1=sign/exponent/high mantissa)
#else
	asm("ldr r2, [r1, #0] ");		// r2=low mantissa word
	asm("ldr r1, [r1, #4] ");		// r1=sign/exponent/high mantissa word
#endif
	asm("and r3, r1, #0x80000000 ");
	asm("str r3, [sp, #-4]! ");		// save sign on the stack (popped on every exit path)
	asm("mov r3, r1, lsr #20 ");
	asm("bic r3, r3, #0x800 ");		// r3=exponent of aSrc
	asm("mov r12, #0x300 ");
	asm("orr r12, r12, #0xFE ");	// r12=0x3FE
	asm("subs r3, r3, r12 ");		// r3=exponent of aSrc-0x3FE=number of integer bits
	asm("ble MathFrac0 ");			// if <=0, |aSrc|<1 so return aSrc unaltered
	asm("cmp r3, #53 ");
	asm("bge MathFrac1 ");			// if >=53 integer bits, there is no fractional part
	asm("mov r1, r1, lsl #11 ");	// left-justify mantissa in r1,r2 (implicit bit at MSB)
	asm("orr r1, r1, r2, lsr #21 ");
	asm("mov r2, r2, lsl #11 ");
	asm("cmp r3, #32 ");			// check for >=32 integer bits
	asm("bge MathFrac2 ");
	asm("rsb r12, r3, #32 ");
	asm("mov r1, r1, lsl r3 ");		// shift mantissa left by number of integer bits
	asm("orrs r1, r1, r2, lsr r12 ");
	asm("mov r2, r2, lsl r3 ");
	asm("mov r3, #0x300 ");			// r3 holds exponent = 0x3FE initially
	asm("orr r3, r3, #0xFE ");
	asm("beq MathFrac3 ");			// branch if >=32 shifts to normalise (top word now zero)
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,1);						// r12=number of leading zeros in r1 = shift count
	asm("mov r1, r1, lsl r12 ");
	asm("rsb r12, r12, #32 ");
	asm("orr r1, r1, r2, lsr r12 ");
	asm("rsb r12, r12, #32 ");
#else
	asm("mov r12, #32 ");			// else r12=32-number of shifts needed
	asm("cmp r1, #0x10000 ");		// calculate shift count by binary search
	asm("movcc r1, r1, lsl #16 ");
	asm("subcc r12, r12, #16 ");
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r12, r12, #8 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r12, r12, #4 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r12, r12, #2 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r12, r12, #1 ");
	asm("orr r1, r1, r2, lsr r12 ");	// normalise
	asm("rsb r12, r12, #32 ");		// r12=shift count
#endif
	asm("mov r2, r2, lsl r12 ");
	asm("sub r3, r3, r12 ");		// exponent-=shift count
	asm("b MathFrac4 ");			// branch to assemble and store result

	// come here if >=32 shifts to normalise
	asm("MathFrac3: ");
	asm("sub r3, r3, #32 ");		// decrement exponent by 32
	asm("movs r1, r2 ");			// shift left by 32, set Z if result zero
	asm("mov r2, #0 ");
	asm("bne MathFrac6 ");			// if result nonzero, normalise
	asm("beq MathFrac5 ");			// branch if result zero (fraction is exactly 0)

	// come here if >=32 integer bits
	asm("MathFrac2: ");
	asm("sub r3, r3, #32 ");
	asm("movs r1, r2, lsl r3 ");	// shift left by number of integer bits, set Z if result zero
	asm("mov r2, #0 ");
	asm("mov r3, #0x300 ");			// r3 holds exponent = 0x3FE initially
	asm("orr r3, r3, #0xFE ");
	asm("beq MathFrac5 ");			// branch if result zero
	asm("MathFrac6: ");
	asm("cmp r1, #0x10000 ");		// else normalise (binary-search shift, adjusting exponent)
	asm("movcc r1, r1, lsl #16 ");
	asm("subcc r3, r3, #16 ");
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r3, r3, #8 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r3, r3, #4 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r3, r3, #2 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r3, r3, #1 ");

	// come here to assemble and store result
	asm("MathFrac4: ");
	asm("bic r1, r1, #0x80000000 ");	// remove integer bit (implicit in TReal64 format)
	asm("mov r2, r2, lsr #11 ");	// shift mantissa right by 11 back into packed position
	asm("orr r2, r2, r1, lsl #21 ");
	asm("mov r1, r1, lsr #11 ");
	asm("ldr r12, [sp] ");			// recover saved sign
	asm("orr r1, r1, r3, lsl #20 ");	// exponent into r1 bits 20-30
	asm("orr r1, r1, r12 ");		// sign bit into r1 bit 31

	// come here to return source unaltered
	asm("MathFrac0: ");
	asm("add sp, sp, #4 ");			// discard saved sign
	asm("MathFrac_ok: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");		// store result
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mov r0, #0 ");				// return KErrNone
	__JUMP(,lr);

	// come here if infinity, NaN or >=53 integer bits
	asm("MathFrac1: ");
	asm("cmp r3, #0x400 ");			// check for infinity/NaN (exponent 7FF => r3=0x401)
	asm("bhi MathFrac7 ");			// branch if so

	// come here to return zero
	asm("MathFrac5: ");
	asm("ldr r1, [sp], #4 ");		// r1 bit 31=sign, rest zero (signed zero result)
	asm("mov r2, #0 ");
	asm("b MathFrac_ok ");

	// come here if infinity/NaN
	asm("MathFrac7: ");
	asm("movs r12, r1, lsl #12 ");	// check for infinity (zero mantissa)
	asm("cmpeq r2, #0 ");
	asm("bne MathFrac8 ");			// branch if NaN
	asm("ldr r1, [sp], #4 ");		// r1 bit 31=sign, rest zero
	asm("mov r2, #0 ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");		// store signed zero result
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mvn r0, #8 ");				// return KErrOverflow (-9)
	__JUMP(,lr);
	asm("MathFrac8: ");				// NaN
	asm("add sp, sp, #4 ");			// discard saved sign
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");		// store NaN unchanged
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mvn r0, #5 ");				// return KErrArgument (-6)
	__JUMP(,lr);
	}
1.3550 +#endif // __USE_VFP_MATH
1.3551 +#endif
1.3552 +
1.3553 +#ifdef __REALS_MACHINE_CODED__
1.3554 +#ifndef __ARMCC__
1.3555 +extern "C" {
1.3556 +
1.3557 +extern "C" void __math_exception(TInt aErrType);
__NAKED__ EXPORT_C TReal32 __addsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Add two floats
//
// GCC soft-float run-time helper: computes a1+a2 via the TRealX
// extended-precision routines. On error the result is still produced
// (saturated/NaN as appropriate) and __math_exception is invoked with
// the error code before returning it.
//
	{
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");				// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");				// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXAdd ");			// add a1+a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");	// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");			// check error code
	__CPOPRET(eq,"r4-r8,");			// no error: pop and return result
	asm("stmfd sp!, {r0} ");		// save result
	asm("mov r0, r12 ");			// error code into r0
	asm("bl __math_exception ");	// raise exception
	__POPRET("r0,r4-r8,");			// return saved result after the exception call
	}
1.3580 +
__NAKED__ EXPORT_C TReal64 __adddf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Add two doubles
//
// GCC soft-float run-time helper: computes a1+a2 via the TRealX
// extended-precision routines; calls __math_exception on error
// before returning the (saturated/NaN) result.
//
	{
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r2 ");				// save a2
	asm("mov r8, r3 ");
	asm("mov r2, r1 ");				// a1 into r1,r2
	asm("mov r1, r0 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");				// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");				// a2 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("bl TRealXAdd ");			// add a1+a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");	// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");			// check error code
	__CPOPRET(eq,"r4-r8,");			// no error: pop and return result
	asm("stmfd sp!, {r0,r1} ");		// save result
	asm("mov r0, r12 ");			// error code into r0
	asm("bl __math_exception ");	// raise exception
	__POPRET("r0,r1,r4-r8,");		// return saved result after the exception call
	}
1.3608 +
__NAKED__ EXPORT_C TReal32 __subsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Subtract two floats
//
// GCC soft-float run-time helper: computes a1-a2 via the TRealX
// extended-precision routines; calls __math_exception on error
// before returning the result.
//
	{
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");				// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");				// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXSubtract ");		// subtract a1-a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");	// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");			// check error code
	__CPOPRET(eq,"r4-r8,");			// no error: pop and return result
	asm("stmfd sp!, {r0} ");		// save result
	asm("mov r0, r12 ");			// error code into r0
	asm("bl __math_exception ");	// raise exception
	__POPRET("r0,r4-r8,");			// return saved result after the exception call
	}
1.3631 +
__NAKED__ EXPORT_C TReal64 __subdf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Subtract two doubles
//
// GCC soft-float run-time helper: computes a1-a2 via the TRealX
// extended-precision routines; calls __math_exception on error
// before returning the result.
//
	{
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r0 ");				// save a1
	asm("mov r8, r1 ");
	asm("mov r1, r2 ");				// a2 into r1,r2
	asm("mov r2, r3 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");				// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");				// a1 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXSubtract ");		// subtract a1-a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");	// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");			// check error code
	__CPOPRET(eq,"r4-r8,");			// no error: pop and return result
	asm("stmfd sp!, {r0,r1} ");		// save result
	asm("mov r0, r12 ");			// error code into r0
	asm("bl __math_exception ");	// raise exception
	__POPRET("r0,r1,r4-r8,");		// return saved result after the exception call
	}
1.3659 +
__NAKED__ EXPORT_C TInt __cmpsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare two floats
//
// GCC soft-float run-time helper: returns -1 if a1<a2, 0 if a1==a2,
// +1 if a1>a2. An unordered compare (either operand NaN) also
// returns 0 here.
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers; result bits 0-3 in r0
	asm("mov r0, r0, lsl #28 ");	// move result bits into the flag positions...
	asm("msr cpsr_flg, r0 ");		// ...N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
	asm("mov r0, #0 ");
	asm("mvnvs r0, #0 ");			// if a1<a2 r0=-1
	asm("moveq r0, #1 ");			// if a1>a2 r0=+1
	__POPRET("");

	// Compare two TReal32s in r0, r1.
	// Return 1 if r0<r1, 2 if r0=r1, 4 if r0>r1, 8 if unordered
	// (exactly one bit set, so callers can tst-mask combinations)
	// Registers r0,r1,r12 modified
	asm("CompareTReal32: ");
	asm("mov r12, r0, lsr #23 ");
	asm("and r12, r12, #0xFF ");	// r12=r0 exponent
	asm("cmp r12, #0xFF ");			// check if r0 is a NaN
	asm("bne CompareTReal32a ");
	asm("movs r12, r0, lsl #9 ");	// exponent=FF, check mantissa
	asm("movne r0, #8 ");			// if not zero, r0 is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal32a: ");
	asm("mov r12, r1, lsr #23 ");
	asm("and r12, r12, #0xFF ");	// r12=r1 exponent
	asm("cmp r12, #0xFF ");			// check if r1 is a NaN
	asm("bne CompareTReal32b ");
	asm("movs r12, r1, lsl #9 ");	// exponent=FF, check mantissa
	asm("movne r0, #8 ");			// if not zero, r1 is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal32b: ");
	asm("bics r12, r0, #0x80000000 ");	// check if r0=0 (can be +0 or -0)
	asm("moveq r0, #0 ");			// if it is, make it +0 so +0 compares equal to -0
	asm("bics r12, r1, #0x80000000 ");	// check if r1=0 (can be +0 or -0)
	asm("moveq r1, #0 ");			// if it is, make it +0
	asm("teq r0, r1 ");				// test if signs different
	asm("bmi CompareTReal32c ");	// branch if different
	asm("cmp r0, r1 ");				// if same, check exponents + mantissas as unsigned ints
	asm("moveq r0, #2 ");			// if equal, return 2
	__JUMP(eq,lr);
	asm("movhi r0, #4 ");			// if r0>r1, r0=4
	asm("movcc r0, #1 ");			// if r0<r1, r0=1
	asm("cmp r1, #0 ");				// check signs
	asm("eormi r0, r0, #5 ");		// if negative, bigger magnitude is smaller: switch 1 and 4
	__JUMP(,lr);
	asm("CompareTReal32c: ");		// come here if signs different
	asm("cmp r0, #0 ");				// check sign of r0
	asm("movpl r0, #4 ");			// if r0 nonnegative, then r0 is greater so return 4
	asm("movmi r0, #1 ");			// if r0 negative, return 1
	__JUMP(,lr);
	}
1.3715 +
__NAKED__ EXPORT_C TInt __cmpdf3(TReal64 /*a1*/,TReal64 /*a2*/)
//
// Compare two doubles
//
// GCC soft-float run-time helper: returns -1 if a1<a2, 0 if a1==a2,
// +1 if a1>a2. An unordered compare (either operand NaN) also
// returns 0 here.
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers; result bits 0-3 in r0
	asm("mov r0, r0, lsl #28 ");	// move result bits into the flag positions...
	asm("msr cpsr_flg, r0 ");		// ...N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
	asm("mov r0, #0 ");
	asm("mvnvs r0, #0 ");			// if a1<a2 r0=-1
	asm("moveq r0, #1 ");			// if a1>a2 r0=+1
	__POPRET("");

	// Compare two TReal64s in r0,r1 and r2,r3.
	// Return 1 if r0,r1<r2,r3
	// Return 2 if r0,r1=r2,r3
	// Return 4 if r0,r1>r2,r3
	// Return 8 if unordered
	// (exactly one bit set, so callers can tst-mask combinations)
	// Registers r0,r1,r12 modified
	asm("CompareTReal64: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r12, r0 ");			// swap word order so r0/r2 hold sign/exponent words
	asm("mov r0, r1 ");
	asm("mov r1, r12 ");
	asm("mov r12, r2 ");
	asm("mov r2, r3 ");
	asm("mov r3, r12 ");
#endif
	asm("mov r12, r0, lsr #20 ");
	asm("bic r12, r12, #0x800 ");	// r12=first operand exponent
	asm("add r12, r12, #1 ");		// add 1 to get usable compare value
	asm("cmp r12, #0x800 ");		// check if first operand is a NaN
	asm("bne CompareTReal64a ");
	asm("movs r12, r0, lsl #12 ");	// exponent=7FF, check mantissa
	asm("cmpeq r1, #0 ");
	asm("movne r0, #8 ");			// if not zero, 1st op is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal64a: ");
	asm("mov r12, r2, lsr #20 ");
	asm("bic r12, r12, #0x800 ");	// r12=second operand exponent
	asm("add r12, r12, #1 ");		// add 1 to get usable compare value
	asm("cmp r12, #0x800 ");		// check if second operand is a NaN
	asm("bne CompareTReal64b ");
	asm("movs r12, r2, lsl #12 ");	// exponent=7FF, check mantissa
	asm("cmpeq r3, #0 ");
	asm("movne r0, #8 ");			// if not zero, 2nd op is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal64b: ");
	asm("bics r12, r0, #0x80000000 ");	// check if first operand is zero (can be +0 or -0)
	asm("cmpeq r1, #0 ");
	asm("moveq r0, #0 ");			// if it is, make it +0 so +0 compares equal to -0
	asm("bics r12, r2, #0x80000000 ");	// check if second operand is zero (can be +0 or -0)
	asm("cmpeq r3, #0 ");
	asm("moveq r2, #0 ");			// if it is, make it +0
	asm("teq r0, r2 ");				// test if signs different
	asm("bmi CompareTReal64c ");	// branch if different
	asm("cmp r0, r2 ");				// if same, check exponents + mantissas as unsigned 64-bit
	asm("cmpeq r1, r3 ");
	asm("moveq r0, #2 ");			// if equal, return 2
	__JUMP(eq,lr);
	asm("movhi r0, #4 ");			// if 1st operand > 2nd operand, r0=4
	asm("movcc r0, #1 ");			// if 1st operand < 2nd operand, r0=1
	asm("cmp r2, #0 ");				// check signs
	asm("eormi r0, r0, #5 ");		// if negative, bigger magnitude is smaller: switch 1 and 4
	__JUMP(,lr);
	asm("CompareTReal64c: ");		// come here if signs different
	asm("cmp r0, #0 ");				// check sign of r0
	asm("movpl r0, #4 ");			// if first operand nonnegative, return 4
	asm("movmi r0, #1 ");			// if first operand negative, return 1
	__JUMP(,lr);
	}
1.3789 +
__NAKED__ EXPORT_C TInt __eqsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if two floats are equal
//
// Returns 0 if ordered and equal, 1 otherwise (a NaN operand gives 1).
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #2 ");				// bit 1 = ordered and equal
	asm("movne r0, #0 ");			// if ordered and equal return 0
	asm("moveq r0, #1 ");			// else return 1
	__POPRET("");
	}
1.3803 +
__NAKED__ EXPORT_C TInt __eqdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if two doubles are equal
//
// Returns 0 if ordered and equal, 1 otherwise (a NaN operand gives 1).
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #2 ");				// bit 1 = ordered and equal
	asm("movne r0, #0 ");			// if ordered and equal return 0
	asm("moveq r0, #1 ");			// else return 1
	__POPRET("");
	}
1.3817 +
__NAKED__ EXPORT_C TInt __nesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if two floats are not equal
//
// Returns 1 if ordered and unequal, 0 if equal OR unordered.
// NOTE(review): a NaN operand returns 0 here, whereas the documented
// libgcc __nesf2 convention is nonzero for NaN - confirm callers
// generated by the toolchain expect this behaviour.
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #5 ");				// test if ordered and unequal (bits 0 or 2)
	asm("moveq r0, #0 ");			// if equal or unordered return 0
	asm("movne r0, #1 ");			// if ordered and unequal return 1
	__POPRET("");
	}
1.3831 +
__NAKED__ EXPORT_C TInt __nedf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if two doubles are not equal
//
// Returns 1 if ordered and unequal, 0 if equal OR unordered.
// NOTE(review): a NaN operand returns 0 here, whereas the documented
// libgcc __nedf2 convention is nonzero for NaN - confirm callers
// generated by the toolchain expect this behaviour.
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #5 ");				// test if ordered and unequal (bits 0 or 2)
	asm("moveq r0, #0 ");			// if equal or unordered return 0
	asm("movne r0, #1 ");			// if ordered and unequal return 1
	__POPRET("");
	}
1.3845 +
__NAKED__ EXPORT_C TInt __gtsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is greater than another
//
// Returns +1 if ordered and a1>a2, otherwise -1 (including unordered).
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #4 ");				// test if ordered and a1>a2
	asm("movne r0, #1 ");			// if ordered and a1>a2 return +1
	asm("mvneq r0, #0 ");			// else return -1
	__POPRET("");
	}
1.3859 +
__NAKED__ EXPORT_C TInt __gtdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is greater than another
//
// Returns +1 if ordered and a1>a2, otherwise -1 (including unordered).
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers (1/2/4/8 result)
	asm("tst r0, #4 ");				// test if ordered and a1>a2
	asm("movne r0, #1 ");			// if ordered and a1>a2 return +1
	asm("mvneq r0, #0 ");			// else return -1
	__POPRET("");
	}
1.3873 +
__NAKED__ EXPORT_C TInt __gesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is greater than or equal to another
// Entry: a1 in r0, a2 in r1.
// Exit:  r0 = +1 if ordered and a1>=a2, else -1 (a1<a2 or unordered/NaN).
// CompareTReal32 flag bits: see __nesf2 above (bit1|bit2 = a1>=a2).
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers
	asm("tst r0, #6 ");				// test if ordered and a1>=a2
	asm("movne r0, #1 ");			// if ordered and a1>=a2 return +1
	asm("mvneq r0, #0 ");			// else return -1
	__POPRET("");
	}
1.3887 +
__NAKED__ EXPORT_C TInt __gedf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is greater than or equal to another
// Entry: a1 in r0,r1, a2 in r2,r3.
// Exit:  r0 = +1 if ordered and a1>=a2, else -1 (a1<a2 or unordered/NaN).
// CompareTReal64 flag bits: see __nedf2 above (bit1|bit2 = a1>=a2).
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers
	asm("tst r0, #6 ");				// test if ordered and a1>=a2
	asm("movne r0, #1 ");			// if ordered and a1>=a2 return +1
	asm("mvneq r0, #0 ");			// else return -1
	__POPRET("");
	}
1.3901 +
__NAKED__ EXPORT_C TInt __ltsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is less than another
// Entry: a1 in r0, a2 in r1.
// Exit:  r0 = -1 if ordered and a1<a2, else +1 (this covers a1>=a2 and
//        the unordered/NaN case, for which "less" must not hold).
// CompareTReal32 flag bits: see __nesf2 above (bit0 = a1<a2).
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers
	asm("tst r0, #1 ");				// test if ordered and a1<a2
	asm("mvnne r0, #0 ");			// if ordered and a1<a2 return -1
	asm("moveq r0, #1 ");			// else return +1
	__POPRET("");
	}
1.3915 +
__NAKED__ EXPORT_C TInt __ltdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is less than another
// Entry: a1 in r0,r1, a2 in r2,r3.
// Exit:  r0 = -1 if ordered and a1<a2, else +1 (this covers a1>=a2 and
//        the unordered/NaN case, for which "less" must not hold).
// CompareTReal64 flag bits: see __nedf2 above (bit0 = a1<a2).
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers
	asm("tst r0, #1 ");				// test if ordered and a1<a2
	asm("mvnne r0, #0 ");			// if ordered and a1<a2 return -1
	asm("moveq r0, #1 ");			// else return +1
	__POPRET("");
	}
1.3929 +
__NAKED__ EXPORT_C TInt __lesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is less than or equal to another
// Entry: a1 in r0, a2 in r1.
// Exit:  r0 = -1 if ordered and a1<=a2, else +1 (a1>a2 or unordered/NaN).
// CompareTReal32 flag bits: see __nesf2 above (bit0|bit1 = a1<=a2).
//
	{
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");		// compare the two numbers
	asm("tst r0, #3 ");				// test if ordered and a1<=a2
	asm("mvnne r0, #0 ");			// if ordered and a1<=a2 return -1
	asm("moveq r0, #1 ");			// else return +1
	__POPRET("");
	}
1.3943 +
__NAKED__ EXPORT_C TInt __ledf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is less than or equal to another
// Entry: a1 in r0,r1, a2 in r2,r3.
// Exit:  r0 = -1 if ordered and a1<=a2, else +1 (a1>a2 or unordered/NaN).
// CompareTReal64 flag bits: see __nedf2 above (bit0|bit1 = a1<=a2).
//
	{
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");		// compare the two numbers
	asm("tst r0, #3 ");				// test if ordered and a1<=a2
	asm("mvnne r0, #0 ");			// if ordered and a1<=a2 return -1
	asm("moveq r0, #1 ");			// else return +1
	__POPRET("");
	}
1.3957 +
__NAKED__ EXPORT_C TReal32 __mulsf3(TReal32 /*a1*/,TReal32 /*a2*/)
//
// Multiply two floats
// Entry: a1 in r0, a2 in r1; returns a1*a2 in r0.
// Works by widening both operands to the 96-bit TRealX format, multiplying,
// then narrowing back to TReal32. If the narrowing reports an error
// (overflow/underflow etc. in r12), __math_exception is raised with that
// code while the (saturated/default) result is preserved as the return value.
//
	{
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r7,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXMultiply ");			// multiply a1*a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r7,");				// no error - return result directly
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r7,");				// return the saved result
	}
1.3980 +
__NAKED__ EXPORT_C TReal64 __muldf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Multiply two doubles
// Entry: a1 in r0,r1, a2 in r2,r3; returns a1*a2 in r0,r1.
// Widens both operands to TRealX, multiplies, then narrows back to TReal64.
// On a conversion error (code in r12) __math_exception is raised with that
// code while the result is preserved as the return value.
//
	{
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r2 ");					// save a2
	asm("mov r8, r3 ");
	asm("mov r2, r1 ");					// a1 into r1,r2
	asm("mov r1, r0 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a2 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("bl TRealXMultiply ");			// multiply a1*a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");				// no error - return result directly
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r8,");			// return the saved result
	}
1.4008 +
__NAKED__ EXPORT_C TReal32 __divsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Divide two floats
// Entry: a1 in r0, a2 in r1; returns a1/a2 in r0.
// Widens both operands to TRealX, divides, then narrows back to TReal32.
// A KErrDivideByZero (-41) from the divide itself takes precedence over any
// error from the narrowing conversion; a nonzero code raises __math_exception.
//
	{
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6 (divisor)
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3 (dividend)
	asm("bl TRealXDivide ");			// divide a1/a2, result in r1,r2,r3 error code in r12
	asm("mov r9, r12 ");				// save error code in case it's division by zero
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmn r9, #41 ");				// check for KErrDivideByZero (r9 == -41)
	asm("moveq r12, r9 ");				// divide-by-zero overrides conversion error
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r9,");				// no error - return result directly
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r9,");				// return the saved result
	}
1.4034 +
__NAKED__ EXPORT_C TReal64 __divdf3(TReal64 /*a1*/, TReal64 /*a2*/)
	//
	// Divide two doubles
	// Entry: a1 in r0,r1, a2 in r2,r3; returns a1/a2 in r0,r1.
	// Widens both operands to TRealX, divides, then narrows back to TReal64.
	// A KErrDivideByZero (-41) from the divide itself takes precedence over
	// any error from the narrowing conversion; a nonzero code raises
	// __math_exception while the result is preserved as the return value.
	//
	{
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r7, r0 ");					// save a1
	asm("mov r8, r1 ");
	asm("mov r1, r2 ");					// a2 into r1,r2
	asm("mov r2, r3 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6 (divisor)
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a1 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3 (dividend)
	asm("bl TRealXDivide ");			// divide a1/a2, result in r1,r2,r3
	asm("mov r9, r12 ");				// save error code in case it's division by zero
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmn r9, #41 ");				// check for KErrDivideByZero (r9 == -41)
	asm("moveq r12, r9 ");				// divide-by-zero overrides conversion error
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r9,");				// no error - return result directly
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r9,");			// return the saved result
	}
1.4065 +
__NAKED__ EXPORT_C TReal32 __negsf2(TReal32 /*a1*/)
//
// Negate a float
// Entry: a1 in r0; returns -a1 in r0.
// Pure sign-bit flip: works for zeros, denormals and infinities alike
// (and turns a NaN into a NaN with the opposite sign bit).
//
	{
	// a1 in r0 on entry, return value in r0
	asm("eor r0, r0, #0x80000000 ");	// change sign bit (bit 31)
	__JUMP(,lr);
	}
1.4075 +
1.4076 +__NAKED__ EXPORT_C TReal64 __negdf2(TReal64 /*a1*/)
1.4077 +//
1.4078 +// Negate a double
1.4079 +//
1.4080 + {
1.4081 + // a1 in r0,r1 on entry, return value in r0,r1
1.4082 + asm("eor r0, r0, #0x80000000 "); // change sign bit
1.4083 + __JUMP(,lr);
1.4084 + }
1.4085 +
__NAKED__ EXPORT_C TReal32 __floatsisf(TInt /*a1*/)
//
// Convert int to float
// Entry: a1 in r0; returns (TReal32)a1 in r0.
// Takes the absolute value, normalises it with a binary-search shift so the
// MSB reaches bit 31, then rounds the 24-bit mantissa to nearest, ties to
// even (values of magnitude > 2^24 are not exactly representable).
//
	{
	// a1 in r0 on entry, return value in r0
	asm("cmp r0, #0 ");					// test for zero or negative
	__JUMP(eq,lr);						// zero converts to +0.0 unchanged
	asm("and ip, r0, #0x80000000 ");	// ip=bit 31 of r0 (sign bit)
	asm("rsbmi r0, r0, #0 ");			// if negative, negate it
	asm("mov r2, #0x9E ");				// r2=0x9E=exponent of 2^31
	asm("cmp r0, #0x00010000 ");		// normalise integer, adjusting exponent
	asm("movcc r0, r0, lsl #16 ");		// (binary search over shift amounts 16,8,4,2,1)
	asm("subcc r2, r2, #16 ");
	asm("cmp r0, #0x01000000 ");
	asm("movcc r0, r0, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r0, #0x10000000 ");
	asm("movcc r0, r0, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r0, #0x40000000 ");
	asm("movcc r0, r0, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r0, #0x80000000 ");
	asm("movcc r0, r0, lsl #1 ");
	asm("subcc r2, r2, #1 ");
	asm("and r1, r0, #0xFF ");			// r1=bottom 8 bits=rounding bits
	asm("cmp r1, #0x80 ");				// check if we need to round up (carry=1 if we do)
	asm("moveqs r1, r0, lsr #9 ");		// if bottom 8 bits=0x80 (exact tie), set carry=LSB of mantissa (round to even)
	asm("addcss r0, r0, #0x100 ");		// round up if necessary
	asm("addcs r2, r2, #1 ");			// if carry (mantissa overflowed to 2.0), increment exponent
	asm("bic r0, r0, #0x80000000 ");	// remove top bit (integer bit of mantissa implicit)
	asm("mov r0, r0, lsr #8 ");			// mantissa into r0 bits 0-22
	asm("orr r0, r0, r2, lsl #23 ");	// exponent into r0 bits 23-30
	asm("orr r0, r0, ip ");				// sign bit into r0 bit 31
	__JUMP(,lr);
	}
1.4123 +
__NAKED__ EXPORT_C TReal64 __floatsidf(TInt /*a1*/)
//
// Convert int to double
// Entry: a1 in r0; returns (TReal64)a1 in r0,r1.
// Exact conversion - a 31-bit magnitude always fits in the 52-bit mantissa,
// so no rounding is needed. The absolute value is normalised so its MSB
// reaches bit 31, then split into the 20+32 bit TReal64 mantissa fields.
//
	{
	// a1 in r0 on entry, return value in r0,r1
	asm("cmp r0, #0 ");					// test for zero or negative
	asm("moveq r1, #0 ");				// if zero, return 0
	__JUMP(eq,lr);
	asm("and ip, r0, #0x80000000 ");	// ip=bit 31 of r0 (sign bit)
	asm("rsbmi r0, r0, #0 ");			// if negative, negate it
	asm("mov r2, #0x400 ");				// r2=0x41E=exponent of 2^31
	asm("orr r2, r2, #0x1E ");			// (built in two steps - not a valid ARM immediate)
	asm("cmp r0, #0x00010000 ");		// normalise integer, adjusting exponent
	asm("movcc r0, r0, lsl #16 ");		// (binary search over shift amounts 16,8,4,2,1)
	asm("subcc r2, r2, #16 ");
	asm("cmp r0, #0x01000000 ");
	asm("movcc r0, r0, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r0, #0x10000000 ");
	asm("movcc r0, r0, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r0, #0x40000000 ");
	asm("movcc r0, r0, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r0, #0x80000000 ");
	asm("movcc r0, r0, lsl #1 ");
	asm("subcc r2, r2, #1 ");
	asm("bic r0, r0, #0x80000000 ");	// remove top bit (integer bit of mantissa implicit)
	asm("mov r1, r0, lsl #21 ");		// low 11 bits of mantissa into r1
	asm("mov r0, r0, lsr #11 ");		// high 20 bits of mantissa into r0 bits 0-19
	asm("orr r0, r0, r2, lsl #20 ");	// exponent into r0 bits 20-30
	asm("orr r0, r0, ip ");				// sign bit into r0 bit 31
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov ip, r0 ");					// not swapped: low word returns in r0, high in r1
	asm("mov r0, r1 ");
	asm("mov r1, ip ");
#endif
	__JUMP(,lr);
	}
1.4164 +
__NAKED__ EXPORT_C TInt __fixsfsi(TReal32 /*a1*/)
//
// Convert float to int
// Entry: a1 in r0; returns (TInt)a1 in r0.
// Truncates toward zero. NaN returns 0; out-of-range values (including
// +/-infinity) saturate to 0x7FFFFFFF / 0x80000000. Exponents below the
// integer range naturally produce 0 via a >=32-bit logical shift right.
//
	{
	// a1 in r0 on entry, return value in r0
	asm("mov r1, r0, lsr #23 ");
	asm("and r1, r1, #0xFF ");			// r1=exponent of a1
	asm("cmp r1, #0xFF ");				// check for NaN
	asm("bne fixsfsi1 ");
	asm("movs r2, r0, lsl #9 ");		// exponent=FF, check mantissa
	asm("movne r0, #0 ");				// if non-zero, a1 is a NaN so return 0
	__JUMP(ne,lr);
	asm("fixsfsi1: ");
	asm("rsbs r1, r1, #0x9E ");			// r1=number of shifts to produce integer (0x9E=exponent of 2^31)
	asm("ble fixsfsi2 ");				// if <=0, magnitude >= 2^31 so saturate result
	asm("cmp r0, #0 ");					// check sign bit (N flag survives the flag-free mov/orr below)
	asm("orr r0, r0, #0x00800000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #8 ");			// shift mantissa up so MSB is in MSB of r0
	asm("mov r0, r0, lsr r1 ");			// r0=absolute integer (0 if r1>=32, i.e. |a1|<1)
	asm("rsbmi r0, r0, #0 ");			// if negative, negate
	__JUMP(,lr);
	asm("fixsfsi2: ");
	asm("cmp r0, #0 ");					// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");			// if -ve return 80000000, if +ve return 7FFFFFFF
	__JUMP(,lr);
	}
1.4193 +
__NAKED__ EXPORT_C TInt __fixdfsi(TReal64 /*a1*/)
//
// Convert double to int
// Entry: a1 in r0,r1; returns (TInt)a1 in r0.
// Truncates toward zero. NaN returns 0; out-of-range values (including
// +/-infinity) saturate to 0x7FFFFFFF / 0x80000000; |a1|<1 underflows to 0.
//
	{
	// a1 in r0,r1 on entry, return value in r0
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");					// not swapped: get sign/exponent word into r0
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	asm("mov r2, r0, lsr #20 ");
	asm("bic r2, r2, #0x800 ");			// r2=exponent of a1 (sign bit masked off)
	asm("add r3, r2, #1 ");
	asm("cmp r3, #0x800 ");				// check for NaN or infinity (exponent=7FF)
	asm("bne fixdfsi1 ");
	asm("movs r3, r0, lsl #12 ");		// exponent=7FF, check mantissa (both words)
	asm("cmpeq r1, #0 ");
	asm("movne r0, #0 ");				// if non-zero, a1 is a NaN so return 0
	__JUMP(ne,lr);
	asm("fixdfsi1: ");
	asm("mov r3, #0x400 ");
	asm("orr r3, r3, #0x1E ");			// r3=0x41E (exponent of 2^31)
	asm("subs r2, r3, r2 ");			// r2=number of shifts to produce integer
	asm("ble fixdfsi2 ");				// if <=0, magnitude >= 2^31 so saturate result
	asm("cmp r2, #31 ");				// check if more than 31 shifts needed (|a1|<1)
	asm("movhi r0, #0 ");				// if so, underflow result to 0
	__JUMP(hi,lr);
	asm("cmp r0, #0 ");					// check sign bit (N flag survives the flag-free orr/mov below)
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");		// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("mov r0, r0, lsr r2 ");			// r0=absolute integer
	asm("rsbmi r0, r0, #0 ");			// if negative, negate
	__JUMP(,lr);
	asm("fixdfsi2: ");
	asm("cmp r0, #0 ");					// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");			// if -ve return 80000000, if +ve return 7FFFFFFF
	__JUMP(,lr);
	}
1.4235 +
__NAKED__ EXPORT_C TReal64 __extendsfdf2(TReal32 /*a1*/)
//
// Convert a float to a double
// Entry: a1 in r0; returns (TReal64)a1 in r0,r1.
// Always exact: the exponent is rebased (bias 7F -> 3FF, i.e. +0x380),
// infinities/NaNs map to exponent 7FF, and single-precision denormals are
// renormalised since every TReal32 denormal is a normal TReal64 value.
//
	{
	// a1 in r0, return in r0,r1
	asm("mov r3, r0, lsr #3 ");
	asm("ands r3, r3, #0x0FF00000 ");	// r3 bits 20-27 hold exponent, Z=1 if zero/denormal
	asm("mov r1, r0, lsl #9 ");			// r1 = TReal32 mantissa << 9
	asm("and r0, r0, #0x80000000 ");	// leave only sign bit in r0
	asm("beq extendsfdf2a ");			// branch if zero/denormal
	asm("cmp r3, #0x0FF00000 ");		// check for infinity or NaN
	asm("orrcs r3, r3, #0x70000000 ");	// if infinity or NaN, exponent = 7FF
	asm("addcc r3, r3, #0x38000000 ");	// else exponent = TReal32 exponent + 380 (rebias)
	asm("orr r0, r0, r1, lsr #12 ");	// top 20 mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsl #20 ");		// remaining mantissa bits in r1 bits 29-31
	asm("orr r0, r0, r3 ");				// exponent into r0 bits 20-30
	asm("b 0f ");
	asm("extendsfdf2a: ");				// come here if zero or denormal
	asm("cmp r1, #0 ");					// check for zero
	asm("beq 0f ");						// +/-0 passes straight through
	asm("mov r3, #0x38000000 ");		// else exponent = 380 (highest denormal exponent)
	asm("cmp r1, #0x10000 ");			// normalise mantissa, decrementing exponent as needed
	asm("movcc r1, r1, lsl #16 ");		// (binary search over shift amounts 16,8,4,2,1;
	asm("subcc r3, r3, #0x01000000 ");	//  exponent decrements are scaled by 1<<20)
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r3, r3, #0x00800000 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r3, r3, #0x00400000 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r3, r3, #0x00200000 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r3, r3, #0x00100000 ");
	asm("add r1, r1, r1 ");				// shift mantissa left one more to remove integer bit
	asm("orr r0, r0, r1, lsr #12 ");	// top 20 mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsl #20 ");		// remaining mantissa bits in r1 bits 29-31
	asm("orr r0, r0, r3 ");				// exponent into r0 bits 20-30
	asm("0: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r3, r0 ");					// not swapped: low word returns in r0, high in r1
	asm("mov r0, r1 ");
	asm("mov r1, r3 ");
#endif
	__JUMP(,lr);
	}
1.4285 +
__NAKED__ EXPORT_C TReal32 __truncdfsf2(TReal64 /*a1*/)
//
// Convert a double to a float
// Raises an exception if conversion results in an error
// Entry: a1 in r0,r1; returns (TReal32)a1 in r0. Rounds to nearest, ties
// to even; produces denormal TReal32 results where required and raises
// __math_exception on overflow/underflow/NaN argument.
//
	{
	asm("stmfd sp!, {lr} ");
	asm("bl TReal64GetTReal32 ");		// do the conversion
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"");					// no error - return result directly
	asm("stmfd sp!, {r0} ");			// else save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,");					// return the saved result

	// Convert TReal64 in r0,r1 to TReal32 in r0
	// Return error code in r12: KErrNone, KErrOverflow (-9),
	// KErrUnderflow (-10) or KErrArgument (-6, NaN input).
	// r0-r3, r12 modified. Bit 0 of lr is borrowed as a "sticky" flag
	// recording bits lost when denormalising, so every return strips
	// lr's low bits ("bic pc, lr, #3") - the routine therefore always
	// returns in ARM state.
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TReal64GetTReal32: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");					// not swapped: get sign/exponent word into r0
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	asm("mov r12, r0, lsr #20 ");
	asm("bic r12, r12, #0x800 ");		// r12=a1 exponent (sign bit masked off)
	asm("sub r12, r12, #0x380 ");		// r12=exp in - 380 = result exponent if in range
	asm("cmp r12, #0xFF ");				// check if input exponent too big for TReal32
	asm("bge TReal64GetTReal32a ");		// branch if it is (overflow, infinity or NaN)
	asm("mov r2, r0, lsl #11 ");		// left justify mantissa in r2:r1
	asm("orr r2, r2, r1, lsr #21 ");
	asm("mov r1, r1, lsl #11 ");
	asm("orr r2, r2, #0x80000000 ");	// set implied integer bit in mantissa
	asm("cmp r12, #0 ");
	asm("bgt TReal64GetTReal32b ");		// branch if normalised result
	asm("cmn r12, #23 ");				// check for total underflow or zero (exp < -23)
	asm("bge TReal64GetTReal32e ");		// skip if not
	asm("bics r2, r0, #0x80000000 ");	// check if input value zero
	asm("cmpeq r1, #0 ");
	asm("moveq r12, #0 ");				// if zero return KErrNone
	asm("mvnne r12, #9 ");				// else return KErrUnderflow
	asm("and r0, r0, #0x80000000 ");	// return zero of appropriate sign
	asm("mov r1, #0 ");
	__JUMP(,lr);
	asm("TReal64GetTReal32e: ");		// result will be a denormal
	asm("add r12, r12, #31 ");			// r12=32-mantissa shift required = 32-(1-r12)
	asm("movs r3, r1, lsl r12 ");		// r3=lost bits when r2:r1 is shifted
	asm("orrne lr, lr, #1 ");			// if these are not zero, set rounded down flag (sticky bit in lr bit 0)
	asm("rsb r3, r12, #32 ");
	asm("mov r1, r1, lsr r3 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r3 ");			// r2 top 24 bits now give unrounded result mantissa
	asm("mov r12, #0 ");				// result exponent will be zero
	asm("TReal64GetTReal32b: ");		// round r2:r1 to 24 mantissa bits, nearest/ties-to-even
	asm("movs r3, r2, lsl #24 ");		// top 8 truncated bits into top byte of r3
	asm("bpl TReal64GetTReal32c ");		// if top bit clear, truncate
	asm("cmp r3, #0x80000000 ");
	asm("cmpeq r1, #0 ");				// compare rounding bits to 1000...
	asm("bhi TReal64GetTReal32d ");		// if >, round up
	asm("tst lr, #1 ");					// check rounded-down (sticky) flag
	asm("bne TReal64GetTReal32d ");		// if rounded down, round up
	asm("tst r2, #0x100 ");				// else round to even - test LSB of result mantissa
	asm("beq TReal64GetTReal32c ");		// if zero, truncate, else round up
	asm("TReal64GetTReal32d: ");		// come here to round up
	asm("adds r2, r2, #0x100 ");		// increment the mantissa
	asm("movcs r2, #0x80000000 ");		// if carry, mantissa=800000
	asm("addcs r12, r12, #1 ");			// and increment exponent
	asm("cmpmi r12, #1 ");				// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");				// if normalised and exponent=0, set exponent to 1 (denormal became normal)
	asm("TReal64GetTReal32c: ");		// come here to truncate
	asm("and r0, r0, #0x80000000 ");	// leave only sign bit in r0
	asm("orr r0, r0, r12, lsl #23 ");	// exponent into r0 bits 23-30
	asm("bic r2, r2, #0x80000000 ");	// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #8 ");		// non-integer mantissa bits into r0 bits 0-22
	asm("cmp r12, #0xFF ");				// check for overflow (rounding carried into exponent FF)
	asm("mvneq r12, #8 ");				// if overflow, return KErrOverflow
	asm("biceq pc, lr, #3 ");			// return (clearing sticky/Thumb bits from lr)
	asm("bics r1, r0, #0x80000000 ");	// check for underflow (result +/-0)
	asm("mvneq r12, #9 ");				// if underflow return KErrUnderflow
	asm("movne r12, #0 ");				// else return KErrNone
	asm("bic pc, lr, #3 ");				// return (clearing sticky/Thumb bits from lr)
	asm("TReal64GetTReal32a: ");		// come here if overflow, infinity or NaN
	asm("add r3, r12, #1 ");
	asm("cmp r3, #0x480 ");				// check for infinity or NaN (input exponent 7FF)
	asm("movne r1, #0 ");				// if not, set mantissa to 0 for infinity result
	asm("movne r0, r0, lsr #20 ");		// clear low 20 mantissa bits as well
	asm("movne r0, r0, lsl #20 ");
	asm("mov r1, r1, lsr #29 ");		// assemble 23 bit mantissa in r1
	asm("orr r1, r1, r0, lsl #3 ");
	asm("bic r1, r1, #0xFF000000 ");
	asm("and r0, r0, #0x80000000 ");	// leave only sign in r0
	asm("orr r0, r0, #0x7F000000 ");	// r0 bits 23-30 = FF = exponent
	asm("orr r0, r0, #0x00800000 ");
	asm("orr r0, r0, r1 ");				// r0 bits 0-22 = result mantissa
	asm("movs r12, r0, lsl #9 ");		// check if result is infinity or NaN (zero mantissa = infinity)
	asm("mvneq r12, #8 ");				// if infinity return KErrOverflow
	asm("mvnne r12, #5 ");				// else return KErrArgument
	asm("bic pc, lr, #3 ");				// return (clearing sticky/Thumb bits from lr)
	}
1.4386 +} // end of extern "C" declaration
1.4387 +#endif
1.4388 +#endif
1.4389 +