// Update contrib. (stray VCS commit-message line accidentally prepended to this file — not source code)
1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\euser\epoc\arm\uc_realx.cia
// Build-time sanity check: software was configured to use VFP math on a CPU
// variant that does not declare VFP hardware — refuse to build.
25 #if defined(__USE_VFP_MATH) && !defined(__CPU_HAS_VFP)
26 #error __USE_VFP_MATH was defined but not __CPU_HAS_VFP - impossible combination, check variant.mmh
// NOTE(review): this whole excerpt is a corrupted extraction — each line carries a
// stray embedded line number, and doc-comment delimiters, braces, returns and some
// asm lines have been elided. Code lines below are preserved byte-for-byte; only
// comments are added.
//
// Default constructor: zero-initialises the three 32-bit words of a TRealX.
// Register convention in this file: r0 = this; the object layout (see the
// stmia r0,{r1,r2,r3} stores elsewhere in this file) is
//   [r0,#0] = mantissa low word, [r0,#4] = mantissa high word,
//   [r0,#8] = exponent(bits 16-31) / flags / sign(bit 0) word.
29 #ifndef __EABI_CTORS__
30 __NAKED__ EXPORT_C TRealX::TRealX()
32 Constructs a default extended precision object.
34 This sets the value to zero.
// The instruction that zeroes r1 (and the store to [r0,#0]) has been elided
// from this excerpt — presumably "mov r1, #0" preceded these stores. TODO confirm
// against the original uc_realx.cia.
39 asm("str r1, [r0, #4] ");
40 asm("str r1, [r0, #8] ");
// Constructor from raw parts. Arguments arrive in r1=anExp, r2=aMantHi,
// r3=aMantLo (r0 = this); each is stored directly into the corresponding word:
// exponent word at [r0,#8], mantissa high at [r0,#4], mantissa low at [r0,#0].
// The return instruction has been elided from this excerpt.
47 __NAKED__ EXPORT_C TRealX::TRealX(TUint /*anExp*/, TUint /*aMantHi*/, TUint /*aMantLo*/)
49 Constructs an extended precision object from an explicit exponent and
52 @param anExp The exponent
53 @param aMantHi The high order 32 bits of the 64 bit mantissa
54 @param aMantLo The low order 32 bits of the 64 bit mantissa
57 asm("str r1, [r0, #8] ");
58 asm("str r2, [r0, #4] ");
59 asm("str r3, [r0, #0] ");
// Set from a signed 32-bit integer. r0 = this, anInt in r1 on entry.
// Calls the shared ConvertIntToTRealX routine (defined later in this file),
// which produces the TRealX words in r1,r2,r3; the result is stored with a
// single stmia. An instruction between the push and the bl (presumably moving
// the argument into r2, which ConvertIntToTRealX normalises — TODO confirm)
// and the epilogue pop/return have been elided from this excerpt.
68 __NAKED__ EXPORT_C TInt TRealX::Set(TInt /*anInt*/)
70 Gives this extended precision object a new value taken
71 from a signed integer.
73 @param anInt The signed integer value.
75 @return KErrNone, always.
78 asm("stmfd sp!, {lr} ");
80 asm("bl ConvertIntToTRealX ");
81 asm("stmia r0, {r1,r2,r3} ");
82 asm("mov r0, #0 "); // return KErrNone
// Constructor from a signed integer (non-EABI-ctor build only). The entire
// body has been elided from this excerpt; by analogy with operator=(TInt)
// below it presumably converts via ConvertIntToTRealX — TODO confirm.
89 #ifndef __EABI_CTORS__
90 __NAKED__ EXPORT_C TRealX::TRealX(TInt /*anInt*/)
92 Constructs an extended precision object from a signed integer value.
94 @param anInt The signed integer value.
// Assignment from a signed 32-bit integer; returns *this (r0). Shares the
// conversion routine below with Set(TInt) and the TInt constructor.
104 __NAKED__ EXPORT_C TRealX& TRealX::operator=(TInt /*anInt*/)
106 Assigns the specified signed integer value to this extended precision object.
108 @param anInt The signed integer value.
110 @return A reference to this extended precision object.
113 asm("stmfd sp!, {lr} ");
115 asm("bl ConvertIntToTRealX ");
116 asm("stmia r0, {r1,r2,r3} ");
// ConvertIntToTRealX: converts a signed int to TRealX form in r1,r2,r3
// (r1 = mantissa low = 0, r2 = mantissa high, r3 = exponent/sign word).
// The conditional moves below test flags set by an instruction elided from
// this excerpt (presumably a "movs r2, r1" that both copied the argument and
// set N/Z — TODO confirm).
119 asm("ConvertIntToTRealX: ");
121 asm("movpl r3, #0 "); // if int>0, r3=0
122 asm("beq ConvertIntToTRealX0 "); // if int=0, return 0
123 asm("movmi r3, #1 "); // if int<0, r3=1
124 asm("rsbmi r2, r2, #0 "); // if int -ve, negate it
// Biased exponent 0x801E = value for a mantissa with the binary point after
// bit 31 of r2; normalisation below subtracts one from the exponent (in the
// 16-31 bit field) per left shift.
125 asm("orr r3, r3, #0x001E0000 ");
126 asm("orr r3, r3, #0x80000000 "); // r3=exponent 801E + sign bit
// CLZ path: the clz instruction computing the shift count in r12 has been
// elided from this excerpt; r12 = number of leading zeros of r2.
127 #ifdef __CPU_ARM_HAS_CLZ
129 asm("mov r2, r2, lsl r12 ");
130 asm("sub r3, r3, r12, lsl #16 ");
// Non-CLZ path (the #else line is elided): binary-search normalisation,
// shifting the mantissa left 16/8/4/2/1 bits while decrementing the exponent
// field (bits 16-31 of r3) by the same amount.
132 asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
133 asm("movcc r2, r2, lsl #16 ");
134 asm("subcc r3, r3, #0x100000 ");
135 asm("cmp r2, #0x1000000 ");
136 asm("movcc r2, r2, lsl #8 ");
137 asm("subcc r3, r3, #0x080000 ");
138 asm("cmp r2, #0x10000000 ");
139 asm("movcc r2, r2, lsl #4 ");
140 asm("subcc r3, r3, #0x040000 ");
141 asm("cmp r2, #0x40000000 ");
142 asm("movcc r2, r2, lsl #2 ");
143 asm("subcc r3, r3, #0x020000 ");
144 asm("cmp r2, #0x80000000 ");
145 asm("movcc r2, r2, lsl #1 ");
146 asm("subcc r3, r3, #0x010000 ");
// Zero input lands here directly (with r3 already holding exponent 0/sign).
148 asm("ConvertIntToTRealX0: ");
149 asm("mov r1, #0 "); // low order word of mantissa = 0
// Set from a 64-bit integer passed by reference: r1 points at the TInt64, so
// the ldmia loads its low word into r1 and high word into r2 before calling
// the shared ConvertInt64ToTRealX routine. Epilogue (pop/return) elided from
// this excerpt.
156 __NAKED__ EXPORT_C TInt TRealX::Set(const TInt64& /*anInt*/)
158 Gives this extended precision object a new value taken from
161 @param anInt The 64 bit integer value.
163 @return KErrNone, always.
166 asm("stmfd sp!, {lr} ");
167 asm("ldmia r1, {r1,r2} ");
168 asm("bl ConvertInt64ToTRealX ");
169 asm("stmia r0, {r1,r2,r3} ");
170 asm("mov r0, #0 "); // return KErrNone
// Constructor from a 64-bit integer (non-EABI-ctor build only). Body elided
// from this excerpt; presumably mirrors operator=(const TInt64&) below —
// TODO confirm against the original file.
177 #ifndef __EABI_CTORS__
178 __NAKED__ EXPORT_C TRealX::TRealX(const TInt64& /*anInt*/)
180 Constructs an extended precision object from a 64 bit integer.
182 @param anInt A reference to a 64 bit integer.
// Assignment from a 64-bit integer; returns *this (r0).
192 __NAKED__ EXPORT_C TRealX& TRealX::operator=(const TInt64& /*anInt*/)
194 Assigns the specified 64 bit integer value to this extended precision object.
196 @param anInt A reference to a 64 bit integer.
198 @return A reference to this extended precision object.
201 asm("stmfd sp!, {lr} ");
202 asm("ldmia r1, {r1,r2} ");
203 asm("bl ConvertInt64ToTRealX ");
204 asm("stmia r0, {r1,r2,r3} ");
// ConvertInt64ToTRealX: input r1 = low word, r2 = high word of the TInt64.
// Output TRealX in r1,r2,r3. Takes the absolute value, then either falls
// through to the 32-bit path (value fits in 32 bits) or normalises the full
// 64-bit mantissa with exponent 0x803E minus the shift count.
207 asm("ConvertInt64ToTRealX: ");
208 asm("movs r3, r2, lsr #31 "); // sign bit into r3 bit 0
209 asm("beq ConvertInt64ToTRealX1 "); // skip if plus
210 asm("rsbs r1, r1, #0 "); // take absolute value
211 asm("rsc r2, r2, #0 ");
212 asm("ConvertInt64ToTRealX1: ");
213 asm("cmp r2, #0 "); // does it fit into 32 bits?
214 asm("moveq r2, r1 "); // if it does, do 32 bit conversion
215 asm("beq ConvertUintToTRealX1 ");
// CLZ path: the clz instruction producing the shift count in r12 is elided
// from this excerpt. r2:r1 is shifted left by r12 to normalise.
216 #ifdef __CPU_ARM_HAS_CLZ
218 asm("mov r2, r2, lsl r12 ");
219 asm("rsb r12, r12, #32 ");
220 asm("orr r2, r2, r1, lsr r12 ");
221 asm("rsb r12, r12, #32 ");
// Non-CLZ path (the #else line is elided): compute r12 = 32 - shift count by
// binary search while normalising r2, then fold r1 in behind it.
223 asm("mov r12, #32 "); // 32-number of left-shifts needed to normalise
224 asm("cmp r2, #0x10000 "); // calculate number required
225 asm("movcc r2, r2, lsl #16 ");
226 asm("subcc r12, r12, #16 ");
227 asm("cmp r2, #0x1000000 ");
228 asm("movcc r2, r2, lsl #8 ");
229 asm("subcc r12, r12, #8 ");
230 asm("cmp r2, #0x10000000 ");
231 asm("movcc r2, r2, lsl #4 ");
232 asm("subcc r12, r12, #4 ");
233 asm("cmp r2, #0x40000000 ");
234 asm("movcc r2, r2, lsl #2 ");
235 asm("subcc r12, r12, #2 ");
236 asm("cmp r2, #0x80000000 ");
237 asm("movcc r2, r2, lsl #1 ");
238 asm("subcc r12, r12, #1 "); // r2 is now normalised
239 asm("orr r2, r2, r1, lsr r12 "); // shift r1 left into r2
240 asm("rsb r12, r12, #32 ");
242 asm("mov r1, r1, lsl r12 ");
// Final exponent = 0x803E - (left shifts performed), plus sign already in r3
// bit 0; sign bit 31 of the exponent word is the 0x8000xxxx bias.
243 asm("add r3, r3, #0x80000000 "); // exponent = 803E-r12
244 asm("add r3, r3, #0x003E0000 ");
245 asm("sub r3, r3, r12, lsl #16 ");
// Set from an unsigned 32-bit integer via the shared ConvertUintToTRealX
// routine. An argument-marshalling instruction (presumably moving the value
// into r2 — TODO confirm) and the epilogue have been elided from this excerpt.
252 __NAKED__ EXPORT_C TInt TRealX::Set(TUint /*anInt*/)
254 Gives this extended precision object a new value taken from
257 @param The unsigned integer value.
259 @return KErrNone, always.
262 asm("stmfd sp!, {lr} ");
264 asm("bl ConvertUintToTRealX ");
265 asm("stmia r0, {r1,r2,r3} ");
266 asm("mov r0, #0 "); // return KErrNone
// Constructor from an unsigned integer (non-EABI-ctor build only). Body
// elided from this excerpt; presumably mirrors operator=(TUint) below —
// TODO confirm against the original file.
273 #ifndef __EABI_CTORS__
274 __NAKED__ EXPORT_C TRealX::TRealX(TUint /*anInt*/)
276 Constructs an extended precision object from an unsigned integer value.
278 @param anInt The unsigned integer value.
// Assignment from an unsigned 32-bit integer; returns *this (r0).
288 __NAKED__ EXPORT_C TRealX& TRealX::operator=(TUint /*anInt*/)
290 Assigns the specified unsigned integer value to this extended precision object.
292 @param anInt The unsigned integer value.
294 @return A reference to this extended precision object.
297 asm("stmfd sp!, {lr} ");
299 asm("bl ConvertUintToTRealX ");
300 asm("stmia r0, {r1,r2,r3} ");
// ConvertUintToTRealX: converts an unsigned int (in r2 at the ...1 entry,
// which is also reached from ConvertInt64ToTRealX for values fitting 32 bits)
// to TRealX form in r1,r2,r3. A sign-clearing instruction between the two
// entry labels has been elided from this excerpt (presumably "mov r3, #0" —
// TODO confirm).
303 asm("ConvertUintToTRealX: ");
305 asm("ConvertUintToTRealX1: ");
306 asm("cmp r2, #0 "); // check for zero
307 asm("beq ConvertUintToTRealX0 ");
308 asm("orr r3, r3, #0x001E0000 ");
309 asm("orr r3, r3, #0x80000000 "); // r3=exponent 801E
// CLZ path: the clz instruction producing the shift count r12 is elided.
310 #ifdef __CPU_ARM_HAS_CLZ
312 asm("mov r2, r2, lsl r12 ");
313 asm("sub r3, r3, r12, lsl #16 ");
// Non-CLZ path (the #else line is elided): binary-search normalisation,
// decrementing the exponent field by the shift amount at each step.
315 asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
316 asm("movcc r2, r2, lsl #16 ");
317 asm("subcc r3, r3, #0x100000 ");
318 asm("cmp r2, #0x1000000 ");
319 asm("movcc r2, r2, lsl #8 ");
320 asm("subcc r3, r3, #0x080000 ");
321 asm("cmp r2, #0x10000000 ");
322 asm("movcc r2, r2, lsl #4 ");
323 asm("subcc r3, r3, #0x040000 ");
324 asm("cmp r2, #0x40000000 ");
325 asm("movcc r2, r2, lsl #2 ");
326 asm("subcc r3, r3, #0x020000 ");
327 asm("cmp r2, #0x80000000 ");
328 asm("movcc r2, r2, lsl #1 ");
329 asm("subcc r3, r3, #0x010000 ");
331 asm("ConvertUintToTRealX0: ");
332 asm("mov r1, #0 "); // low order word of mantissa = 0
// Sets *this to +0 or -0. r0 = this, aNegative in r1; the comparison that
// sets the NE condition tested by "movne r3, #1" (and the instructions
// zeroing r1/r2 and the r3 default) have been elided from this excerpt.
// r3 bit 0 is the sign flag in the exponent word.
339 __NAKED__ EXPORT_C void TRealX::SetZero(TBool /*aNegative*/)
341 Sets the value of this extended precision object to zero.
343 @param aNegative ETrue, the value is a negative zero;
344 EFalse, the value is a positive zero, this is the default.
349 asm("movne r3, #1 ");
352 asm("stmia r0, {r1,r2,r3} ");
// Sets *this to the 'real indefinite' NaN: exponent word 0xFFFF0001
// (exponent FFFF, sign bit set) with mantissa high word 0xC0000000.
// The exponent constant is loaded PC-relative from the literal emitted below.
// The instruction zeroing r1 (mantissa low) has been elided from this excerpt.
359 __NAKED__ EXPORT_C void TRealX::SetNaN()
361 Sets the value of this extended precision object to 'not a number'.
364 asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] ");
365 asm("mov r2, #0xC0000000 ");
367 asm("stmia r0, {r1,r2,r3} ");
369 asm("__RealIndefiniteExponent: ");
370 asm(".word 0xFFFF0001 ");
// Sets *this to +/- infinity: exponent word 0xFFFF0000 (| 1 for negative),
// mantissa high 0x80000000, mantissa low 0 (zeroing of r1 elided from this
// excerpt, as is the comparison that drives the "orrne").
// NOTE(review): the @param text below says "negative zero" — it appears to be
// copy-pasted from SetZero; for this function the parameter selects the sign
// of infinity.
377 __NAKED__ EXPORT_C void TRealX::SetInfinite(TBool /*aNegative*/)
379 Sets the value of this extended precision object to infinity.
381 @param aNegative ETrue, the value is a negative zero;
382 EFalse, the value is a positive zero.
385 asm("ldr r3, [pc, #__InfiniteExponent-.-8] ");
387 asm("orrne r3, r3, #1 ");
388 asm("mov r2, #0x80000000 ");
390 asm("stmia r0, {r1,r2,r3} ");
392 asm("__InfiniteExponent: ");
393 asm(".word 0xFFFF0000 ");
// True iff the exponent field (bits 16-31 of the word at [r0,#8]) is zero —
// i.e. the whole exponent word is below 0x10000 regardless of the sign/flag
// bits. Return instruction elided from this excerpt.
399 __NAKED__ EXPORT_C TBool TRealX::IsZero() const
401 Determines whether the extended precision value is zero.
403 @return True, if the extended precision value is zero, false, otherwise.
406 asm("ldr r1, [r0, #8] "); // get exponent word
407 asm("mov r0, #0 "); // default return value is 0
408 asm("cmp r1, #0x10000 "); // is exponent=0 ?
409 asm("movcc r0, #1 "); // if so return 1
// NaN test: exponent field == 0xFFFF AND mantissa != 0x80000000:00000000
// (that exact mantissa encodes infinity, not NaN). The local "1:" label the
// bcc jumps to, and the return, have been elided from this excerpt.
416 __NAKED__ EXPORT_C TBool TRealX::IsNaN() const
418 Determines whether the extended precision value is 'not a number'.
420 @return True, if the extended precision value is 'not a number',
424 asm("ldmia r0, {r1,r2,r3} ");
425 asm("mov r0, #0 "); // default return value is 0
426 asm("cmn r3, #0x10000 "); // check for exponent 65535
427 asm("bcc 1f "); // branch if not
428 asm("cmp r2, #0x80000000 "); // check if infinity
429 asm("cmpeq r1, #0 ");
430 asm("movne r0, #1 "); // if not, return 1
// Infinity test: exponent field == 0xFFFF AND mantissa == 0x80000000:00000000
// (note "moveq" here vs "movne" in IsNaN — same checks, opposite selection).
// The local "1:" label and the return have been elided from this excerpt.
// NOTE(review): the @return text below describes IsFinite — it appears to be
// copy-pasted; the code returns true exactly when the value is infinite.
438 __NAKED__ EXPORT_C TBool TRealX::IsInfinite() const
440 Determines whether the extended precision value has a finite value.
442 @return True, if the extended precision value is finite,
443 false, if the value is 'not a number' or is infinite,
446 asm("ldmia r0, {r1,r2,r3} ");
447 asm("mov r0, #0 "); // default return value is 0
448 asm("cmn r3, #0x10000 "); // check for exponent 65535
449 asm("bcc 1f "); // branch if not
450 asm("cmp r2, #0x80000000 "); // check if infinity
451 asm("cmpeq r1, #0 ");
452 asm("moveq r0, #1 "); // if it is, return 1
// Finite test: true iff the exponent field is not 0xFFFF (the exponent used
// by both infinity and NaN). Only the exponent word needs loading.
// Return instruction elided from this excerpt.
460 __NAKED__ EXPORT_C TBool TRealX::IsFinite() const
462 Determines whether the extended precision value has a finite value.
464 @return True, if the extended precision value is finite,
465 false, if the value is 'not a number' or is infinite,
468 asm("ldr r1, [r0, #8] "); // get exponent word
469 asm("mov r0, #0 "); // default return value is 0
470 asm("cmn r1, #0x10000 "); // is exponent=65535 (infinity or NaN) ?
471 asm("movcc r0, #1 "); // if not return 1
// Constructor from a TReal32 (non-EABI-ctor build only). Body elided from
// this excerpt; presumably calls ConvertTReal32ToTRealX like operator= below
// — TODO confirm against the original file.
478 #ifndef __EABI_CTORS__
479 __NAKED__ EXPORT_C TRealX::TRealX(TReal32 /*aReal*/) __SOFTFP
481 Constructs an extended precision object from
482 a single precision floating point number.
484 @param aReal The single precision floating point value.
// Assignment from a TReal32 (soft-float: raw IEEE bits arrive in r1).
// Converts via the shared ConvertTReal32ToTRealX routine and stores the
// result; epilogue (pop/return of *this in r0) elided from this excerpt.
494 __NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal32 /*aReal*/) __SOFTFP
496 Assigns the specified single precision floating point number to
497 this extended precision object.
499 @param aReal The single precision floating point value.
501 @return A reference to this extended precision object.
504 asm("stmfd sp!, {lr} ");
505 asm("bl ConvertTReal32ToTRealX ");
506 asm("stmia r0, {r1,r2,r3} ");
// Set from a TReal32, reporting overflow/NaN via the return code:
// after conversion, exponent 0xFFFF with mantissa 0x80000000 (infinity)
// yields KErrOverflow (-9, built as mvn #8); any other 0xFFFF-exponent value
// (NaN) yields KErrArgument (-6, mvn #5). The epilogue pop/return is elided
// from this excerpt.
513 __NAKED__ EXPORT_C TInt TRealX::Set(TReal32 /*aReal*/) __SOFTFP
515 Gives this extended precision object a new value taken from
516 a single precision floating point number.
518 @param aReal The single precision floating point value.
520 @return KErrNone, if a valid number;
521 KErrOverflow, if the number is infinite;
522 KErrArgument, if not a number.
525 // aReal is in r1 on entry
526 // sign in bit 31, exponent in 30-23, mantissa (non-integer bits) in 22-0
527 asm("stmfd sp!, {lr} ");
528 asm("bl ConvertTReal32ToTRealX ");
529 asm("stmia r0, {r1,r2,r3} ");
530 asm("cmn r3, #0x10000 "); // check for infinity or NaN
531 asm("movcc r0, #0 "); // if neither, return KErrNone
532 asm("bcc trealx_set_treal32_0 ");
533 asm("cmp r2, #0x80000000 "); // check for infinity
534 asm("mvneq r0, #8 "); // if so, return KErrOverflow
535 asm("mvnne r0, #5 "); // else return KErrArgument
536 asm("trealx_set_treal32_0: ");
539 // Convert 32-bit real in r1 to TRealX in r1,r2,r3
540 // r0 unmodified, r1,r2,r3,r12 modified
// Unpacks the IEEE single: 8-bit exponent rebiased to TRealX's 16-bit field
// (+0x7F80), mantissa left-justified in r2 with the implicit integer bit
// restored, sign into r3 bit 0. Denormals are renormalised below; infinity
// and NaN map to exponent 0xFFFF.
541 asm("ConvertTReal32ToTRealX: ");
542 asm("mov r3, r1, lsr #7 "); // r3 bits 16-31 = TReal32 exponent
543 asm("ands r3, r3, #0x00FF0000 ");
544 asm("mov r2, r1, lsl #8 "); // r2 = TReal32 mantissa << 8, bit 31 not yet in
545 asm("orrne r2, r2, #0x80000000 "); // if not zero/denormal, put in implied integer bit
546 asm("orr r3, r3, r1, lsr #31 "); // r3 bit 0 = sign bit
547 asm("mov r1, #0 "); // low word of mantissa = 0
548 asm("beq ConvertTReal32ToTRealX0 "); // branch if zero/denormal
549 asm("cmp r3, #0x00FF0000 "); // check for infinity or NaN
550 asm("orrcs r3, r3, #0xFF000000 "); // if infinity or NaN, exponent = FFFF
551 asm("addcc r3, r3, #0x7F000000 "); // else exponent = TReal32 exponent + 7F80
552 asm("addcc r3, r3, #0x00800000 ");
// Zero/denormal path. The early-out for a true zero after the "adds" test
// has been elided from this excerpt.
554 asm("ConvertTReal32ToTRealX0: "); // come here if zero or denormal
555 asm("adds r2, r2, r2 "); // shift mantissa left one more and check if zero
557 asm("add r3, r3, #0x7F000000 "); // else exponent = 7F80 (highest denormal exponent)
558 asm("add r3, r3, #0x00800000 ");
// CLZ path: the clz instruction computing r12 is elided from this excerpt.
559 #ifdef __CPU_ARM_HAS_CLZ
561 asm("mov r2, r2, lsl r12 ");
562 asm("sub r3, r3, r12, lsl #16 ");
// Non-CLZ path (the #else line is elided): binary-search normalisation.
564 asm("cmp r2, #0x10000 "); // normalise mantissa, decrementing exponent as needed
565 asm("movcc r2, r2, lsl #16 ");
566 asm("subcc r3, r3, #0x100000 ");
567 asm("cmp r2, #0x1000000 ");
568 asm("movcc r2, r2, lsl #8 ");
569 asm("subcc r3, r3, #0x080000 ");
570 asm("cmp r2, #0x10000000 ");
571 asm("movcc r2, r2, lsl #4 ");
572 asm("subcc r3, r3, #0x040000 ");
573 asm("cmp r2, #0x40000000 ");
574 asm("movcc r2, r2, lsl #2 ");
575 asm("subcc r3, r3, #0x020000 ");
576 asm("cmp r2, #0x80000000 ");
577 asm("movcc r2, r2, lsl #1 ");
578 asm("subcc r3, r3, #0x010000 ");
// Constructor from a TReal64 (non-EABI-ctor build only). Body elided from
// this excerpt; presumably calls ConvertTReal64ToTRealX like operator= below
// — TODO confirm against the original file.
586 #ifndef __EABI_CTORS__
587 __NAKED__ EXPORT_C TRealX::TRealX(TReal64 /*aReal*/) __SOFTFP
589 Constructs an extended precision object from
590 a double precision floating point number.
592 @param aReal The double precision floating point value.
// Assignment from a TReal64 (soft-float: raw IEEE bits in registers — see
// the word-order note above ConvertTReal64ToTRealX). Epilogue (pop/return of
// *this in r0) elided from this excerpt.
602 __NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal64 /*aReal*/) __SOFTFP
604 Assigns the specified double precision floating point number to
605 this extended precision object.
607 @param aReal The double precision floating point value.
609 @return A reference to this extended precision object.
612 asm("stmfd sp!, {lr} ");
613 asm("bl ConvertTReal64ToTRealX ");
614 asm("stmia r0, {r1,r2,r3} ");
// Set from a TReal64 with the same error-code mapping as Set(TReal32):
// KErrNone / KErrOverflow (infinity) / KErrArgument (NaN). Epilogue elided
// from this excerpt.
621 __NAKED__ EXPORT_C TInt TRealX::Set(TReal64 /*aReal*/) __SOFTFP
623 Gives this extended precision object a new value taken from
624 a double precision floating point number.
626 @param aReal The double precision floating point value.
628 @return KErrNone, if a valid number;
629 KErrOverflow, if the number is infinite;
630 KErrArgument, if not a number.
633 // aReal is in r1,r2 on entry
634 // sign in bit 31 of r1, exponent in 30-20 of r1
635 // mantissa (non-integer bits) in 19-0 of r1 (high) and r2 (low)
636 asm("stmfd sp!, {lr} ");
637 asm("bl ConvertTReal64ToTRealX ");
638 asm("stmia r0, {r1,r2,r3} ");
639 asm("cmn r3, #0x10000 "); // check for infinity or NaN
640 asm("movcc r0, #0 "); // if neither, return KErrNone
641 asm("bcc trealx_set_treal64_0 ");
642 asm("cmp r2, #0x80000000 "); // check for infinity
643 asm("cmpeq r1, #0 ");
644 asm("mvneq r0, #8 "); // if so, return KErrOverflow
645 asm("mvnne r0, #5 "); // else return KErrArgument
646 asm("trealx_set_treal64_0: ");
649 // convert TReal64 in r1,r2 in GCC and r2 and r3 in RVCT
650 // if __DOUBLE_WORDS_SWAPPED__ r1=sign,exp,high mant, r2=low mant
651 // else r1 unused , r2=low mant, r3=sign,exp,high mant (as a result of EABI alignment reqs)
652 // into TRealX in r1,r2,r3 (r2,r1=mant high,low r3=exp,flag,sign)
653 // r0 unmodified, r1,r2,r3,r12 modified
// Unpacks the IEEE double: 11-bit exponent rebiased to TRealX's 16-bit field
// (+0x7C00), 52-bit mantissa left-justified across r2:r1 with the implicit
// integer bit restored. Denormals renormalised below; Inf/NaN -> exp 0xFFFF.
654 asm("ConvertTReal64ToTRealX: ");
// The #else arm of this word-order #ifdef (moving r3 into r1 for the EABI
// layout) has been partially elided from this excerpt; both visible arms
// stash the low mantissa word in r12.
655 #ifdef __DOUBLE_WORDS_SWAPPED__
656 asm("mov r12, r2 "); // ls word of mantissa into r12
658 asm("mov r12, r2 "); // ls word of mantissa into r12
661 asm("mov r3, r1, lsr #20 "); // sign and exp into bottom 12 bits of r3
662 asm("mov r2, r1, lsl #11 "); // left justify mantissa in r2,r1
663 asm("mov r3, r3, lsl #16 "); // and into bits 16-27
664 asm("bics r3, r3, #0x08000000 "); // remove sign, leaving exponent in bits 16-26
665 asm("orr r2, r2, r12, lsr #21 ");
666 asm("orrne r2, r2, #0x80000000 "); // if not zero/denormal, put in implied integer bit
667 asm("orr r3, r3, r1, lsr #31 "); // sign bit into bit 0 of r3
668 asm("mov r1, r12, lsl #11 ");
669 asm("beq ConvertTReal64ToTRealX0 "); // branch if zero or denormal
670 asm("mov r12, r3, lsl #5 "); // exponent into bits 21-31 of r12
671 asm("cmn r12, #0x00200000 "); // check if exponent=7FF (infinity or NaN)
672 asm("addcs r3, r3, #0xF8000000 "); // if so, result exponent=FFFF
673 asm("addcc r3, r3, #0x7C000000 "); // else result exponent = TReal64 exponent + 7C00
// Zero/denormal path. The early-out for a true zero after the zero test has
// been elided from this excerpt.
675 asm("ConvertTReal64ToTRealX0: "); // come here if zero or denormal
676 asm("adds r1, r1, r1 "); // shift mantissa left one more bit
677 asm("adcs r2, r2, r2 ");
678 asm("cmpeq r1, #0 "); // and test for zero
680 asm("add r3, r3, #0x7C000000 "); // else exponent=7C00 (highest denormal exponent)
681 asm("cmp r2, #0 "); // normalise - first check if r2=0
682 asm("moveq r2, r1 "); // if so, shift up by 32
683 asm("moveq r1, #0 ");
684 asm("subeq r3, r3, #0x200000 "); // and subtract 32 from exponent
// CLZ path: the clz instruction computing the shift count r12 is elided.
685 #ifdef __CPU_ARM_HAS_CLZ
687 asm("mov r2, r2, lsl r12 ");
688 asm("rsb r12, r12, #32 ");
689 asm("orr r2, r2, r1, lsr r12 ");
690 asm("rsb r12, r12, #32 ");
// Non-CLZ path (the #else line is elided): r12 = 32 - shifts, by binary search.
692 asm("mov r12, #32 "); // 32-number of left-shifts needed to normalise
693 asm("cmp r2, #0x10000 "); // calculate number required
694 asm("movcc r2, r2, lsl #16 ");
695 asm("subcc r12, r12, #16 ");
696 asm("cmp r2, #0x1000000 ");
697 asm("movcc r2, r2, lsl #8 ");
698 asm("subcc r12, r12, #8 ");
699 asm("cmp r2, #0x10000000 ");
700 asm("movcc r2, r2, lsl #4 ");
701 asm("subcc r12, r12, #4 ");
702 asm("cmp r2, #0x40000000 ");
703 asm("movcc r2, r2, lsl #2 ");
704 asm("subcc r12, r12, #2 ");
705 asm("cmp r2, #0x80000000 ");
706 asm("movcc r2, r2, lsl #1 ");
707 asm("subcc r12, r12, #1 "); // r2 is now normalised
708 asm("orr r2, r2, r1, lsr r12 "); // shift r1 left into r2
709 asm("rsb r12, r12, #32 ");
711 asm("mov r1, r1, lsl r12 ");
712 asm("sub r3, r3, r12, lsl #16 "); // exponent -= number of left shifts
// Conversion to signed 32-bit int, truncating toward zero with saturation:
// in-range values are shifted down from the mantissa; overflow returns
// 0x7FFFFFFF (+) or 0x80000000 (-) via the carry trick at the end; NaN
// returns 0. Return instructions between sections have been elided from this
// excerpt.
720 __NAKED__ EXPORT_C TRealX::operator TInt() const
722 Gets the extended precision value as a signed integer value.
724 The operator returns:
726 1. zero , if the extended precision value is not a number
728 2. 0x7FFFFFFF, if the value is positive and too big to fit into a TInt.
730 3. 0x80000000, if the value is negative and too big to fit into a TInt.
733 asm("ldmia r0, {r1,r2,r3} "); // get value into r1,r2,r3
735 asm("ConvertTRealXToInt: ");
736 asm("mov r12, #0x8000 "); // r12=0x801E
737 asm("orr r12, r12, #0x001E ");
738 asm("subs r12, r12, r3, lsr #16 "); // r12=801E-exponent
739 asm("bls ConvertTRealXToInt1 "); // branch if exponent>=801E
740 asm("cmp r12, #31 "); // test if exponent<7FFF
741 asm("movhi r0, #0 "); // if so, underflow result to zero
// The return on the underflow path (between these lines) is elided.
743 asm("mov r0, r2, lsr r12 "); // shift mantissa right to form integer
744 asm("tst r3, #1 "); // check sign bit
745 asm("rsbne r0, r0, #0 "); // if negative, r0=-r0
747 asm("ConvertTRealXToInt1: ");
748 asm("cmn r3, #0x10000 "); // check for infinity or NaN
749 asm("bcc ConvertTRealXToInt2 "); // branch if neither
750 asm("cmp r2, #0x80000000 "); // check for infinity
751 asm("cmpeq r1, #0 ");
752 asm("movne r0, #0 "); // if NaN, return 0
754 asm("ConvertTRealXToInt2: ");
// Saturation: shifting the sign bit of r3 into carry makes the sbc produce
// 0x7FFFFFFF for positive overflow and leave 0x80000000 for negative.
755 asm("mov r0, #0x80000000 "); // return 0x80000000 if -ve overflow, 0x7FFFFFFF if +ve
756 asm("movs r3, r3, lsr #1 ");
757 asm("sbc r0, r0, #0 ");
// Conversion to unsigned 32-bit int, truncating with saturation: positive
// overflow -> 0xFFFFFFFF, any negative out-of-range value -> 0, NaN -> 0.
// (The doc line "unsigned signed integer" is a typo in the original.)
// Return instructions between sections have been elided from this excerpt.
764 __NAKED__ EXPORT_C TRealX::operator TUint() const
766 Returns the extended precision value as an unsigned signed integer value.
768 The operator returns:
770 1. zero, if the extended precision value is not a number
772 2. 0xFFFFFFFF, if the value is positive and too big to fit into a TUint.
774 3. zero, if the value is negative and too big to fit into a TUint.
777 asm("ldmia r0, {r1,r2,r3} "); // get value into r1,r2,r3
779 asm("ConvertTRealXToUint: ");
780 asm("mov r12, #0x8000 "); // r12=0x801E
781 asm("orr r12, r12, #0x001E ");
782 asm("subs r12, r12, r3, lsr #16 "); // r12=801E-exponent
783 asm("bcc ConvertTRealXToUint1 "); // branch if exponent>801E
784 asm("cmp r12, #31 "); // test if exponent<7FFF
785 asm("movhi r0, #0 "); // if so, underflow result to zero
787 asm("tst r3, #1 "); // check sign bit
788 asm("moveq r0, r2, lsr r12 "); // if +ve, shift mantissa right to form integer
789 asm("movne r0, #0 "); // if negative, r0=0
791 asm("ConvertTRealXToUint1: ");
792 asm("mov r0, #0 "); // r0=0 initially
793 asm("cmn r3, #0x10000 "); // check for infinity or NaN
794 asm("bcc ConvertTRealXToUint2 "); // branch if neither
795 asm("cmp r2, #0x80000000 "); // check for infinity
796 asm("cmpeq r1, #0 ");
// The NaN early-return between these lines is elided from this excerpt.
798 asm("ConvertTRealXToUint2: ");
799 asm("movs r3, r3, lsr #1 "); // sign bit into carry
800 asm("sbc r0, r0, #0 "); // r0=0 if -ve, 0xFFFFFFFF if +ve
// Conversion to signed 64-bit int, truncating toward zero with saturation to
// KMaxTInt64/KMinTInt64 and NaN -> 0. Note the shifted register convention:
// the value is loaded into r0,r1,r2 (not r1,r2,r3) so the 64-bit result can
// be produced in r1:r0. Return instructions between sections elided from
// this excerpt.
807 __NAKED__ EXPORT_C TRealX::operator TInt64() const
809 Returns the extended precision value as a 64 bit integer value.
811 The operator returns:
813 1. zero, if the extended precision value is not a number
815 2. 0x7FFFFFFF FFFFFFFF, if the value is positive and too big to fit
818 3. 0x80000000 00000000, if the value is negative and too big to fit
822 // r0 = this, result in r1:r0
823 asm("ldmia r0, {r0,r1,r2} "); // get value into r0,r1,r2
824 asm("ConvertTRealXToInt64: ");
825 asm("mov r3, #0x8000 "); // r3=0x803E
826 asm("orr r3, r3, #0x003E ");
827 asm("subs r3, r3, r2, lsr #16 "); // r3=803E-exponent
828 asm("bls ConvertTRealXToInt64a "); // branch if exponent>=803E
829 asm("cmp r3, #63 "); // test if exponent<7FFF
830 asm("movhi r1, #0 "); // if so, underflow result to zero
831 asm("movhi r0, #0 ");
// The return on the underflow path is elided from this excerpt.
833 asm("cmp r3, #32 "); // >=32 shifts required?
834 asm("subcs r3, r3, #32 "); // if so, r3-=32
835 asm("movcs r0, r1, lsr r3 "); // r1:r0 >>= (r3+32)
836 asm("movcs r1, #0 ");
837 asm("movcc r0, r0, lsr r3 "); // else r1:r0>>=r3
838 asm("rsbcc r3, r3, #32 ");
839 asm("orrcc r0, r0, r1, lsl r3 ");
840 asm("rsbcc r3, r3, #32 ");
841 asm("movcc r1, r1, lsr r3 "); // r1:r0 = absolute integer
842 asm("tst r2, #1 "); // check sign bit
// The positive-case return between these lines is elided from this excerpt.
844 asm("rsbs r0, r0, #0 "); // else negate answer
845 asm("rsc r1, r1, #0 ");
847 asm("ConvertTRealXToInt64a: ");
848 asm("cmn r2, #0x10000 "); // check for infinity or NaN
849 asm("bcc ConvertTRealXToInt64b "); // branch if neither
850 asm("cmp r1, #0x80000000 "); // check for infinity
851 asm("cmpeq r0, #0 ");
852 asm("movne r1, #0 "); // if NaN, return 0
853 asm("movne r0, #0 ");
855 asm("ConvertTRealXToInt64b: ");
// Saturation via the carry trick, 64-bit this time: sign bit -> carry, then
// double-precision sbc yields 0x7FFFFFFFFFFFFFFF (+) or 0x8000000000000000 (-).
// A "mov r0, #0"-style setup line between these appears to be elided.
856 asm("mov r1, #0x80000000 "); // return KMaxTInt64/KMinTInt64 depending on sign
858 asm("movs r2, r2, lsr #1 ");
859 asm("sbcs r0, r0, #0 ");
860 asm("sbc r1, r1, #0 ");
// Conversion to IEEE single precision with round-to-nearest-even, using the
// TRealX rounded-up/rounded-down flag bits (r3 bits 8-9) to break ties.
// Handles gradual underflow to denormals, underflow to signed zero, and
// overflow/Inf/NaN (exponent forced to 0xFF). Return instructions between
// sections have been elided from this excerpt.
867 __NAKED__ EXPORT_C TRealX::operator TReal32() const __SOFTFP
869 Returns the extended precision value as
870 a single precision floating point value.
873 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3=input value
875 // Convert TRealX in r1,r2,r3 to TReal32 in r0
876 asm("ConvertTRealXToTReal32: ");
877 asm("mov r12, #0x8000 ");
878 asm("orr r12, r12, #0x007F "); // r12=0x807F
879 asm("cmp r3, r12, lsl #16 "); // check if exponent>=807F
880 asm("bcs ConvertTRealXToTReal32a "); // branch if it is
881 asm("sub r12, r12, #0x00FF "); // r12=0x7F80
882 asm("rsbs r12, r12, r3, lsr #16 "); // r12=exp in - 7F80 = result exponent if in range
883 asm("bgt ConvertTRealXToTReal32b "); // branch if normalised result
884 asm("cmn r12, #23 "); // check for total underflow or zero
885 asm("movlt r0, r3, lsl #31 "); // in this case, return zero with appropriate sign
// The return on the total-underflow path is elided from this excerpt.
// Denormal result: shift the mantissa right, folding lost bits into the
// rounded-down flag so the rounding step below still sees them.
887 asm("add r12, r12, #31 "); // r12=32-mantissa shift required = 32-(1-r12)
888 asm("movs r0, r1, lsl r12 "); // r0=lost bits when r2:r1 is shifted
889 asm("bicne r3, r3, #0x300 "); // if these are not zero, set rounded down flag
890 asm("orrne r3, r3, #0x100 ");
891 asm("rsb r0, r12, #32 ");
892 asm("mov r1, r1, lsr r0 ");
893 asm("orr r1, r1, r2, lsl r12 ");
894 asm("mov r2, r2, lsr r0 "); // r2 top 24 bits now give unrounded result mantissa
895 asm("mov r12, #0 "); // result exponent will be zero
896 asm("ConvertTRealXToTReal32b: ");
897 asm("movs r0, r2, lsl #24 "); // top 8 truncated bits into top byte of r0
898 asm("bpl ConvertTRealXToTReal32c "); // if top bit clear, truncate
899 asm("cmp r0, #0x80000000 ");
900 asm("cmpeq r1, #0 "); // compare rounding bits to 1000...
901 asm("bhi ConvertTRealXToTReal32d "); // if >, round up
902 asm("movs r0, r3, lsl #23 "); // round up flag into C, round down flag into N
903 asm("bcs ConvertTRealXToTReal32c "); // if rounded up, truncate
904 asm("bmi ConvertTRealXToTReal32d "); // if rounded down, round up
905 asm("tst r2, #0x100 "); // else round to even - test LSB of result mantissa
906 asm("beq ConvertTRealXToTReal32c "); // if zero, truncate, else round up
907 asm("ConvertTRealXToTReal32d: "); // come here to round up
908 asm("adds r2, r2, #0x100 "); // increment the mantissa
909 asm("movcs r2, #0x80000000 "); // if carry, mantissa=800000
910 asm("addcs r12, r12, #1 "); // and increment exponent
911 asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
912 asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
913 asm("ConvertTRealXToTReal32c: "); // come here to truncate
// Pack sign (bit 31), exponent (bits 23-30) and 23 mantissa bits into r0.
914 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
915 asm("orr r0, r0, r12, lsl #23 "); // exponent into r0 bits 23-30
916 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
917 asm("orr r0, r0, r2, lsr #8 "); // non-integer mantissa bits into r0 bits 0-22
919 asm("ConvertTRealXToTReal32a: "); // come here if overflow, infinity or NaN
920 asm("cmn r3, #0x10000 "); // check for infinity or NaN
921 asm("movcc r2, #0 "); // if not, set mantissa to 0 for infinity result
922 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
923 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
924 asm("orr r0, r0, #0x7F000000 "); // r0 bits 23-30 = FF = exponent
925 asm("orr r0, r0, #0x00800000 ");
926 asm("orr r0, r0, r2, lsr #8 "); // r0 bits 0-22 = result mantissa
// Conversion to IEEE double precision with round-to-nearest-even (same
// flag-assisted tie-breaking as the TReal32 conversion), gradual underflow
// to denormals, underflow to signed zero, and overflow/Inf/NaN handling.
// Result words in r0 (sign/exp/high mantissa) and r1 (low mantissa) in
// __DOUBLE_WORDS_SWAPPED__ order; the word swap for the other layout is in
// the #ifndef at the end (its body is elided from this excerpt, as are the
// returns between sections).
933 __NAKED__ EXPORT_C TRealX::operator TReal64() const __SOFTFP
935 Returns the extended precision value as
936 a double precision floating point value.
939 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3=input value
941 // Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
942 // if __DOUBLE_WORDS_SWAPPED__ r0=sign,exp,high mant, r1=low mant
943 // else r0, r1 reversed
944 asm("ConvertTRealXToTReal64: ");
945 asm("mov r12, #0x8300 ");
946 asm("orr r12, r12, #0x00FF "); // r12=0x83FF
947 asm("cmp r3, r12, lsl #16 "); // check if exponent>=83FF
948 asm("bcs ConvertTRealXToTReal64a "); // branch if it is
949 asm("mov r12, #0x7C00 ");
950 asm("rsbs r12, r12, r3, lsr #16 "); // r12=exp in - 7C00 = result exponent if in range
951 asm("bgt ConvertTRealXToTReal64b "); // branch if normalised result
952 asm("cmn r12, #52 "); // check for total underflow or zero
953 asm("movlt r0, r3, lsl #31 "); // in this case, return zero with appropriate sign
954 asm("movlt r1, #0 ");
955 asm("blt ConvertTRealXToTReal64_end ");
// Denormal result: shift r2:r1 right (<32 or >=32 positions), folding lost
// bits into the rounded-down flag for the rounding step.
957 asm("adds r12, r12, #31 "); // check if >=32 shifts needed, r12=32-shift count
958 asm("ble ConvertTRealXToTReal64e "); // branch if >=32 shifts needed
959 asm("movs r0, r1, lsl r12 "); // r0=lost bits when r2:r1 is shifted
960 asm("bicne r3, r3, #0x300 "); // if these are not zero, set rounded down flag
961 asm("orrne r3, r3, #0x100 ");
962 asm("rsb r0, r12, #32 "); // r0=shift count
963 asm("mov r1, r1, lsr r0 ");
964 asm("orr r1, r1, r2, lsl r12 ");
965 asm("mov r2, r2, lsr r0 "); // r2:r1 top 53 bits = unrounded result mantissa
966 asm("b ConvertTRealXToTReal64f ");
967 asm("ConvertTRealXToTReal64e: ");
968 asm("add r12, r12, #32 "); // r12=64-shift count
969 asm("cmp r1, #0 "); // r1 bits are all lost - test them
970 asm("moveqs r0, r2, lsl r12 "); // if zero, test lost bits from r2
971 asm("bicne r3, r3, #0x300 "); // if lost bits not all zero, set rounded down flag
972 asm("orrne r3, r3, #0x100 ");
973 asm("rsb r0, r12, #32 "); // r0=shift count-32
974 asm("mov r1, r2, lsr r0 "); // shift r2:r1 right
// The instruction clearing r2 on this path appears to be elided from this
// excerpt.
976 asm("ConvertTRealXToTReal64f: ");
977 asm("mov r12, #0 "); // result exponent will be zero for denormals
978 asm("ConvertTRealXToTReal64b: ");
979 asm("movs r0, r1, lsl #21 "); // 11 rounding bits to top of r0
980 asm("bpl ConvertTRealXToTReal64c "); // if top bit clear, truncate
981 asm("cmp r0, #0x80000000 "); // compare rounding bits to 10000000000
982 asm("bhi ConvertTRealXToTReal64d "); // if >, round up
983 asm("movs r0, r3, lsl #23 "); // round up flag into C, round down flag into N
984 asm("bcs ConvertTRealXToTReal64c "); // if rounded up, truncate
985 asm("bmi ConvertTRealXToTReal64d "); // if rounded down, round up
986 asm("tst r1, #0x800 "); // else round to even - test LSB of result mantissa
987 asm("beq ConvertTRealXToTReal64c "); // if zero, truncate, else round up
988 asm("ConvertTRealXToTReal64d: "); // come here to round up
989 asm("adds r1, r1, #0x800 "); // increment the mantissa
990 asm("adcs r2, r2, #0 ");
991 asm("movcs r2, #0x80000000 "); // if carry, mantissa=10000...0
992 asm("addcs r12, r12, #1 "); // and increment exponent
993 asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
994 asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
995 asm("ConvertTRealXToTReal64c: "); // come here to truncate
// Pack sign (bit 31), exponent (bits 20-30) and 52 mantissa bits into r0:r1.
996 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
997 asm("orr r0, r0, r12, lsl #20 "); // exponent into r0 bits 20-30
998 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
999 asm("orr r0, r0, r2, lsr #11 "); // non-integer mantissa bits into r0 bits 0-19
1000 asm("mov r1, r1, lsr #11 "); // and r1
1001 asm("orr r1, r1, r2, lsl #21 ");
1002 asm("b ConvertTRealXToTReal64_end ");
1004 asm("ConvertTRealXToTReal64a: "); // come here if overflow, infinity or NaN
1005 asm("cmn r3, #0x10000 "); // check for infinity or NaN
1006 asm("movcc r2, #0 "); // if not, set mantissa to 0 for infinity result
1007 asm("movcc r1, #0 ");
1008 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1009 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1010 asm("orr r0, r0, #0x7F000000 "); // r0 bits 20-30 = 7FF = exponent
1011 asm("orr r0, r0, #0x00F00000 ");
1012 asm("orr r0, r0, r2, lsr #11 "); // r0 bits 0-19 = result mantissa high bits
1013 asm("mov r1, r1, lsr #11 "); // and r1=result mantissa low bits
1014 asm("orr r1, r1, r2, lsl #21 ");
1015 asm("ConvertTRealXToTReal64_end: ");
1016 #ifndef __DOUBLE_WORDS_SWAPPED__
1027 __NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal32& /*aVal*/) const
1029 Extracts the extended precision value as
1030 a single precision floating point value.
1032 @param aVal A reference to a single precision object which contains
1033 the result of the operation.
1035 @return KErrNone, if the operation is successful;
1036 KErrOverflow, if the operation results in overflow;
1037 KErrUnderflow, if the operation results in underflow.
1040 asm("stmfd sp!, {r4,lr} ");
1042 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3=input value
1043 asm("bl TRealXGetTReal32 ");
1044 asm("str r0, [r4] "); // store converted TReal32 (r4 holds destination address)
1045 asm("mov r0, r12 "); // return value into r0
1048 // Convert TRealX in r1,r2,r3 to TReal32 in r0
1049 // Return error code in r12
1050 // r0-r3, r12 modified
// TRealX register layout used throughout: r1=mantissa low word, r2=mantissa high
// word (integer bit explicit in bit 31), r3 bits 16-31=exponent, bit 0=sign,
// bits 8-9=rounded-down/rounded-up flags. Rounding is to nearest, ties to even.
1051 asm("TRealXGetTReal32: ");
1052 asm("mov r12, #0x8000 ");
1053 asm("orr r12, r12, #0x007F "); // r12=0x807F
1054 asm("cmp r3, r12, lsl #16 "); // check if exponent>=807F
1055 asm("bcs TRealXGetTReal32a "); // branch if it is
1056 asm("sub r12, r12, #0x00FF "); // r12=0x7F80
1057 asm("rsbs r12, r12, r3, lsr #16 "); // r12=exp in - 7F80 = result exponent if in range
1058 asm("bgt TRealXGetTReal32b "); // branch if normalised result
1059 asm("cmn r12, #23 "); // check for total underflow or zero
1060 asm("bge TRealXGetTReal32e "); // skip if not
1061 asm("mov r0, r3, lsl #31 "); // else return zero with appropriate sign
1063 asm("cmp r3, #0x10000 "); // check for zero
1064 asm("movcc r12, #0 "); // if zero return KErrNone
1065 asm("mvncs r12, #9 "); // else return KErrUnderflow
1067 asm("TRealXGetTReal32e: "); // come here for a denormal result
1068 asm("add r12, r12, #31 "); // r12=32-mantissa shift required = 32-(1-r12)
1069 asm("movs r0, r1, lsl r12 "); // r0=lost bits when r2:r1 is shifted
1070 asm("bicne r3, r3, #0x300 "); // if these are not zero, set rounded down flag
1071 asm("orrne r3, r3, #0x100 ");
1072 asm("rsb r0, r12, #32 ");
1073 asm("mov r1, r1, lsr r0 ");
1074 asm("orr r1, r1, r2, lsl r12 ");
1075 asm("mov r2, r2, lsr r0 "); // r2 top 24 bits now give unrounded result mantissa
1076 asm("mov r12, #0 "); // result exponent will be zero
1077 asm("TRealXGetTReal32b: ");
1078 asm("movs r0, r2, lsl #24 "); // top 8 truncated bits into top byte of r0
1079 asm("bpl TRealXGetTReal32c "); // if top bit clear, truncate
1080 asm("cmp r0, #0x80000000 ");
1081 asm("cmpeq r1, #0 "); // compare rounding bits to 1000...
1082 asm("bhi TRealXGetTReal32d "); // if >, round up
1083 asm("movs r0, r3, lsl #23 "); // round up flag into C, round down flag into N
1084 asm("bcs TRealXGetTReal32c "); // if rounded up, truncate
1085 asm("bmi TRealXGetTReal32d "); // if rounded down, round up
1086 asm("tst r2, #0x100 "); // else round to even - test LSB of result mantissa
1087 asm("beq TRealXGetTReal32c "); // if zero, truncate, else round up
1088 asm("TRealXGetTReal32d: "); // come here to round up
1089 asm("adds r2, r2, #0x100 "); // increment the mantissa
1090 asm("movcs r2, #0x80000000 "); // if carry, mantissa=800000
1091 asm("addcs r12, r12, #1 "); // and increment exponent
1092 asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
1093 asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
1094 asm("TRealXGetTReal32c: "); // come here to truncate
1095 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1096 asm("orr r0, r0, r12, lsl #23 "); // exponent into r0 bits 23-30
1097 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1098 asm("orr r0, r0, r2, lsr #8 "); // non-integer mantissa bits into r0 bits 0-22
1099 asm("cmp r12, #0xFF "); // check for overflow
1100 asm("mvneq r12, #8 "); // if overflow, return KErrOverflow
1102 asm("bics r1, r0, #0x80000000 "); // check for underflow (everything bar sign zero)
1103 asm("mvneq r12, #9 "); // if underflow return KErrUnderflow
1104 asm("movne r12, #0 "); // else return KErrNone
1106 asm("TRealXGetTReal32a: "); // come here if overflow, infinity or NaN
1107 asm("cmn r3, #0x10000 "); // check for infinity or NaN
1108 asm("movcc r2, #0 "); // if not, set mantissa to 0 for infinity result
1109 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1110 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1111 asm("orr r0, r0, #0x7F000000 "); // r0 bits 23-30 = FF = exponent
1112 asm("orr r0, r0, #0x00800000 ");
1113 asm("orr r0, r0, r2, lsr #8 "); // r0 bits 0-22 = result mantissa
1114 asm("movs r12, r0, lsl #9 "); // check if result is infinity or NaN
1115 asm("mvneq r12, #8 "); // if infinity return KErrOverflow
1116 asm("mvnne r12, #5 "); // else return KErrArgument
1123 __NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal64& /*aVal*/) const
1125 Extracts the extended precision value as
1126 a double precision floating point value.
1128 @param aVal A reference to a double precision object which
1129 contains the result of the operation.
1131 @return KErrNone, if the operation is successful;
1132 KErrOverflow, if the operation results in overflow;
1133 KErrUnderflow, if the operation results in underflow.
1136 asm("stmfd sp!, {r4,lr} ");
1138 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3=input value
1139 asm("bl TRealXGetTReal64 ");
1140 asm("stmia r4, {r0,r1} "); // store converted TReal64 (r4 holds destination address)
1141 asm("mov r0, r12 "); // return value into r0
1144 // Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
1145 // Return error code in r12
1146 // r0-r3, r12 modified
// TRealX register layout: r1=mantissa low word, r2=mantissa high word (integer
// bit explicit in bit 31), r3 bits 16-31=exponent, bit 0=sign, bits 8-9=
// rounded-down/rounded-up flags. Rounding is to nearest, ties to even.
1147 asm("TRealXGetTReal64: ");
1148 asm("mov r12, #0x8300 ");
1149 asm("orr r12, r12, #0x00FF "); // r12=0x83FF
1150 asm("cmp r3, r12, lsl #16 "); // check if exponent>=83FF
1151 asm("bcs TRealXGetTReal64a "); // branch if it is
1152 asm("mov r12, #0x7C00 ");
1153 asm("rsbs r12, r12, r3, lsr #16 "); // r12=exp in - 7C00 = result exponent if in range
1154 asm("bgt TRealXGetTReal64b "); // branch if normalised result
1155 asm("cmn r12, #52 "); // check for total underflow or zero
1156 asm("bge TRealXGetTReal64g "); // skip if not
1157 asm("mov r0, r3, lsl #31 "); // else return zero with appropriate sign
1159 asm("cmp r3, #0x10000 "); // check for zero
1160 asm("movcc r12, #0 "); // if zero return KErrNone
1161 asm("mvncs r12, #9 "); // else return KErrUnderflow
1162 asm("b TRealXGetTReal64_end ");
1164 asm("TRealXGetTReal64g: "); // come here for a denormal result
1165 asm("adds r12, r12, #31 "); // check if >=32 shifts needed, r12=32-shift count
1166 asm("ble TRealXGetTReal64e "); // branch if >=32 shifts needed
1167 asm("movs r0, r1, lsl r12 "); // r0=lost bits when r2:r1 is shifted
1168 asm("bicne r3, r3, #0x300 "); // if these are not zero, set rounded down flag
1169 asm("orrne r3, r3, #0x100 ");
1170 asm("rsb r0, r12, #32 "); // r0=shift count
1171 asm("mov r1, r1, lsr r0 ");
1172 asm("orr r1, r1, r2, lsl r12 ");
1173 asm("mov r2, r2, lsr r0 "); // r2:r1 top 53 bits = unrounded result mantissa
1174 asm("b TRealXGetTReal64f ");
1175 asm("TRealXGetTReal64e: "); // denormal needing >=32 shifts
1176 asm("add r12, r12, #32 "); // r12=64-shift count
1177 asm("cmp r1, #0 "); // r1 bits are all lost - test them
1178 asm("moveqs r0, r2, lsl r12 "); // if zero, test lost bits from r2
1179 asm("bicne r3, r3, #0x300 "); // if lost bits not all zero, set rounded down flag
1180 asm("orrne r3, r3, #0x100 ");
1181 asm("rsb r0, r12, #32 "); // r0=shift count-32
1182 asm("mov r1, r2, lsr r0 "); // shift r2:r1 right
1184 asm("TRealXGetTReal64f: ");
1185 asm("mov r12, #0 "); // result exponent will be zero for denormals
1186 asm("TRealXGetTReal64b: ");
1187 asm("movs r0, r1, lsl #21 "); // 11 rounding bits to top of r0
1188 asm("bpl TRealXGetTReal64c "); // if top bit clear, truncate
1189 asm("cmp r0, #0x80000000 "); // compare rounding bits to 10000000000
1190 asm("bhi TRealXGetTReal64d "); // if >, round up
1191 asm("movs r0, r3, lsl #23 "); // round up flag into C, round down flag into N
1192 asm("bcs TRealXGetTReal64c "); // if rounded up, truncate
1193 asm("bmi TRealXGetTReal64d "); // if rounded down, round up
1194 asm("tst r1, #0x800 "); // else round to even - test LSB of result mantissa
1195 asm("beq TRealXGetTReal64c "); // if zero, truncate, else round up
1196 asm("TRealXGetTReal64d: "); // come here to round up
1197 asm("adds r1, r1, #0x800 "); // increment the mantissa
1198 asm("adcs r2, r2, #0 ");
1199 asm("movcs r2, #0x80000000 "); // if carry, mantissa=10000...0
1200 asm("addcs r12, r12, #1 "); // and increment exponent
1201 asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
1202 asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
1203 asm("TRealXGetTReal64c: "); // come here to truncate
1204 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1205 asm("orr r0, r0, r12, lsl #20 "); // exponent into r0 bits 20-30
1206 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1207 asm("orr r0, r0, r2, lsr #11 "); // non-integer mantissa bits into r0 bits 0-19
1208 asm("mov r1, r1, lsr #11 "); // and r1
1209 asm("orr r1, r1, r2, lsl #21 ");
1210 asm("add r12, r12, #1 "); // biased exponent 0x7FF is reserved, so test exp+1
1211 asm("cmp r12, #0x800 "); // check for overflow
1212 asm("mvneq r12, #8 "); // if overflow, return KErrOverflow
1213 asm("beq TRealXGetTReal64_end ");
1215 asm("bics r12, r0, #0x80000000 "); // check for underflow (everything bar sign zero)
1216 asm("cmpeq r1, #0 ");
1217 asm("mvneq r12, #9 "); // if underflow return KErrUnderflow
1218 asm("movne r12, #0 "); // else return KErrNone
1219 asm("b TRealXGetTReal64_end ");
1221 asm("TRealXGetTReal64a: "); // come here if overflow, infinity or NaN
1222 asm("cmn r3, #0x10000 "); // check for infinity or NaN
1223 asm("movcc r2, #0 "); // if not, set mantissa to 0 for infinity result
1224 asm("movcc r1, #0 ");
1225 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
1226 asm("mov r0, r3, lsl #31 "); // r0 bit 31 = sign bit
1227 asm("orr r0, r0, #0x7F000000 "); // r0 bits 20-30 = 7FF = exponent
1228 asm("orr r0, r0, #0x00F00000 ");
1229 asm("orr r0, r0, r2, lsr #11 "); // r0 bits 0-19 = result mantissa high bits
1230 asm("mov r1, r1, lsr #11 "); // and r1=result mantissa low bits
1231 asm("orr r1, r1, r2, lsl #21 ");
1232 asm("movs r12, r0, lsl #12 "); // check if result is infinity or NaN
1233 asm("cmpeq r1, #0 ");
1234 asm("mvneq r12, #8 "); // if infinity return KErrOverflow
1235 asm("mvnne r12, #5 "); // else return KErrArgument
1236 asm("TRealXGetTReal64_end: ");
1237 #ifndef __DOUBLE_WORDS_SWAPPED__
1248 __NAKED__ EXPORT_C TRealX TRealX::operator+() const
1250 Returns this extended precision number unchanged.
1252 Note that this may also be referred to as a unary plus operator.
1254 @return The extended precision number.
// Copies the three words of the TRealX from [r1] to [r0] unchanged
// (return-by-value ABI: r0 = result pointer, r1 = this - verify against callers).
1257 asm("ldmia r1, {r2,r3,r12} ");
1258 asm("stmia r0, {r2,r3,r12} ");
1265 __NAKED__ EXPORT_C TRealX TRealX::operator-() const
1267 Negates this extended precision number.
1269 This may also be referred to as a unary minus operator.
1271 @return The negative of the extended precision number.
// Copies the TRealX, flipping only the sign (bit 0 of the exponent word).
1274 asm("ldmia r1, {r2,r3,r12} ");
1275 asm("eor r12, r12, #1 "); // unary - changes sign bit
1276 asm("stmia r0, {r2,r3,r12} ");
1283 __NAKED__ EXPORT_C TRealX::TRealXOrder TRealX::Compare(const TRealX& /*aVal*/) const
// Compares *this with aVal and returns a TRealXOrder value:
// 1 = less than, 2 = equal, 4 = greater than, 8 = unordered (a NaN operand).
1287 asm("stmfd sp!, {r4,r5,r6,lr} ");
1288 asm("ldmia r1, {r4,r5,r6} "); // r4,r5,r6 = aVal
1289 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3 = *this
1290 asm("bl TRealXCompare ");
1293 // Compare TRealX in r1,r2,r3 to TRealX in r4,r5,r6
1294 // Return TRealXOrder result in r0
1295 asm("TRealXCompare: ");
1296 asm("cmn r3, #0x10000 "); // check for NaNs/infinity
1297 asm("bcs TRealXCompare1 ");
1298 asm("TRealXCompare6: "); // will come back here if infinity
1299 asm("cmn r6, #0x10000 ");
1300 asm("bcs TRealXCompare2 ");
1301 asm("TRealXCompare7: "); // will come back here if infinity
1302 asm("cmp r3, #0x10000 "); // check for zeros
1303 asm("bcc TRealXCompare3 ");
1304 asm("cmp r6, #0x10000 ");
1305 asm("bcc TRealXCompare4 ");
1306 asm("mov r12, r6, lsl #31 ");
1307 asm("cmp r12, r3, lsl #31 "); // compare signs
1308 asm("movne r0, #4 ");
1309 asm("bne TRealXCompare5 "); // branch if signs different
1310 asm("mov r12, r3, lsr #16 "); // r12=first exponent
1311 asm("cmp r12, r6, lsr #16 "); // compare exponents
1312 asm("cmpeq r2, r5 "); // if equal compare high words of mantissa
1313 asm("cmpeq r1, r4 "); // if equal compare low words of mantissa
1314 asm("moveq r0, #2 "); // if equal return 2
1316 asm("movhi r0, #4 "); // r0=4 if first exp bigger
1317 asm("movcc r0, #1 "); // else r0=1
1318 asm("TRealXCompare5: ");
1319 asm("tst r3, #1 "); // if signs negative
1320 asm("eorne r0, r0, #5 "); // then switch 1 and 4
1322 asm("TRealXCompare3: "); // first operand zero
1323 asm("cmp r6, #0x10000 "); // check if second also zero
1324 asm("movcc r0, #2 "); // if so, return 2
1326 asm("tst r6, #1 "); // else check sign of operand 2
1327 asm("moveq r0, #1 "); // if +, return 1
1328 asm("movne r0, #4 "); // else return 4
1330 asm("TRealXCompare4: "); // second operand zero, first nonzero
1331 asm("tst r3, #1 "); // check sign of operand 1
1332 asm("moveq r0, #4 "); // if +, return 4
1333 asm("movne r0, #1 "); // else return 1
1335 asm("TRealXCompare1: "); // first operand NaN or infinity
1336 asm("cmp r2, #0x80000000 "); // check for infinity
1337 asm("cmpeq r1, #0 ");
1338 asm("beq TRealXCompare6 "); // if infinity, can handle normally
1339 asm("mov r0, #8 "); // if NaN, return 8 (unordered)
1341 asm("TRealXCompare2: "); // second operand NaN or infinity
1342 asm("cmp r5, #0x80000000 "); // check for infinity
1343 asm("cmpeq r4, #0 ");
1344 asm("beq TRealXCompare7 "); // if infinity, can handle normally
1345 asm("mov r0, #8 "); // if NaN, return 8 (unordered)
1352 __NAKED__ EXPORT_C TInt TRealX::SubEq(const TRealX& /*aVal*/)
1354 Subtracts an extended precision value from this extended precision number.
1356 @param aVal The extended precision value to be subtracted.
1358 @return KErrNone, if the operation is successful;
1359 KErrOverflow, if the operation results in overflow;
1360 KErrUnderflow, if the operation results in underflow.
// Thin wrapper: loads both operands into registers, calls TRealXSubtract,
// stores the result back into *this and returns the error code.
1363 asm("stmfd sp!, {r0,r4-r8,lr} ");
1364 asm("ldmia r1, {r4,r5,r6} "); // r4,r5,r6 = aVal
1365 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3 = *this
1366 asm("bl TRealXSubtract ");
1367 asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0 = this
1368 asm("stmia r0, {r1,r2,r3} "); // store result into *this
1369 asm("mov r0, r12 "); // return error code
1376 __NAKED__ EXPORT_C TInt TRealX::AddEq(const TRealX& /*aVal*/)
1378 Adds an extended precision value to this extended precision number.
1380 @param aVal The extended precision value to be added.
1382 @return KErrNone, if the operation is successful;
1383 KErrOverflow,if the operation results in overflow;
1384 KErrUnderflow, if the operation results in underflow.
// Thin wrapper: loads both operands into registers, calls TRealXAdd,
// stores the result back into *this and returns the error code.
1387 asm("stmfd sp!, {r0,r4-r8,lr} ");
1388 asm("ldmia r1, {r4,r5,r6} "); // r4,r5,r6 = aVal
1389 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3 = *this
1390 asm("bl TRealXAdd ");
1391 asm("ldmfd sp!, {r0,r4-r8,lr} "); // restore r0 = this
1392 asm("stmia r0, {r1,r2,r3} "); // store result into *this
1393 asm("mov r0, r12 "); // return error code
1396 // TRealX subtraction r1,r2,r3 - r4,r5,r6 result in r1,r2,r3
1397 // Error code returned in r12
1398 // Registers r0-r8,r12 modified
1399 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
1400 asm("TRealXSubtract: ");
1401 asm("eor r6, r6, #1 "); // negate second operand (flip sign bit 0) and fall into the add code
1403 // TRealX addition r1,r2,r3 + r4,r5,r6 result in r1,r2,r3
1404 // Error code returned in r12
1405 // Registers r0-r8,r12 modified
1406 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
1407 // Note: +0 + +0 = +0, -0 + -0 = -0, +0 + -0 = -0 + +0 = +0,
1408 // +/-0 + X = X + +/-0 = X, X + -X = -X + X = +0
// Entry: classify special operands, then align mantissas by shifting the
// operand with the smaller exponent right; bits shifted out collect in r7
// (the rounding word) or set the sticky rounded-down flag.
1410 asm("mov r12, #0 "); // initialise return value to KErrNone
1411 asm("bic r3, r3, #0x300 "); // clear rounding flags
1412 asm("bic r6, r6, #0x300 "); // clear rounding flags
1413 asm("cmn r3, #0x10000 "); // check if first operand is NaN or infinity
1414 asm("bcs TRealXAdd1 "); // branch if it is
1415 asm("cmn r6, #0x10000 "); // check if second operand is NaN or infinity
1416 asm("bcs TRealXAdd2 "); // branch if it is
1417 asm("cmp r6, #0x10000 "); // check if second operand zero
1418 asm("bcc TRealXAdd3a "); // branch if it is
1419 asm("cmp r3, #0x10000 "); // check if first operand zero
1420 asm("bcc TRealXAdd3 "); // branch if it is
1421 asm("mov r7, #0 "); // r7 will be rounding word
1422 asm("mov r0, r3, lsr #16 "); // r0 = first operand exponent
1423 asm("subs r0, r0, r6, lsr #16 "); // r0 = first exponent - second exponent
1424 asm("beq TRealXAdd8 "); // if equal, no mantissa shifting needed
1425 asm("bhi TRealXAdd4 "); // skip if first exponent bigger
1426 asm("rsb r0, r0, #0 "); // need to shift first mantissa right by r0 to align
1427 asm("mov r8, r1 "); // swap the numbers so the one to be shifted is 2nd
1436 asm("TRealXAdd4: "); // need to shift 2nd mantissa right by r0 to align
1437 asm("cmp r0, #64 "); // more than 64 shifts needed?
1438 asm("bhi TRealXAdd6 "); // if so, smaller number cannot affect larger
1439 asm("cmp r0, #32 ");
1440 asm("bhi TRealXAdd7 "); // branch if shift count>32
1441 asm("rsb r8, r0, #32 ");
1442 asm("mov r7, r4, lsl r8 "); // shift r5:r4 right into r7 (rounding word)
1443 asm("mov r4, r4, lsr r0 ");
1444 asm("orr r4, r4, r5, lsl r8 ");
1445 asm("mov r5, r5, lsr r0 ");
1446 asm("b TRealXAdd8 ");
1447 asm("TRealXAdd7: "); // 64 >= shift count > 32
1448 asm("sub r0, r0, #32 ");
1449 asm("rsb r8, r0, #32 ");
1450 asm("movs r7, r4, lsl r8 "); // test bits lost in shift
1451 asm("orrne r6, r6, #0x100 "); // if not all zero, flag 2nd mantissa rounded down
1452 asm("mov r7, r4, lsr r0 "); // shift r5:r4 right into r7 by 32+r0
1453 asm("orr r7, r7, r5, lsl r8 ");
1454 asm("mov r4, r5, lsr r0 ");
// Mantissas aligned: same-sign operands are added (with renormalisation on
// carry), then the rounding word r7 decides round up/down/to-even.
1456 asm("TRealXAdd8: "); // mantissas are now aligned
1457 asm("mov r8, r3, lsl #31 "); // r8=sign of first operand
1458 asm("cmp r8, r6, lsl #31 "); // compare signs
1459 asm("bne TRealXSub1 "); // if different, need to do a subtraction
1460 asm("adds r1, r1, r4 "); // signs the same - add mantissas
1461 asm("adcs r2, r2, r5 ");
1462 asm(".word 0xE1B02062 "); // movs r2, r2, rrx shift carry into mantissa
1463 asm(".word 0xE1B01061 "); // movs r1, r1, rrx
1464 asm(".word 0xE1B07067 "); // movs r7, r7, rrx
1465 asm("bcc TRealXAdd9 "); // skip if no carry
1466 asm("orrcs r6, r6, #0x100 "); // if 1 shifted out, flag 2nd mantissa rounded down
1467 asm("add r3, r3, #0x10000 "); // increment exponent
1468 asm("TRealXAdd9: ");
1469 asm("cmp r7, #0x80000000 "); // check rounding word
1470 asm("bcc TRealXAdd10 "); // if <0x80000000 round down
1471 asm("bhi TRealXAdd11 "); // if >0x80000000 round up
1472 asm("tst r6, #0x100 "); // if =0x80000000 check if 2nd mantissa rounded down
1473 asm("bne TRealXAdd11 "); // if so, round up
1474 asm("tst r6, #0x200 "); // if =0x80000000 check if 2nd mantissa rounded up
1475 asm("bne TRealXAdd10 "); // if so, round down
1476 asm("tst r1, #1 "); // else round to even - check LSB
1477 asm("beq TRealXAdd10 "); // if zero, round down
1478 asm("TRealXAdd11: "); // come here to round up
1479 asm("adds r1, r1, #1 "); // increment mantissa
1480 asm("adcs r2, r2, #0 ");
1481 asm("movcs r2, #0x80000000 "); // if carry, mantissa = 80000000 00000000
1482 asm("addcs r3, r3, #0x10000 "); // and increment exponent
1483 asm("cmn r3, #0x10000 "); // check overflow
1484 asm("orrcc r3, r3, #0x200 "); // if no overflow, set rounded-up flag ...
1486 asm("b TRealXAdd12 "); // if overflow, return infinity
1487 asm("TRealXAdd10: "); // come here to round down
1488 asm("cmn r3, #0x10000 "); // check overflow
1489 asm("bcs TRealXAdd12 "); // if overflow, return infinity
1490 asm("cmp r7, #0 "); // if no overflow check if rounding word is zero
1491 asm("orrne r3, r3, #0x100 "); // if not, set rounded-down flag ...
1493 asm("and r6, r6, #0x300 "); // else transfer 2nd mantissa rounding flags
1494 asm("orr r3, r3, r6 "); // to result
1497 asm("TRealXAdd12: "); // come here if overflow - return infinity
1498 asm("mov r2, #0x80000000 ");
1500 asm("mvn r12, #8 "); // and return KErrOverflow
// Opposite-sign path: subtract the aligned mantissas, negate on borrow, then
// renormalise (by 32-bit chunks, then bit-by-bit) adjusting the exponent;
// underflow returns a signed zero with KErrUnderflow.
1503 asm("TRealXSub1: "); // come here if operand signs differ
1504 asm("tst r6, #0x300 "); // check if 2nd mantissa rounded
1505 asm("eorne r6, r6, #0x300 "); // if so, change rounding
1506 asm("rsbs r7, r7, #0 "); // subtract mantissas r2:r1:0 -= r5:r4:r7
1507 asm("sbcs r1, r1, r4 ");
1508 asm("sbcs r2, r2, r5 ");
1509 asm("bcs TRealXSub2 "); // skip if no borrow
1510 asm("tst r6, #0x300 "); // check if 2nd mantissa rounded
1511 asm("eorne r6, r6, #0x300 "); // if so, change rounding
1512 asm("rsbs r7, r7, #0 "); // negate result
1513 asm("rscs r1, r1, #0 ");
1514 asm("rscs r2, r2, #0 ");
1515 asm("eor r3, r3, #1 "); // and change result sign
1516 asm("TRealXSub2: ");
1517 asm("bne TRealXSub3 "); // skip if mantissa top word is not zero
1518 asm("movs r2, r1 "); // else shift up by 32
1521 asm("bne TRealXSub3a "); // skip if mantissa top word is not zero now
1522 asm("movs r2, r1 "); // else shift up by 32 again
1524 asm("moveq r3, #0 "); // if r2 still zero, result is zero - return +0
1526 asm("subs r3, r3, #0x00400000 "); // else, decrement exponent by 64
1527 asm("bcs TRealXSub3 "); // if no borrow, proceed
1528 asm("b TRealXSub4 "); // if borrow, underflow
1529 asm("TRealXSub3a: "); // needed one 32-bit shift
1530 asm("subs r3, r3, #0x00200000 "); // so decrement exponent by 32
1531 asm("bcc TRealXSub4 "); // if borrow, underflow
1532 asm("TRealXSub3: "); // r2 is now non-zero; still may need up to 31 shifts
1533 #ifdef __CPU_ARM_HAS_CLZ
1535 asm("mov r2, r2, lsl r0 ");
1537 asm("mov r0, #0 "); // r0 will be shift count (binary search for leading 1)
1538 asm("cmp r2, #0x00010000 ");
1539 asm("movcc r2, r2, lsl #16 ");
1540 asm("addcc r0, r0, #16 ");
1541 asm("cmp r2, #0x01000000 ");
1542 asm("movcc r2, r2, lsl #8 ");
1543 asm("addcc r0, r0, #8 ");
1544 asm("cmp r2, #0x10000000 ");
1545 asm("movcc r2, r2, lsl #4 ");
1546 asm("addcc r0, r0, #4 ");
1547 asm("cmp r2, #0x40000000 ");
1548 asm("movcc r2, r2, lsl #2 ");
1549 asm("addcc r0, r0, #2 ");
1550 asm("cmp r2, #0x80000000 ");
1551 asm("movcc r2, r2, lsl #1 ");
1552 asm("addcc r0, r0, #1 ");
1554 asm("rsb r8, r0, #32 ");
1555 asm("subs r3, r3, r0, lsl #16 "); // subtract shift count from exponent
1556 asm("bcc TRealXSub4 "); // if borrow, underflow
1557 asm("orr r2, r2, r1, lsr r8 "); // else shift mantissa up
1558 asm("mov r1, r1, lsl r0 ");
1559 asm("orr r1, r1, r7, lsr r8 ");
1560 asm("mov r7, r7, lsl r0 ");
1561 asm("cmp r3, #0x10000 "); // check for underflow
1562 asm("bcs TRealXAdd9 "); // if no underflow, branch to round result
1564 asm("TRealXSub4: "); // come here if underflow
1565 asm("and r3, r3, #1 "); // set exponent to zero, leave sign
1568 asm("mvn r12, #9 "); // return KErrUnderflow
// Trivial cases: far-apart exponents (larger operand unchanged, only flag the
// rounding direction), and zero operands.
1571 asm("TRealXAdd6: "); // come here if exponents differ by more than 64
1572 asm("mov r8, r3, lsl #31 "); // r8=sign of first operand
1573 asm("cmp r8, r6, lsl #31 "); // compare signs
1574 asm("orreq r3, r3, #0x100 "); // if same, result has been rounded down
1575 asm("orrne r3, r3, #0x200 "); // else result has been rounded up
1578 asm("TRealXAdd3a: "); // come here if second operand zero
1579 asm("cmp r3, #0x10000 "); // check if first operand also zero
1580 asm("andcc r3, r3, r6 "); // if so, result is negative iff both zeros negative
1581 asm("andcc r3, r3, #1 ");
1584 asm("TRealXAdd3: "); // come here if first operand zero, second nonzero
1585 asm("mov r1, r4 "); // return second operand unchanged
// Special-operand cases: NaNs go to TRealXBinOpNan; infinity + finite returns
// infinity with KErrOverflow; infinity + infinity of opposite sign is
// 'real indefinite' (a QNaN) with KErrArgument.
1590 asm("TRealXAdd1: "); // come here if first operand NaN or infinity
1591 asm("cmp r2, #0x80000000 "); // check for infinity
1592 asm("cmpeq r1, #0 ");
1593 asm("bne TRealXBinOpNan "); // branch if NaN
1594 asm("cmn r6, #0x10000 "); // check 2nd operand for NaN/infinity
1595 asm("mvncc r12, #8 "); // if neither, return KErrOverflow
1597 asm("cmp r5, #0x80000000 "); // check 2nd operand for infinity
1598 asm("cmpeq r4, #0 ");
1599 asm("bne TRealXBinOpNan "); // branch if NaN
1600 asm("mov r0, r3, lsl #31 "); // both operands are infinity - check signs
1601 asm("cmp r0, r6, lsl #31 ");
1602 asm("mvneq r12, #8 "); // if same, return KErrOverflow
1605 // Return 'real indefinite'
1606 asm("TRealXRealIndefinite: ");
1607 asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] "); // exponent/sign word for 'real indefinite'
1608 asm("mov r2, #0xC0000000 ");
1610 asm("mvn r12, #5 "); // return KErrArgument
1613 asm("TRealXAdd2: "); // come here if 2nd operand NaN/infinity, first finite
1614 asm("cmp r5, #0x80000000 "); // check for infinity
1615 asm("cmpeq r4, #0 ");
1616 asm("bne TRealXBinOpNan "); // branch if NaN
1617 asm("mov r1, r4 "); // else return 2nd operand (infinity)
1620 asm("mvn r12, #8 "); // return KErrOverflow
// Shared NaN handler for the binary operations: selects the NaN operand (or
// the NaN with the larger significand if both are NaNs), quietens it and
// returns KErrArgument.
1623 asm("TRealXBinOpNan: "); // generic routine to process NaNs in binary
1625 asm("cmn r3, #0x10000 "); // check if first operand is NaN
1626 asm("movcc r0, r1 "); // if not, swap the operands
1627 asm("movcc r1, r4 ");
1628 asm("movcc r4, r0 ");
1629 asm("movcc r0, r2 ");
1630 asm("movcc r2, r5 ");
1631 asm("movcc r5, r0 ");
1632 asm("movcc r0, r3 ");
1633 asm("movcc r3, r6 ");
1634 asm("movcc r6, r0 ");
1635 asm("cmn r6, #0x10000 "); // both operands NaNs?
1636 asm("bcc TRealXBinOpNan1 "); // skip if not
1637 asm("cmp r2, r5 "); // if so, compare the significands
1638 asm("cmpeq r1, r4 ");
1639 asm("movcc r1, r4 "); // r1,r2,r3 will get NaN with larger significand
1640 asm("movcc r2, r5 ");
1641 asm("movcc r3, r6 ");
1642 asm("TRealXBinOpNan1: ");
1643 asm("orr r2, r2, #0x40000000 "); // convert an SNaN to a QNaN
1644 asm("mvn r12, #5 "); // return KErrArgument
1651 __NAKED__ EXPORT_C TInt TRealX::MultEq(const TRealX& /*aVal*/)
1653 Multiplies this extended precision number by an extended precision value.
1655 @param aVal The extended precision value to be used as the multiplier.
1657 @return KErrNone, if the operation is successful;
1658 KErrOverflow, if the operation results in overflow;
1659 KErrUnderflow, if the operation results in underflow
1662 // Version for ARM 3M or later
// Thin wrapper: loads both operands into registers, calls TRealXMultiply,
// stores the result back into *this and returns the error code.
1664 asm("stmfd sp!, {r0,r4-r7,lr} ");
1665 asm("ldmia r1, {r4,r5,r6} "); // r4,r5,r6 = aVal
1666 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3 = *this
1667 asm("bl TRealXMultiply ");
1668 asm("ldmfd sp!, {r0,r4-r7,lr} "); // restore r0 = this
1669 asm("stmia r0, {r1,r2,r3} "); // store result into *this
1670 asm("mov r0, r12 "); // return error code
1673 // TRealX multiplication r1,r2,r3 * r4,r5,r6 result in r1,r2,r3
1674 // Error code returned in r12
1675 // Registers r0-r7,r12 modified
1676 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
// 64x64->128 bit mantissa multiply built from umull/umlal partial products;
// result sign (XOR of operand signs) is kept in bit 0 of lr while r3 is reused.
1677 asm("TRealXMultiply: ");
1678 asm("mov r12, #0 "); // initialise return value to KErrNone
1679 asm("bic r3, r3, #0x300 "); // clear rounding flags
1681 asm("eorne r3, r3, #1 "); // Exclusive-OR signs
1682 asm("cmn r3, #0x10000 "); // check if first operand is NaN or infinity
1683 asm("bcs TRealXMultiply1 "); // branch if it is
1684 asm("cmn r6, #0x10000 "); // check if second operand is NaN or infinity
1685 asm("bcs TRealXMultiply2 "); // branch if it is
1686 asm("cmp r3, #0x10000 "); // check if first operand zero
1687 __JUMP(cc,lr); // if so, exit
1689 // Multiply mantissas in r2:r1 and r5:r4, result in r2:r1:r12:r7
1690 asm("umull r7, r12, r1, r4 "); // r7:r12=m1.low*m2.low
1691 asm("movs r0, r6, lsr #16 "); // r0=2nd operand exponent
1692 asm("beq TRealXMultiply3 "); // if zero, return zero
1693 asm("mov r6, #0 "); // clear r6 initially
1694 asm("umlal r12, r6, r1, r5 "); // r6:r12:r7=m1.low*m2, r1 no longer needed
1695 asm("add r0, r0, r3, lsr #16 "); // r0=sum of exponents
1697 asm("mov r3, #0 "); // clear r3 initially
1698 asm("umlal r6, r3, r2, r5 "); // r3:r6:r12:r7=m2.low*m1+m2.high*m1.high<<64
1699 // r1,r5 no longer required
1700 asm("orrne lr, lr, #1 "); // save sign in bottom bit of lr
1701 asm("sub r0, r0, #0x7F00 ");
1702 asm("sub r0, r0, #0x00FE "); // r0 now contains result exponent
1703 asm("umull r1, r5, r2, r4 "); // r5:r1=m2.high*m1.low
1704 asm("adds r12, r12, r1 "); // shift left by 32 and add to give final result
1705 asm("adcs r1, r6, r5 ");
1706 asm("adcs r2, r3, #0 "); // final result now in r2:r1:r12:r7
1707 // set flags on final value of r2 (ms word of result)
1709 // normalise the result mantissa
1710 asm("bmi TRealXMultiply4 "); // skip if already normalised
1711 asm("adds r7, r7, r7 "); // else shift left (will only ever need one shift)
1712 asm("adcs r12, r12, r12 ");
1713 asm("adcs r1, r1, r1 ");
1714 asm("adcs r2, r2, r2 ");
1715 asm("sub r0, r0, #1 "); // and decrement exponent by one
1717 // round the result mantissa
// Round to nearest, ties to even: the low 64 bits r12:r7 are the rounding
// bits; exactly half rounds to an even LSB. Then range-check the exponent.
1718 asm("TRealXMultiply4: ");
1719 asm("and r3, lr, #1 "); // result sign bit back into r3
1720 asm("orrs r4, r7, r12 "); // check for exact result
1721 asm("beq TRealXMultiply5 "); // skip if exact
1722 asm("cmp r12, #0x80000000 "); // compare bottom 64 bits to 80000000 00000000
1723 asm("cmpeq r7, #0 ");
1724 asm("moveqs r4, r1, lsr #1 "); // if exactly equal, set carry=lsb of result
1725 // so we round up if lsb=1
1726 asm("orrcc r3, r3, #0x100 "); // if rounding down, set rounded-down flag
1727 asm("orrcs r3, r3, #0x200 "); // if rounding up, set rounded-up flag
1728 asm("adcs r1, r1, #0 "); // increment mantissa if necessary
1729 asm("adcs r2, r2, #0 ");
1730 asm("movcs r2, #0x80000000 "); // if carry, set mantissa to 80000000 00000000
1731 asm("addcs r0, r0, #1 "); // and increment result exponent
1733 // check for overflow or underflow and assemble final result
1734 asm("TRealXMultiply5: ");
1735 asm("add r4, r0, #1 "); // need to add 1 to get usable threshold
1736 asm("cmp r4, #0x10000 "); // check if exponent >= 0xFFFF
1737 asm("bge TRealXMultiply6 "); // if so, overflow
1738 asm("cmp r0, #0 "); // check for underflow
1739 asm("orrgt r3, r3, r0, lsl #16 "); // if no underflow, result exponent into r3, ...
1740 asm("movgt r12, #0 "); // ... return KErrNone ...
1741 asm("bicgt pc, lr, #3 "); // ... and return (clearing mode bits saved in lr)
1744 asm("mvn r12, #9 "); // return KErrUnderflow
1745 asm("bic pc, lr, #3 ");
1748 asm("TRealXMultiply6: "); // come here on overflow - return infinity
1749 asm("bic r3, r3, #0x0000FF00 "); // clear rounding flags
1750 asm("orr r3, r3, #0xFF000000 "); // make exponent FFFF for infinity
1751 asm("orr r3, r3, #0x00FF0000 ");
1752 asm("mov r2, #0x80000000 "); // mantissa = 80000000 00000000
1754 asm("mvn r12, #8 "); // return KErrOverflow
1755 asm("bic pc, lr, #3 ");
1757 // come here if second operand zero
// Special operands: zero * finite = signed zero; infinity * zero =
// 'real indefinite'; infinity * finite nonzero = infinity (KErrOverflow);
// NaNs are routed to the shared TRealXBinOpNan handler.
1758 asm("TRealXMultiply3: ");
1761 asm("and r3, r3, #1 "); // zero exponent, keep xor sign
1762 asm("mov r12, #0 "); // return KErrNone
1763 asm("bic pc, lr, #3 ");
1765 // First operand NaN or infinity
1766 asm("TRealXMultiply1: ");
1767 asm("cmp r2, #0x80000000 "); // check for infinity
1768 asm("cmpeq r1, #0 ");
1769 asm("bne TRealXBinOpNan "); // branch if NaN
1770 asm("cmn r6, #0x10000 "); // check 2nd operand for NaN/infinity
1771 asm("bcs TRealXMultiply1a "); // branch if it is
1772 asm("cmp r6, #0x10000 "); // else check if second operand zero
1773 asm("mvncs r12, #8 "); // if not, return infinity and KErrOverflow
1774 asm("biccs pc, lr, #3 ");
1775 asm("b TRealXRealIndefinite "); // else return 'real indefinite'
1777 asm("TRealXMultiply1a: ");
1778 asm("cmp r5, #0x80000000 "); // check 2nd operand for infinity
1779 asm("cmpeq r4, #0 ");
1780 asm("bne TRealXBinOpNan "); // branch if NaN
1781 asm("mvn r12, #8 "); // else (infinity), return KErrOverflow
1782 asm("bic pc, lr, #3 ");
1784 // Second operand NaN or infinity, first operand finite
1785 asm("TRealXMultiply2: ");
1786 asm("cmp r5, #0x80000000 "); // check for infinity
1787 asm("cmpeq r4, #0 ");
1788 asm("bne TRealXBinOpNan "); // branch if NaN
1789 asm("cmp r3, #0x10000 "); // if infinity, check if first operand zero
1790 asm("bcc TRealXRealIndefinite "); // if it is, return 'real indefinite'
1791 asm("orr r3, r3, #0xFF000000 "); // else return infinity with xor sign
1792 asm("orr r3, r3, #0x00FF0000 ");
1793 asm("mov r2, #0x80000000 ");
1795 asm("mvn r12, #8 "); // return KErrOverflow
1796 asm("bic pc, lr, #3 ");
1802 __NAKED__ EXPORT_C TInt TRealX::DivEq(const TRealX& /*aVal*/)
1804 Divides this extended precision number by an extended precision value.
1806 @param aVal The extended precision value to be used as the divisor.
1808 @return KErrNone, if the operation is successful;
1809 KErrOverflow, if the operation results in overflow;
1810 KErrUnderflow, if the operation results in underflow;
1811 KErrDivideByZero, if the divisor is zero.
// Thin wrapper: loads dividend and divisor into registers, calls TRealXDivide,
// stores the quotient back into *this and returns the error code.
1814 asm("stmfd sp!, {r0,r4-r9,lr} ");
1815 asm("ldmia r1, {r4,r5,r6} "); // r4,r5,r6 = aVal (divisor)
1816 asm("ldmia r0, {r1,r2,r3} "); // r1,r2,r3 = *this (dividend)
1817 asm("bl TRealXDivide ");
1818 asm("ldmfd sp!, {r0,r4-r9,lr} "); // restore r0 = this
1819 asm("stmia r0, {r1,r2,r3} "); // store result into *this
1820 asm("mov r0, r12 "); // return error code
1823 // TRealX division r1,r2,r3 / r4,r5,r6 result in r1,r2,r3
1824 // Error code returned in r12
1825 // Registers r0-r9,r12 modified
1826 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
1827 asm("TRealXDivide: ");
1828 asm("mov r12, #0 "); // initialise return value to KErrNone
1829 asm("bic r3, r3, #0x300 "); // clear rounding flags
1831 asm("eorne r3, r3, #1 "); // Exclusive-OR signs
1832 asm("cmn r3, #0x10000 "); // check if dividend is NaN or infinity
1833 asm("bcs TRealXDivide1 "); // branch if it is
1834 asm("cmn r6, #0x10000 "); // check if divisor is NaN or infinity
1835 asm("bcs TRealXDivide2 "); // branch if it is
1836 asm("cmp r6, #0x10000 "); // check if divisor zero
1837 asm("bcc TRealXDivide3 "); // branch if it is
1838 asm("cmp r3, #0x10000 "); // check if dividend zero
1839 __JUMP(cc,lr); // if zero, exit
1841 asm("orrne lr, lr, #1 "); // save sign in bottom bit of lr
1843 // calculate result exponent
1844 asm("mov r0, r3, lsr #16 "); // r0=dividend exponent
1845 asm("sub r0, r0, r6, lsr #16 "); // r0=dividend exponent - divisor exponent
1846 asm("add r0, r0, #0x7F00 ");
1847 asm("add r0, r0, #0x00FF "); // r0 now contains result exponent
1848 asm("mov r6, r1 "); // move dividend into r6,r7,r8
1850 asm("mov r8, #0 "); // use r8 to hold extra bit shifted up
1851 // r2:r1 will hold result mantissa
1852 asm("mov r2, #1 "); // we will make sure first bit is 1
1853 asm("cmp r7, r5 "); // compare dividend mantissa to divisor mantissa
1854 asm("cmpeq r6, r4 ");
1855 asm("bcs TRealXDivide4 "); // branch if dividend >= divisor
1856 asm("adds r6, r6, r6 "); // else shift dividend left one
1857 asm("adcs r7, r7, r7 "); // ignore carry here
1858 asm("sub r0, r0, #1 "); // decrement result exponent by one
1859 asm("TRealXDivide4: ");
1860 asm("subs r6, r6, r4 "); // subtract divisor from dividend
1861 asm("sbcs r7, r7, r5 ");
1863 // Main mantissa division code
1864 // First calculate the top 32 bits of the result
1865 // Top bit is 1, do 10 lots of 3 bits the one more bit
1866 asm("mov r12, #10 ");
1867 asm("TRealXDivide5: ");
1868 asm("adds r6, r6, r6 "); // shift accumulator left by one
1869 asm("adcs r7, r7, r7 ");
1870 asm("adcs r8, r8, r8 ");
1871 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1872 asm("sbcs r3, r7, r5 ");
1873 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1874 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1875 asm("movcs r7, r3 ");
1876 asm("adcs r2, r2, r2 "); // shift in new result bit
1877 asm("adds r6, r6, r6 "); // shift accumulator left by one
1878 asm("adcs r7, r7, r7 ");
1879 asm("adcs r8, r8, r8 ");
1880 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1881 asm("sbcs r3, r7, r5 ");
1882 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1883 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1884 asm("movcs r7, r3 ");
1885 asm("adcs r2, r2, r2 "); // shift in new result bit
1886 asm("adds r6, r6, r6 "); // shift accumulator left by one
1887 asm("adcs r7, r7, r7 ");
1888 asm("adcs r8, r8, r8 ");
1889 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1890 asm("sbcs r3, r7, r5 ");
1891 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1892 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1893 asm("movcs r7, r3 ");
1894 asm("adcs r2, r2, r2 "); // shift in new result bit
1895 asm("subs r12, r12, #1 ");
1896 asm("bne TRealXDivide5 "); // iterate the loop
1897 asm("adds r6, r6, r6 "); // shift accumulator left by one
1898 asm("adcs r7, r7, r7 ");
1899 asm("adcs r8, r8, r8 ");
1900 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1901 asm("sbcs r3, r7, r5 ");
1902 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1903 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1904 asm("movcs r7, r3 ");
1905 asm("adcs r2, r2, r2 "); // shift in new result bit - now have 32 bits
1907 // Now calculate the bottom 32 bits of the result
1908 // Do 8 lots of 4 bits
1909 asm("mov r12, #8 ");
1910 asm("TRealXDivide5a: ");
1911 asm("adds r6, r6, r6 "); // shift accumulator left by one
1912 asm("adcs r7, r7, r7 ");
1913 asm("adcs r8, r8, r8 ");
1914 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1915 asm("sbcs r3, r7, r5 ");
1916 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1917 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1918 asm("movcs r7, r3 ");
1919 asm("adcs r1, r1, r1 "); // shift in new result bit
1920 asm("adds r6, r6, r6 "); // shift accumulator left by one
1921 asm("adcs r7, r7, r7 ");
1922 asm("adcs r8, r8, r8 ");
1923 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1924 asm("sbcs r3, r7, r5 ");
1925 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1926 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1927 asm("movcs r7, r3 ");
1928 asm("adcs r1, r1, r1 "); // shift in new result bit
1929 asm("adds r6, r6, r6 "); // shift accumulator left by one
1930 asm("adcs r7, r7, r7 ");
1931 asm("adcs r8, r8, r8 ");
1932 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1933 asm("sbcs r3, r7, r5 ");
1934 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1935 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1936 asm("movcs r7, r3 ");
1937 asm("adcs r1, r1, r1 "); // shift in new result bit
1938 asm("adds r6, r6, r6 "); // shift accumulator left by one
1939 asm("adcs r7, r7, r7 ");
1940 asm("adcs r8, r8, r8 ");
1941 asm("subs r9, r6, r4 "); // subtract divisor from accumulator, result in r9,r3
1942 asm("sbcs r3, r7, r5 ");
1943 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1944 asm("movcs r6, r9 "); // if no borrow, replace accumulator with result
1945 asm("movcs r7, r3 ");
1946 asm("adcs r1, r1, r1 "); // shift in new result bit
1947 asm("subs r12, r12, #1 ");
1948 asm("bne TRealXDivide5a "); // iterate the loop
1950 // r2:r1 now contains a 64-bit normalised mantissa
1951 // need to do rounding now
1952 asm("and r3, lr, #1 "); // result sign back into r3
1953 asm("orrs r9, r6, r7 "); // check if accumulator zero
1954 asm("beq TRealXDivide6 "); // if it is, result is exact, else generate next bit
1955 asm("adds r6, r6, r6 "); // shift accumulator left by one
1956 asm("adcs r7, r7, r7 ");
1957 asm("adcs r8, r8, r8 ");
1958 asm("subs r6, r6, r4 "); // subtract divisor from accumulator
1959 asm("sbcs r7, r7, r5 ");
1960 asm("movccs r8, r8, lsr #1 "); // if borrow, check for carry from shift
1961 asm("orrcc r3, r3, #0x100 "); // if borrow, round down and set round-down flag
1962 asm("bcc TRealXDivide6 ");
1963 asm("orrs r9, r6, r7 "); // if no borrow, check if exactly half-way
1964 asm("moveqs r9, r1, lsr #1 "); // if exactly half-way, round to even
1965 asm("orrcc r3, r3, #0x100 "); // if C=0, round result down and set round-down flag
1966 asm("bcc TRealXDivide6 ");
1967 asm("orr r3, r3, #0x200 "); // else set round-up flag
1968 asm("adds r1, r1, #1 "); // and round mantissa up
1969 asm("adcs r2, r2, #0 ");
1970 asm("movcs r2, #0x80000000 "); // if carry, mantissa = 80000000 00000000
1971 asm("addcs r0, r0, #1 "); // and increment exponent
1973 // check for overflow or underflow and assemble final result
1974 asm("TRealXDivide6: ");
1975 asm("add r4, r0, #1 "); // need to add 1 to get usable threshold
1976 asm("cmp r4, #0x10000 "); // check if exponent >= 0xFFFF
1977 asm("bge TRealXMultiply6 "); // if so, overflow
1978 asm("cmp r0, #0 "); // check for underflow
1979 asm("orrgt r3, r3, r0, lsl #16 "); // if no underflow, result exponent into r3, ...
1980 asm("movgt r12, #0 "); // ... return KErrNone ...
1981 asm("bicgt pc, lr, #3 ");
1984 asm("and r3, r3, #1 "); // set exponent=0, keep sign
1985 asm("mvn r12, #9 "); // return KErrUnderflow
1986 asm("bic pc, lr, #3 ");
1988 // come here if divisor is zero, dividend finite
1989 asm("TRealXDivide3: ");
1990 asm("cmp r3, #0x10000 "); // check if dividend also zero
1991 asm("bcc TRealXRealIndefinite "); // if so, return 'real indefinite'
1992 asm("orr r3, r3, #0xFF000000 "); // else return infinity with xor sign
1993 asm("orr r3, r3, #0x00FF0000 ");
1994 asm("mov r2, #0x80000000 ");
1996 asm("mvn r12, #40 "); // return KErrDivideByZero
1997 asm("bic pc, lr, #3 ");
1999 // Dividend is NaN or infinity
2000 asm("TRealXDivide1: ");
2001 asm("cmp r2, #0x80000000 "); // check for infinity
2002 asm("cmpeq r1, #0 ");
2003 asm("bne TRealXBinOpNan "); // branch if NaN
2004 asm("cmn r6, #0x10000 "); // check 2nd operand for NaN/infinity
2005 asm("mvncc r12, #8 "); // if not, return KErrOverflow
2006 asm("biccc pc, lr, #3 ");
2008 // Dividend=infinity, divisor=NaN or infinity
2009 asm("cmp r5, #0x80000000 "); // check 2nd operand for infinity
2010 asm("cmpeq r4, #0 ");
2011 asm("bne TRealXBinOpNan "); // branch if NaN
2012 asm("b TRealXRealIndefinite "); // else return 'real indefinite'
2014 // Divisor is NaN or infinity, dividend finite
2015 asm("TRealXDivide2: ");
2016 asm("cmp r5, #0x80000000 "); // check for infinity
2017 asm("cmpeq r4, #0 ");
2018 asm("bne TRealXBinOpNan "); // branch if NaN
2019 asm("and r3, r3, #1 "); // else return zero with xor sign
2020 asm("bic pc, lr, #3 ");
2026 __NAKED__ EXPORT_C TInt TRealX::ModEq(const TRealX& /*aVal*/)
2028 Modulo-divides this extended precision number by an extended precision value.
2030 @param aVal The extended precision value to be used as the divisor.
2032 @return KErrNone, if the operation is successful;
2033 KErrTotalLossOfPrecision, if precision is lost;
2034 KErrUnderflow, if the operation results in underflow.
// On entry: r0=this, r1=&aVal. Operands are marshalled into registers for
// the shared TRealXModulo helper below; the remainder is stored back into
// *this and the error code from r12 is returned.
2037 asm("stmfd sp!, {r0,r4-r7,lr} ");
2038 asm("ldmia r1, {r4,r5,r6} ");
2039 asm("ldmia r0, {r1,r2,r3} ");
2040 asm("bl TRealXModulo ");
2041 asm("ldmfd sp!, {r0,r4-r7,lr} ");
2042 asm("stmia r0, {r1,r2,r3} ");
2043 asm("mov r0, r12 ");
2046 // TRealX remainder r1,r2,r3 % r4,r5,r6 result in r1,r2,r3
2047 // Error code returned in r12
2048 // Registers r0-r7,r12 modified
2049 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
// Algorithm: after filtering special operands, repeatedly shift the dividend
// mantissa (r2:r1) left and subtract the divisor mantissa (r5:r4) whenever it
// fits, once per bit of exponent difference; what remains is the remainder,
// which is then renormalised. An exponent difference >= 64 means every
// significant bit is consumed: total loss of precision.
2050 asm("TRealXModulo: ");
2051 asm("mov r12, #0 "); // initialise return value to KErrNone
2052 asm("cmn r3, #0x10000 "); // check if dividend is NaN or infinity
2053 asm("bcs TRealXModulo1 "); // branch if it is
2054 asm("cmn r6, #0x10000 "); // check if divisor is NaN or infinity
2055 asm("bcs TRealXModulo2 "); // branch if it is
2056 asm("cmp r6, #0x10000 "); // check if divisor zero
2057 asm("bcc TRealXRealIndefinite "); // if it is, return 'real indefinite'
2058 asm("mov r0, r3, lsr #16 "); // r0=dividend exponent
2059 asm("subs r0, r0, r6, lsr #16 "); // r0=dividend exponent-divisor exponent
2061 asm("cmp r0, #64 "); // check if difference >= 64 bits
2062 asm("bcs TRealXModuloLp "); // if so, underflow
2063 asm("b TRealXModulo4 "); // skip left shift on first iteration
2065 asm("TRealXModulo3: ");
2066 asm("adds r1, r1, r1 "); // shift dividend mantissa left one bit
2067 asm("adcs r2, r2, r2 ");
2068 asm("bcs TRealXModulo5 "); // if one shifted out, override comparison
2069 asm("TRealXModulo4: ");
2070 asm("cmp r2, r5 "); // compare dividend to divisor
2071 asm("cmpeq r1, r4 ");
2072 asm("bcc TRealXModulo6 "); // if dividend<divisor, skip
2073 asm("TRealXModulo5: ");
2074 asm("subs r1, r1, r4 "); // if dividend>=divisor, dividend-=divisor
2075 asm("sbcs r2, r2, r5 ");
2076 asm("TRealXModulo6: ");
2077 asm("subs r0, r0, #1 "); // decrement loop count
2078 asm("bpl TRealXModulo3 "); // if more bits to do, loop
2080 asm("orrs r0, r1, r2 "); // test for exact zero result
2081 asm("andeq r3, r3, #1 "); // if so, return zero with same sign as dividend
// renormalise the remainder: find the leading 1 of r2:r1 with a binary
// search, shift it to the top, and adjust the result exponent accordingly
2083 asm("and r7, r3, #1 "); // dividend sign bit into r7
2084 asm("mov r3, r6, lsr #16 "); // r3 lower 16 bits=result exponent=divisor exponent
2085 asm("cmp r2, #0 "); // test if upper 32 bits zero
2086 asm("moveq r2, r1 "); // if so, shift left by 32
2087 asm("moveq r1, #0 ");
2088 asm("subeqs r3, r3, #32 "); // and subtract 32 from exponent
2089 asm("bls TRealXModuloUnderflow "); // if borrow from exponent or exponent 0, underflow
2090 asm("mov r0, #32 "); // r0 will hold 32-number of shifts to normalise
2091 asm("cmp r2, #0x00010000 "); // normalise
2092 asm("movcc r2, r2, lsl #16 ");
2093 asm("subcc r0, r0, #16 ");
2094 asm("cmp r2, #0x01000000 ");
2095 asm("movcc r2, r2, lsl #8 ");
2096 asm("subcc r0, r0, #8 ");
2097 asm("cmp r2, #0x10000000 ");
2098 asm("movcc r2, r2, lsl #4 ");
2099 asm("subcc r0, r0, #4 ");
2100 asm("cmp r2, #0x40000000 ");
2101 asm("movcc r2, r2, lsl #2 ");
2102 asm("subcc r0, r0, #2 ");
2103 asm("cmp r2, #0x80000000 ");
2104 asm("movcc r2, r2, lsl #1 "); // top bit of r2 is now set
2105 asm("subcc r0, r0, #1 ");
2106 asm("orr r2, r2, r1, lsr r0 "); // top bits of r1 into bottom bits of r2
2107 asm("rsb r0, r0, #32 "); // r0=number of shifts to normalise
2108 asm("mov r1, r1, lsl r0 "); // shift r1 left - mantissa now normalised
2109 asm("subs r3, r3, r0 "); // subtract r0 from exponent
2110 asm("bls TRealXModuloUnderflow "); // if borrow from exponent or exponent 0, underflow
2111 asm("orr r3, r7, r3, lsl #16 "); // else r3=result exponent and sign
2114 // dividend=NaN or infinity
2115 asm("TRealXModulo1: ");
2116 asm("cmp r2, #0x80000000 "); // check for infinity
2117 asm("cmpeq r1, #0 ");
2118 asm("bne TRealXBinOpNan "); // branch if NaN
2119 asm("cmn r6, #0x10000 "); // check 2nd operand for NaN/infinity
2120 asm("bcc TRealXRealIndefinite "); // infinity%finite - return 'real indefinite'
2121 asm("cmp r5, #0x80000000 "); // check if divisor=infinity
2122 asm("cmpeq r4, #0 ");
2123 asm("bne TRealXBinOpNan "); // branch if NaN
2124 asm("b TRealXRealIndefinite "); // else infinity%infinity - return 'real indefinite'
2126 // divisor=NaN or infinity, dividend finite
2127 asm("TRealXModulo2: ");
2128 asm("cmp r5, #0x80000000 "); // check for infinity
2129 asm("cmpeq r4, #0 ");
2130 asm("bne TRealXBinOpNan "); // branch if NaN
// exponent difference too large: result is zero (sign preserved) with
// KErrTotalLossOfPrecision
2133 asm("TRealXModuloLp: ");
2134 asm("mvn r12, #%a0" : : "i" ((TInt)~KErrTotalLossOfPrecision));
2137 asm("and r3, r3, #1 ");
// remainder too small to represent: zero (sign preserved) with KErrUnderflow
2140 asm("TRealXModuloUnderflow: ");
2141 asm("mvn r12, #%a0" : : "i" ((TInt)~KErrUnderflow));
2144 asm("and r3, r3, #1 ");
2151 __NAKED__ EXPORT_C TInt TRealX::Add(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
2153 Adds an extended precision value to this extended precision number.
2155 @param aResult On return, a reference to an extended precision object
2156 containing the result of the operation.
2157 @param aVal The extended precision value to be added.
2159 @return KErrNone, if the operation is successful;
2160 KErrOverflow, if the operation results in overflow;
2161 KErrUnderflow, if the operation results in underflow.
2164 // r0=this, r1=&aResult, r2=&aVal
// Marshal aVal into r4-r6 and *this into r1-r3 for TRealXAdd; &aResult is
// kept on the stack (popped into lr) so the sum can be stored through it.
2165 asm("stmfd sp!, {r1,r4-r8,lr} ");
2166 asm("ldmia r2, {r4,r5,r6} ");
2167 asm("ldmia r0, {r1,r2,r3} ");
2168 asm("bl TRealXAdd ");
2169 asm("ldmfd sp!, {lr} "); // lr=&aResult
2170 asm("stmia lr, {r1,r2,r3} ");
2171 asm("mov r0, r12 "); // return value into r0
2178 __NAKED__ EXPORT_C TInt TRealX::Sub(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
2180 Subtracts an extended precision value from this extended precision number.
2182 @param aResult On return, a reference to an extended precision object
2183 containing the result of the operation.
2184 @param aVal The extended precision value to be subtracted.
2186 @return KErrNone, if the operation is successful;
2187 KErrOverflow, if the operation results in overflow;
2188 KErrUnderflow, if the operation results in underflow.
2191 // r0=this, r1=&aResult, r2=&aVal
// Marshal aVal into r4-r6 and *this into r1-r3 for TRealXSubtract; &aResult
// is kept on the stack (popped into lr) so the difference can be stored.
2192 asm("stmfd sp!, {r1,r4-r8,lr} ");
2193 asm("ldmia r2, {r4,r5,r6} ");
2194 asm("ldmia r0, {r1,r2,r3} ");
2195 asm("bl TRealXSubtract ");
2196 asm("ldmfd sp!, {lr} "); // lr=&aResult
2197 asm("stmia lr, {r1,r2,r3} ");
2198 asm("mov r0, r12 "); // return value into r0
2205 __NAKED__ EXPORT_C TInt TRealX::Mult(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
2207 Multiplies this extended precision number by an extended precision value.
2209 @param aResult On return, a reference to an extended precision object
2210 containing the result of the operation.
2211 @param aVal The extended precision value to be used as the multiplier.
2213 @return KErrNone, if the operation is successful;
2214 KErrOverflow, if the operation results in overflow;
2215 KErrUnderflow, if the operation results in underflow.
2218 // r0=this, r1=&aResult, r2=&aVal
// Marshal aVal into r4-r6 and *this into r1-r3 for TRealXMultiply; &aResult
// is kept on the stack (popped into lr) so the product can be stored.
2219 asm("stmfd sp!, {r1,r4-r7,lr} ");
2220 asm("ldmia r2, {r4,r5,r6} ");
2221 asm("ldmia r0, {r1,r2,r3} ");
2222 asm("bl TRealXMultiply ");
2223 asm("ldmfd sp!, {lr} "); // lr=&aResult
2224 asm("stmia lr, {r1,r2,r3} ");
2225 asm("mov r0, r12 "); // return value into r0
2232 __NAKED__ EXPORT_C TInt TRealX::Div(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
2234 Divides this extended precision number by an extended precision value.
2236 @param aResult On return, a reference to an extended precision object
2237 containing the result of the operation.
2238 @param aVal The extended precision value to be used as the divisor.
2240 @return KErrNone, if the operation is successful;
2241 KErrOverflow, if the operation results in overflow;
2242 KErrUnderflow, if the operation results in underflow;
2243 KErrDivideByZero, if the divisor is zero.
2246 // r0=this, r1=&aResult, r2=&aVal
// Marshal aVal into r4-r6 and *this into r1-r3 for TRealXDivide; &aResult
// is kept on the stack (popped into lr) so the quotient can be stored.
2247 asm("stmfd sp!, {r1,r4-r9,lr} ");
2248 asm("ldmia r2, {r4,r5,r6} ");
2249 asm("ldmia r0, {r1,r2,r3} ");
2250 asm("bl TRealXDivide ");
2251 asm("ldmfd sp!, {lr} "); // lr=&aResult
2252 asm("stmia lr, {r1,r2,r3} ");
2253 asm("mov r0, r12 "); // return value into r0
2260 __NAKED__ EXPORT_C TInt TRealX::Mod(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
2262 Modulo-divides this extended precision number by an extended precision value.
2264 @param aResult On return, a reference to an extended precision object
2265 containing the result of the operation.
2267 @param aVal The extended precision value to be used as the divisor.
2269 @return KErrNone, if the operation is successful;
2270 KErrTotalLossOfPrecision, if precision is lost;
2271 KErrUnderflow, if the operation results in underflow.
2274 // r0=this, r1=&aResult, r2=&aVal
// Marshal aVal into r4-r6 and *this into r1-r3 for TRealXModulo; &aResult
// is kept on the stack (popped into lr) so the remainder can be stored.
2275 asm("stmfd sp!, {r1,r4-r7,lr} ");
2276 asm("ldmia r2, {r4,r5,r6} ");
2277 asm("ldmia r0, {r1,r2,r3} ");
2278 asm("bl TRealXModulo ");
2279 asm("ldmfd sp!, {lr} "); // lr=&aResult
2280 asm("stmia lr, {r1,r2,r3} ");
2281 asm("mov r0, r12 "); // return value into r0
2285 extern void PanicOverUnderflowDividebyZero(const TInt aErr);
2290 __NAKED__ EXPORT_C const TRealX& TRealX::operator+=(const TRealX& /*aVal*/)
2292 Adds an extended precision value to this extended precision number.
2294 @param aVal The extended precision value to be added.
2296 @return A reference to this object.
2298 @panic MATHX KErrOverflow if the operation results in overflow.
2299 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this, r1=&aVal. Add in place via TRealXAdd; on any non-zero error code
// from r12 fall through to the MATHX panic handler instead of returning.
2302 asm("stmfd sp!, {r0,r4-r8,lr} ");
2303 asm("ldmia r1, {r4,r5,r6} ");
2304 asm("ldmia r0, {r1,r2,r3} ");
2305 asm("bl TRealXAdd ");
2306 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2307 asm("stmia r0, {r1,r2,r3} ");
2308 asm("cmp r12, #0 "); // check the error code
2310 asm("mov r0, r12 ");
2311 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2317 __NAKED__ EXPORT_C const TRealX& TRealX::operator-=(const TRealX& /*aVal*/)
2319 Subtracts an extended precision value from this extended precision number.
2321 @param aVal The extended precision value to be subtracted.
2323 @return A reference to this object.
2325 @panic MATHX KErrOverflow if the operation results in overflow.
2326 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this, r1=&aVal. Subtract in place via TRealXSubtract; on any non-zero
// error code from r12 fall through to the MATHX panic handler.
2329 asm("stmfd sp!, {r0,r4-r8,lr} ");
2330 asm("ldmia r1, {r4,r5,r6} ");
2331 asm("ldmia r0, {r1,r2,r3} ");
2332 asm("bl TRealXSubtract ");
2333 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2334 asm("stmia r0, {r1,r2,r3} ");
2335 asm("cmp r12, #0 "); // check the error code
2337 asm("mov r0, r12 ");
2338 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2344 __NAKED__ EXPORT_C const TRealX& TRealX::operator*=(const TRealX& /*aVal*/)
2346 Multiplies this extended precision number by an extended precision value.
2348 @param aVal The extended precision value to be used as the multiplier.
2350 @return A reference to this object.
2352 @panic MATHX KErrOverflow if the operation results in overflow.
2353 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this, r1=&aVal. Multiply in place via TRealXMultiply; on any non-zero
// error code from r12 fall through to the MATHX panic handler.
2356 asm("stmfd sp!, {r0,r4-r7,lr} ");
2357 asm("ldmia r1, {r4,r5,r6} ");
2358 asm("ldmia r0, {r1,r2,r3} ");
2359 asm("bl TRealXMultiply ");
2360 asm("ldmfd sp!, {r0,r4-r7,lr} ");
2361 asm("stmia r0, {r1,r2,r3} ");
2362 asm("cmp r12, #0 "); // check the error code
2364 asm("mov r0, r12 ");
2365 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2371 __NAKED__ EXPORT_C const TRealX& TRealX::operator/=(const TRealX& /*aVal*/)
2373 Divides this extended precision number by an extended precision value.
2375 @param aVal The extended precision value to be used as the divisor.
2377 @return A reference to this object.
2379 @panic MATHX KErrOverflow if the operation results in overflow.
2380 @panic MATHX KErrUnderflow if the operation results in underflow.
2381 @panic MATHX KErrDivideByZero if the divisor is zero.
// r0=this, r1=&aVal. Divide in place via TRealXDivide; on any non-zero
// error code from r12 fall through to the MATHX panic handler.
2384 asm("stmfd sp!, {r0,r4-r9,lr} ");
2385 asm("ldmia r1, {r4,r5,r6} ");
2386 asm("ldmia r0, {r1,r2,r3} ");
2387 asm("bl TRealXDivide ");
2388 asm("ldmfd sp!, {r0,r4-r9,lr} ");
2389 asm("stmia r0, {r1,r2,r3} ");
2390 asm("cmp r12, #0 "); // check the error code
2392 asm("mov r0, r12 ");
2393 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2399 __NAKED__ EXPORT_C const TRealX& TRealX::operator%=(const TRealX& /*aVal*/)
2401 Modulo-divides this extended precision number by an extended precision value.
2403 @param aVal The extended precision value to be used as the divisor.
2405 @return A reference to this object.
2407 @panic MATHX KErrTotalLossOfPrecision panic if precision is lost.
2408 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this, r1=&aVal. Remainder in place via TRealXModulo. Note the double
// compare: KErrTotalLossOfPrecision is additionally tolerated (treated as
// success) before falling through to the MATHX panic handler.
2411 asm("stmfd sp!, {r0,r4-r7,lr} ");
2412 asm("ldmia r1, {r4,r5,r6} ");
2413 asm("ldmia r0, {r1,r2,r3} ");
2414 asm("bl TRealXModulo ");
2415 asm("ldmfd sp!, {r0,r4-r7,lr} ");
2416 asm("stmia r0, {r1,r2,r3} ");
2417 asm("cmp r12, #0 "); // check the error code
2418 asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision));
2420 asm("mov r0, r12 ");
2421 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2427 __NAKED__ EXPORT_C TRealX& TRealX::operator++()
2429 Increments this extended precision number by one,
2430 and then returns a reference to it.
2432 This is also referred to as a prefix operator.
2434 @return A reference to this object.
2436 @panic MATHX KErrOverflow if the operation results in overflow.
2437 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this. Adds the in-line constant 1.0 (see __TRealXOne below) using
// TRealXAdd; a non-zero error code from r12 routes to the MATHX panic.
2441 asm("stmfd sp!, {r0,r4-r8,lr} ");
2442 asm("ldmia r0, {r1,r2,r3} ");
2443 asm("add r4, pc, #__TRealXOne-.-8 "); // r4 -> literal pool below (PC-relative)
2444 asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
2445 asm("bl TRealXAdd ");
2446 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2447 asm("stmia r0, {r1,r2,r3} ");
2448 asm("cmp r12, #0 "); // check the error code
2450 asm("mov r0, r12 ");
2451 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
// TRealX representation of 1.0: mantissa 0x80000000:00000000, exponent/sign
// word 0x7FFF0000. Shared by the other increment/decrement operators.
2453 asm("__TRealXOne: ");
2454 asm(".word 0x00000000 ");
2455 asm(".word 0x80000000 ");
2456 asm(".word 0x7FFF0000 ");
2462 __NAKED__ EXPORT_C TRealX TRealX::operator++(TInt)
2464 Returns this extended precision number before incrementing it by one.
2466 This is also referred to as a postfix operator.
2468 @return An extended precision object containing the value
before the increment.
2470 @panic MATHX KErrOverflow if the operation results in overflow.
2471 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: the caller passes the address of the return slot in r0.
// The old value is stored there first, then *this is incremented by the
// shared 1.0 literal via TRealXAdd; errors route to the MATHX panic.
2475 // r0=address of return value, r1=this
2476 asm("stmfd sp!, {r0,r1,r4-r8,lr} ");
2477 asm("ldmia r1, {r1,r2,r3} ");
2478 asm("stmia r0, {r1,r2,r3} "); // store old value
2479 asm("add r4, pc, #__TRealXOne-.-8 ");
2480 asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
2481 asm("bl TRealXAdd ");
2482 asm("ldmfd sp!, {r0,lr} "); // restore r0, lr=this
2483 asm("stmia lr, {r1,r2,r3} "); // store incremented value
2484 asm("ldmfd sp!, {r4-r8,lr} ");
2485 asm("cmp r12, #0 "); // check the error code
2487 asm("mov r0, r12 ");
2488 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2494 __NAKED__ EXPORT_C TRealX& TRealX::operator--()
2496 Decrements this extended precision number by one,
2497 and then returns a reference to it.
2499 This is also referred to as a prefix operator.
2501 @return A reference to this object.
2503 @panic MATHX KErrOverflow if the operation results in overflow.
2504 @panic MATHX KErrUnderflow if the operation results in underflow.
// r0=this. Subtracts the shared 1.0 literal (__TRealXOne) using
// TRealXSubtract; a non-zero error code from r12 routes to the MATHX panic.
2508 asm("stmfd sp!, {r0,r4-r8,lr} ");
2509 asm("ldmia r0, {r1,r2,r3} ");
2510 asm("add r4, pc, #__TRealXOne-.-8 "); // r4 -> shared 1.0 literal pool
2511 asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
2512 asm("bl TRealXSubtract ");
2513 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2514 asm("stmia r0, {r1,r2,r3} ");
2515 asm("cmp r12, #0 "); // check the error code
2517 asm("mov r0, r12 ");
2518 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2524 __NAKED__ EXPORT_C TRealX TRealX::operator--(TInt)
2526 Returns this extended precision number before decrementing it by one.
2528 This is also referred to as a postfix operator.
2530 @return An extended precision object containing the value
before the decrement.
2532 @panic MATHX KErrOverflow if the operation results in overflow.
2533 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: the caller passes the address of the return slot in r0.
// The old value is stored there first, then *this is decremented by the
// shared 1.0 literal via TRealXSubtract; errors route to the MATHX panic.
2537 // r0=address of return value, r1=this
2538 asm("stmfd sp!, {r0,r1,r4-r8,lr} ");
2539 asm("ldmia r1, {r1,r2,r3} ");
2540 asm("stmia r0, {r1,r2,r3} "); // store old value
2541 asm("add r4, pc, #__TRealXOne-.-8 ");
2542 asm("ldmia r4, {r4,r5,r6} "); // r4,r5,r6=1.0
2543 asm("bl TRealXSubtract ");
2544 asm("ldmfd sp!, {r0,lr} "); // restore r0, lr=this
2545 asm("stmia lr, {r1,r2,r3} "); // store decremented value
2546 asm("ldmfd sp!, {r4-r8,lr} ");
2547 asm("cmp r12, #0 "); // check the error code
2549 asm("mov r0, r12 ");
2550 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2556 __NAKED__ EXPORT_C TRealX TRealX::operator+(const TRealX& /*aVal*/) const
2558 Adds an extended precision value to this extended precision number.
2560 @param aVal The extended precision value to be added.
2562 @return An extended precision object containing the result.
2564 @panic MATHX KErrOverflow if the operation results in overflow.
2565 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: r0 holds the caller-allocated return slot. The sum is
// computed by TRealXAdd and stored there; errors route to the MATHX panic.
2568 // r0=address of return value, r1=this, r2=&aVal
2569 asm("stmfd sp!, {r0,r4-r8,lr} ");
2570 asm("ldmia r2, {r4,r5,r6} ");
2571 asm("ldmia r1, {r1,r2,r3} ");
2572 asm("bl TRealXAdd ");
2573 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2574 asm("stmia r0, {r1,r2,r3} ");
2575 asm("cmp r12, #0 "); // check the error code
2577 asm("mov r0, r12 ");
2578 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2584 __NAKED__ EXPORT_C TRealX TRealX::operator-(const TRealX& /*aVal*/) const
2586 Subtracts an extended precision value from this extended precision number.
2588 @param aVal The extended precision value to be subtracted.
2590 @return An extended precision object containing the result.
2592 @panic MATHX KErrOverflow if the operation results in overflow.
2593 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: r0 holds the caller-allocated return slot. The difference
// is computed by TRealXSubtract and stored there; errors route to the panic.
2596 // r0=address of return value, r1=this, r2=&aVal
2597 asm("stmfd sp!, {r0,r4-r8,lr} ");
2598 asm("ldmia r2, {r4,r5,r6} ");
2599 asm("ldmia r1, {r1,r2,r3} ");
2600 asm("bl TRealXSubtract ");
2601 asm("ldmfd sp!, {r0,r4-r8,lr} ");
2602 asm("stmia r0, {r1,r2,r3} ");
2603 asm("cmp r12, #0 "); // check the error code
2605 asm("mov r0, r12 ");
2606 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2612 __NAKED__ EXPORT_C TRealX TRealX::operator*(const TRealX& /*aVal*/) const
2614 Multiplies this extended precision number by an extended precision value.
2616 @param aVal The extended precision value to be used as the multiplier.
2618 @return An extended precision object containing the result.
2620 @panic MATHX KErrOverflow if the operation results in overflow.
2621 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: r0 holds the caller-allocated return slot. The product is
// computed by TRealXMultiply and stored there; errors route to the panic.
2624 // r0=address of return value, r1=this, r2=&aVal
2625 asm("stmfd sp!, {r0,r4-r7,lr} ");
2626 asm("ldmia r2, {r4,r5,r6} ");
2627 asm("ldmia r1, {r1,r2,r3} ");
2628 asm("bl TRealXMultiply ");
2629 asm("ldmfd sp!, {r0,r4-r7,lr} ");
2630 asm("stmia r0, {r1,r2,r3} ");
2631 asm("cmp r12, #0 "); // check the error code
2633 asm("mov r0, r12 ");
2634 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2640 __NAKED__ EXPORT_C TRealX TRealX::operator/(const TRealX& /*aVal*/) const
2642 Divides this extended precision number by an extended precision value.
2644 @param aVal The extended precision value to be used as the divisor.
2646 @return An extended precision object containing the result.
2648 @panic MATHX KErrOverflow if the operation results in overflow.
2649 @panic MATHX KErrUnderflow if the operation results in underflow.
2650 @panic MATHX KErrDivideByZero if the divisor is zero.
// Return-by-value: r0 holds the caller-allocated return slot. The quotient
// is computed by TRealXDivide and stored there; errors route to the panic.
2653 // r0=address of return value, r1=this, r2=&aVal
2654 asm("stmfd sp!, {r0,r4-r9,lr} ");
2655 asm("ldmia r2, {r4,r5,r6} ");
2656 asm("ldmia r1, {r1,r2,r3} ");
2657 asm("bl TRealXDivide ");
2658 asm("ldmfd sp!, {r0,r4-r9,lr} ");
2659 asm("stmia r0, {r1,r2,r3} ");
2660 asm("cmp r12, #0 "); // check the error code
2662 asm("mov r0, r12 ");
2663 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2669 __NAKED__ EXPORT_C TRealX TRealX::operator%(const TRealX& /*aVal*/) const
2671 Modulo-divides this extended precision number by an extended precision value.
2673 @param aVal The extended precision value to be used as the divisor.
2675 @return An extended precision object containing the result.
2677 @panic MATHX KErrTotalLossOfPrecision if precision is lost.
2678 @panic MATHX KErrUnderflow if the operation results in underflow.
// Return-by-value: r0 holds the caller-allocated return slot. Remainder via
// TRealXModulo; like operator%=, KErrTotalLossOfPrecision is tolerated by
// the second compare before routing other errors to the MATHX panic.
2681 // r0=address of return value, r1=this, r2=&aVal
2682 asm("stmfd sp!, {r0,r4-r7,lr} ");
2683 asm("ldmia r2, {r4,r5,r6} ");
2684 asm("ldmia r1, {r1,r2,r3} ");
2685 asm("bl TRealXModulo ");
2686 asm("ldmfd sp!, {r0,r4-r7,lr} ");
2687 asm("stmia r0, {r1,r2,r3} ");
2688 asm("cmp r12, #0 "); // check the error code
2689 asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision));
2691 asm("mov r0, r12 ");
2692 asm("b " CSM_Z30PanicOverUnderflowDividebyZeroi); // else panic
2698 #ifdef __REALS_MACHINE_CODED__
2699 __NAKED__ EXPORT_C TInt Math::Sqrt( TReal &/*aDest*/, const TReal &/*aSrc*/ )
2701 Calculates the square root of a number.
2703 @param aDest A reference containing the result.
2704 @param aSrc The number whose square-root is required.
2706 @return KErrNone if successful, otherwise another of
2707 the system-wide error codes.
2710 // r0=address of aDest, r1=address of aSrc
2713 #ifdef __USE_VFP_MATH
2714 VFP_FLDD(CC_AL,0,1,0);
2716 VFP_FMRRD(CC_AL,3,2,0);
2717 asm("bic r1, r2, #0x80000000 "); // remove sign bit
2718 asm("cmn r1, #0x00100000 "); // check if exp=7FF
2719 asm("movpl r1, #0 "); // if not return KErrNone
2720 asm("bpl donesqrt ");
2721 asm("movs r1, r1, lsl #12 "); // if exp=7FF, check mantissa
2722 asm("cmpeq r3, #0 ");
2723 asm("moveq r1, #-9 "); // if exp=7FF, mant=0, return KErrOverflow
2724 asm("mvnne r2, #0x80000000 "); // else set NaN
2725 asm("mvnne r3, #0 ");
2726 asm("movne r1, #-6 "); // and return KErrArgument
2728 #ifdef __DOUBLE_WORDS_SWAPPED__
2729 asm("stmia r0, {r2,r3} "); // store the result
2731 asm("str r2, [r0, #4] ");
2732 asm("str r3, [r0, #0] ");
2736 #else // __USE_VFP_MATH
2737 asm("stmfd sp!, {r4-r10,lr} ");
2738 #ifdef __DOUBLE_WORDS_SWAPPED__
2739 asm("ldmia r1, {r3,r4} "); // low mant into r4, sign:exp:high mant into r3
2741 asm("ldr r3, [r1, #4] ");
2742 asm("ldr r4, [r1, #0] ");
2744 asm("bic r5, r3, #0xFF000000 ");
2745 asm("bic r5, r5, #0x00F00000 "); // high word of mantissa into r5
2746 asm("mov r2, r3, lsr #20 ");
2747 asm("bics r2, r2, #0x800 "); // exponent now in r2
2748 asm("beq fastsqrt1 "); // branch if exponent zero (zero or denormal)
2749 asm("mov r6, #0xFF ");
2750 asm("orr r6, r6, #0x700 ");
2751 asm("cmp r2, r6 "); // check for infinity or NaN
2752 asm("beq fastsqrt2 "); // branch if infinity or NaN
2753 asm("movs r3, r3 "); // test sign
2754 asm("bmi fastsqrtn "); // branch if negative
2755 asm("sub r2, r2, #0xFF "); // unbias the exponent
2756 asm("sub r2, r2, #0x300 "); //
2757 asm("fastsqrtd1: ");
2758 asm("mov r1, #0x40000000 "); // value for comparison
2759 asm("mov r3, #27 "); // loop counter (number of bits/2)
2760 asm("movs r2, r2, asr #1 "); // divide exponent by 2, LSB into CF
2761 asm("movcs r7, r5, lsl #11 "); // mantissa into r6,r7 with MSB in MSB of r7
2762 asm("orrcs r7, r7, r4, lsr #21 ");
2763 asm("movcs r6, r4, lsl #11 ");
2764 asm("movcs r4, #0 "); // r4, r5 will hold result mantissa
2765 asm("orrcs r7, r7, #0x80000000 "); // if exponent odd, restore MSB of mantissa
2766 asm("movcc r7, r5, lsl #12 "); // mantissa into r6,r7 with MSB in MSB of r7
2767 asm("orrcc r7, r7, r4, lsr #20 "); // if exponent even, shift mantissa left an extra
2768 asm("movcc r6, r4, lsl #12 "); // place, lose top bit, and
2769 asm("movcc r4, #1 "); // set MSB of result, and
2770 asm("mov r5, #0 "); // r4, r5 will hold result mantissa
2771 asm("mov r8, #0 "); // r8, r9 will be comparison accumulator
2773 asm("bcc fastsqrt4 "); // if exponent even, calculate one less bit
2774 // as result MSB already known
2776 // Main mantissa square-root loop
2777 asm("fastsqrt3: "); // START OF MAIN LOOP
2778 asm("subs r10, r7, r1 "); // subtract result:01 from acc:mant
2779 asm("sbcs r12, r8, r4 "); // result into r14:r12:r10
2780 asm("sbcs r14, r9, r5 ");
2781 asm("movcs r7, r10 "); // if no borrow replace accumulator with result
2782 asm("movcs r8, r12 ");
2783 asm("movcs r9, r14 ");
2784 asm("adcs r4, r4, r4 "); // shift result left one, putting in next bit
2785 asm("adcs r5, r5, r5 ");
2786 asm("mov r9, r9, lsl #2 "); // shift acc:mant left by 2 bits
2787 asm("orr r9, r9, r8, lsr #30 ");
2788 asm("mov r8, r8, lsl #2 ");
2789 asm("orr r8, r8, r7, lsr #30 ");
2790 asm("mov r7, r7, lsl #2 ");
2791 asm("orr r7, r7, r6, lsr #30 ");
2792 asm("mov r6, r6, lsl #2 ");
2793 asm("fastsqrt4: "); // Come in here if we need to do one less iteration
2794 asm("subs r10, r7, r1 "); // subtract result:01 from acc:mant
2795 asm("sbcs r12, r8, r4 "); // result into r14:r12:r10
2796 asm("sbcs r14, r9, r5 ");
2797 asm("movcs r7, r10 "); // if no borrow replace accumulator with result
2798 asm("movcs r8, r12 ");
2799 asm("movcs r9, r14 ");
2800 asm("adcs r4, r4, r4 "); // shift result left one, putting in next bit
2801 asm("adcs r5, r5, r5 ");
2802 asm("mov r9, r9, lsl #2 "); // shift acc:mant left by 2 bits
2803 asm("orr r9, r9, r8, lsr #30 ");
2804 asm("mov r8, r8, lsl #2 ");
2805 asm("orr r8, r8, r7, lsr #30 ");
2806 asm("mov r7, r7, lsl #2 ");
2807 asm("orr r7, r7, r6, lsr #30 ");
2808 asm("mov r6, r6, lsl #2 ");
2809 asm("subs r3, r3, #1 "); // decrement loop counter
2810 asm("bne fastsqrt3 "); // do necessary number of iterations
2812 asm("movs r4, r4, lsr #1 "); // shift result mantissa right 1 place
2813 asm("orr r4, r4, r5, lsl #31 "); // LSB (=rounding bit) into carry
2814 asm("mov r5, r5, lsr #1 ");
2815 asm("adcs r4, r4, #0 "); // round the mantissa to 53 bits
2816 asm("adcs r5, r5, #0 ");
2817 asm("cmp r5, #0x00200000 "); // check for mantissa overflow
2818 asm("addeq r2, r2, #1 "); // if so, increment exponent - can never overflow
2819 asm("bic r5, r5, #0x00300000 "); // remove top bit of mantissa - it is implicit
2820 asm("add r2, r2, #0xFF "); // re-bias the exponent
2821 asm("add r3, r2, #0x300 "); // and move into r3
2822 asm("orr r3, r5, r3, lsl #20 "); // r3 now contains exponent + top of mantissa
2823 asm("fastsqrt_ok: ");
2824 #ifdef __DOUBLE_WORDS_SWAPPED__
2825 asm("stmia r0, {r3,r4} "); // store the result
2827 asm("str r3, [r0, #4] ");
2828 asm("str r4, [r0, #0] ");
2830 asm("mov r0, #0 "); // error code KErrNone
2831 __POPRET("r4-r10,");
2834 asm("orrs r6, r5, r4 "); // exponent zero - test mantissa
2835 asm("beq fastsqrt_ok "); // if zero, return 0
2837 asm("movs r3, r3 "); // denormal - test sign
2838 asm("bmi fastsqrtn "); // branch out if negative
2839 asm("sub r2, r2, #0xFE "); // unbias the exponent
2840 asm("sub r2, r2, #0x300 "); //
2842 asm("adds r4, r4, r4 "); // shift mantissa left
2843 asm("adcs r5, r5, r5 ");
2844 asm("sub r2, r2, #1 "); // and decrement exponent
2845 asm("tst r5, #0x00100000 "); // test if normalised
2846 asm("beq fastsqrtd "); // loop until normalised
2847 asm("b fastsqrtd1 "); // now treat as a normalised number
2848 asm("fastsqrt2: "); // get here if infinity or NaN
2849 asm("orrs r6, r5, r4 "); // if mantissa zero, infinity
2850 asm("bne fastsqrtnan "); // branch if not - must be NaN
2851 asm("movs r3, r3 "); // test sign of infinity
2852 asm("bmi fastsqrtn "); // branch if -ve
2853 #ifdef __DOUBLE_WORDS_SWAPPED__
2854 asm("stmia r0, {r3,r4} "); // store the result
2856 asm("str r3, [r0, #4] ");
2857 asm("str r4, [r0, #0] ");
2859 asm("mov r0, #-9 "); // return KErrOverflow
2860 asm("b fastsqrt_end ");
2862 asm("fastsqrtn: "); // get here if negative or QNaN operand
2863 asm("mov r3, #0xFF000000 "); // generate "real indefinite" QNaN
2864 asm("orr r3, r3, #0x00F80000 "); // sign=1, exp=7FF, mantissa = 1000...0
2866 asm("fastsqrtxa: ");
2867 #ifdef __DOUBLE_WORDS_SWAPPED__
2868 asm("stmia r0, {r3,r4} "); // store the result
2870 asm("str r3, [r0, #4] ");
2871 asm("str r4, [r0, #0] ");
2873 asm("mov r0, #-6 "); // return KErrArgument
2874 asm("fastsqrt_end: ");
2875 __POPRET("r4-r10,");
2877 asm("fastsqrtnan: "); // operand is a NaN
2878 asm("tst r5, #0x00080000 "); // test MSB of mantissa
2879 asm("bne fastsqrtn "); // if set it is a QNaN - so return "real indefinite"
2880 asm("bic r3, r3, #0x00080000 "); // else convert SNaN to QNaN
2881 asm("b fastsqrtxa "); // and return KErrArgument
2882 #endif // __USE_VFP_MATH
2888 __NAKED__ EXPORT_C TReal Math::Poly(TReal /*aX*/,const SPoly* /*aPoly*/) __SOFTFP
2890 Evaluates the polynomial:
2891 {a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.
2894 @param aX The value of the x-variable
2895 @param aPoly A pointer to the structure containing the set of coefficients
2896 in the order: a[0], a[1], ..., a[n-1], a[n].
2898 @return The result of the evaluation.
2901 // Evaluate a power series in x for a P_POLY coefficient table.
2902 // Changed to use TRealX throughout the calculation
// Horner's scheme: load the highest-order coefficient, then repeatedly
// multiply by aX and add the next lower coefficient, walking r11 down
// through the TReal64 coefficient table. All intermediate arithmetic is
// done in three-word TRealX form (r1,r2,r3) via the TRealXMultiply /
// TRealXAdd helpers, with aX kept as TRealX in r4,r5,r6.
2905 // On entry r0,r1=aX, r2=aPoly
2906 asm("stmfd sp!, {r4-r11,lr} ");
2907 asm("mov r11, r2 ");
2908 asm("ldr r10, [r11], #4 "); // r10=number of coefficients, r11=first coeff addr
2909 asm("add r11, r11, r10, lsl #3 "); // r11=address of last coefficient+8
2910 asm("mov r2, r1 "); // aX into r1,r2
2912 asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
2913 asm("mov r4, r1 "); // move into r4,r5,r6
2916 asm("ldmdb r11!, {r1,r2} "); // last coefficient into r1,r2
2917 asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
2918 asm("subs r10, r10, #1 ");
2919 asm("beq polynomial0 "); // if no more coefficients, exit
2921 asm("polynomial1: ");
2922 asm("stmfd sp!, {r4,r5,r6} "); // save value of aX
2923 asm("bl TRealXMultiply "); // r *= aX
2924 asm("mov r4, r1 "); // move result into r4,r5,r6
2927 asm("ldmdb r11!, {r1,r2} "); // next coefficient into r1,r2
2928 asm("bl ConvertTReal64ToTRealX "); // convert to TRealX in r1,r2,r3
2929 asm("bl TRealXAdd "); // r += *--pR
2930 asm("ldmfd sp!, {r4,r5,r6} "); // aX back into r4,r5,r6
2931 asm("subs r10, r10, #1 "); // iterate until all coefficients processed
2932 asm("bne polynomial1 ");
2934 asm("polynomial0: "); // result now in r1,r2,r3
2935 asm("bl ConvertTRealXToTReal64 "); // convert back to TReal64
2936 __POPRET("r4-r11,");
2942 __NAKED__ EXPORT_C void Math::PolyX(TRealX& /*aY*/,const TRealX& /*aX*/,TInt /*aDeg*/,const TRealX* /*aCoef*/)
2944 Evaluates the polynomial:
2945 {a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.
2947 @param aY A reference containing the result.
2948 @param aX The value of the x-variable.
2949 @param aDeg The degree of the polynomial (the highest power of x
2951 @param aCoef A pointer to a contiguous set of TRealX values containing
2953 They must be in the order: a[0], a[1], ..., a[n-1], a[n].
// Horner's scheme entirely in TRealX form. Each TRealX occupies 12 bytes,
// so the last coefficient sits at aCoef + aDeg*12, computed below as
// aDeg*8 + aDeg*4. The accumulator lives in r1,r2,r3 throughout.
2956 // Evaluate a polynomial with TRealX argument, coefficients and result
2959 // On entry r0=&aY, r1=&aX, r2=aDeg, r3=aCoef
2960 asm("stmfd sp!, {r0,r4-r11,lr} "); // r0 (=&aY) saved so it survives the helper calls
2961 asm("add r11, r3, r2, lsl #3 "); // r11=address of last coefficient
2962 asm("add r11, r11, r2, lsl #2 ");
2963 asm("mov r9, r1 "); // r9=address of argument
2964 asm("movs r10, r2 "); // r10=number of coefficients-1
2965 asm("ldmia r11, {r1,r2,r3} "); // last coefficient into r1,r2,r3
2966 asm("beq polyx0 "); // if no more coefficients, exit
2969 asm("ldmia r9, {r4,r5,r6} "); // aX into r4,r5,r6
2970 asm("bl TRealXMultiply "); // result *= aX
2971 asm("ldmdb r11!, {r4,r5,r6} "); // next coefficient into r4,r5,r6
2972 asm("bl TRealXAdd "); // result += next coeff
2973 asm("subs r10, r10, #1 "); // iterate until all coefficients processed
2976 asm("polyx0: "); // result now in r1,r2,r3
2977 asm("ldmfd sp!, {r0,r4-r11,lr} "); // restore registers, including destination address in r0
2978 asm("stmia r0, {r1,r2,r3} "); // store result
2985 #ifndef __USE_VFP_MATH
2986 __NAKED__ EXPORT_C TInt Math::Int(TReal& /*aTrg*/, const TReal& /*aSrc*/)
2988 Calculates the integer part of a number.
2990 The integer part is that before a decimal point.
2991 Truncation is toward zero, so that
2992 int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.
2995 @param aTrg A reference containing the result.
2996 @param aSrc The number whose integer part is required.
2998 @return KErrNone if successful, otherwise another of
2999 the system-wide error codes.
3002 // Write the integer part of aSrc to the TReal at aTrg
3003 // Negative numbers are rounded towards zero.
// Truncation is done purely on the IEEE754 bit pattern by TReal64Int
// (below): mantissa bits below the binary point are cleared in place,
// so no rounding-mode state is involved. Infinity returns KErrOverflow
// (mvn #8 = -9), NaN returns KErrArgument (mvn #5 = -6).
3006 // r0=&aTrg, r1=&aSrc, return value in r0
3007 asm("stmfd sp!, {lr} ");
3008 asm("mov r12, r0 "); // r12=&aTrg
3009 #ifdef __DOUBLE_WORDS_SWAPPED__
3010 asm("ldmia r1, {r0,r1} "); // input value into r0,r1
3012 asm("ldr r0, [r1, #4] ");
3013 asm("ldr r1, [r1, #0] ");
3015 asm("bl TReal64Int ");
3016 #ifdef __DOUBLE_WORDS_SWAPPED__
3017 asm("stmia r12, {r0,r1} "); // store result
3019 asm("str r0, [r12, #4] ");
3020 asm("str r1, [r12, #0] ");
3022 asm("bic r0, r0, #0x80000000 "); // remove sign bit
3023 asm("cmn r0, #0x00100000 "); // check for NaN or infinity
3024 asm("movpl r0, #0 "); // if neither, return KErrNone
3025 asm("bpl math_int_0 ");
3026 asm("movs r0, r0, lsl #12 "); // check for infinity
3027 asm("cmpeq r1, #0 ");
3028 asm("mvneq r0, #8 "); // if infinity return KErrOverflow
3029 asm("mvnne r0, #5 "); // else return KErrArgument
3030 asm("math_int_0: ");
3033 // Take integer part of TReal64 in r0,r1
3034 // Infinity and NaNs are unaffected
// Helper: r0=sign:exp:high mantissa, r1=low mantissa. Works out how many
// integer bits the value has from the unbiased exponent and masks off
// everything below the binary point.
3036 asm("TReal64Int: ");
3037 asm("mov r2, r0, lsr #20 ");
3038 asm("bic r2, r2, #0x800 "); // r2=exponent
3039 asm("mov r3, #0x300 ");
3040 asm("orr r3, r3, #0xFF "); // r3=0x3FF
3041 asm("subs r2, r2, r3 "); // r2=exponent-3FF=number of integer bits-1
3042 asm("ble TReal64Int1 "); // branch if <=1 integer bits
3043 asm("cmp r2, #52 ");
3045 asm("cmp r2, #20 ");
3046 asm("bgt TReal64Int2 "); // jump if >21 integer bits (r0 will be unaffected)
3047 asm("rsb r2, r2, #20 "); // r2=number of bits to clear at bottom end of r0
3048 asm("mov r0, r0, lsr r2 "); // clear them
3049 asm("mov r0, r0, lsl r2 ");
3050 asm("mov r1, #0 "); // clear r1
3052 asm("TReal64Int2: ");
3053 asm("rsb r2, r2, #52 "); // r2=number of bits to clear at bottom end of r1
3054 asm("mov r1, r1, lsr r2 "); // clear them
3055 asm("mov r1, r1, lsl r2 ");
3057 asm("TReal64Int1: "); // result is either 0 or 1
3058 asm("mov r1, #0 "); // lower mantissa bits of result will be zero
3059 asm("moveq r0, r0, lsr #20 "); // if result is 1, clear mantissa but leave exponent
3060 asm("moveq r0, r0, lsl #20 ");
3061 asm("andlt r0, r0, #0x80000000 "); // if result is 0, clear mantissa and exponent
3068 __NAKED__ EXPORT_C TInt Math::Int(TInt16& /*aTrg*/, const TReal& /*aSrc*/)
3070 Calculates the integer part of a number.
3072 The integer part is that before a decimal point.
3073 Truncation is toward zero, so that:
3074 int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.
3076 This function is suitable when the result is known to be small enough
3077 for a 16-bit signed integer.
3079 @param aTrg A reference containing the result.
3080 @param aSrc The number whose integer part is required.
3082 @return KErrNone if successful, otherwise another of
3083 the system-wide error codes.
3086 // If the integer part of aSrc is in the range -32768 to +32767
3087 // inclusive, write the integer part to the TInt16 at aTrg
3088 // Negative numbers are rounded towards zero.
3089 // If an overflow or underflow occurs, aTrg is set to the max/min value
// Converts via the 32-bit path (TReal64GetTInt) then saturates the result
// to the TInt16 range. The result is stored with two byte stores, so aTrg
// need not be halfword-aligned.
3092 // r0=&aTrg, r1=&aSrc
3093 asm("stmfd sp!, {lr} ");
3094 asm("mov r3, r0 "); // r3=&aTrg
3095 #ifdef __DOUBLE_WORDS_SWAPPED__
3096 asm("ldmia r1, {r0,r1} "); // input value into r0,r1
3098 asm("ldr r0, [r1, #4] ");
3099 asm("ldr r1, [r1, #0] ");
3101 asm("bl TReal64GetTInt "); // do the conversion
3102 asm("cmp r0, #0x8000 "); // limit answer to TInt16 range
3103 asm("movge r0, #0x7F00 ");
3104 asm("orrge r0, r0, #0xFF "); // clamp to +32767
3105 asm("mvnge r12, #8 "); // set error code (KErrOverflow) if limiting occurred
3106 asm("cmn r0, #0x8000 ");
3107 asm("movlt r0, #0x8000 "); // clamp to -32768
3108 asm("mvnlt r12, #9 "); // set error code (KErrUnderflow) if limiting occurred
3109 asm("mov r1, r0, lsr #8 "); // top byte of answer into r1
3110 asm("strb r0, [r3] "); // store result in aTrg
3111 asm("strb r1, [r3, #1] ");
3112 asm("mov r0, r12 "); // return error code in r0
3118 __NAKED__ EXPORT_C TInt Math::Int(TInt32& /*aTrg*/, const TReal& /*aSrc*/)
3120 Calculates the integer part of a number.
3122 The integer part is that before a decimal point.
3123 Truncation is toward zero, so that
3124 int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.
3126 This function is suitable when the result is known to be small enough
3127 for a 32-bit signed integer.
3129 @param aTrg A reference containing the result.
3130 @param aSrc The number whose integer part is required.
3132 @return KErrNone if successful, otherwise another of
3133 the system-wide error codes.
3136 // If the integer part of the float is in the range -2147483648 to +2147483647
3137 // inclusive, write the integer part to the TInt32 at aTrg
3138 // Negative numbers are rounded towards zero.
3139 // If an overflow or underflow occurs, aTrg is set to the max/min value
3142 // r0=&aTrg, r1=&aSrc
3143 asm("stmfd sp!, {lr} ");
3144 asm("mov r3, r0 "); // r3=&aTrg
3145 #ifdef __DOUBLE_WORDS_SWAPPED__
3146 asm("ldmia r1, {r0,r1} "); // input value into r0,r1
3148 asm("ldr r0, [r1, #4] ");
3149 asm("ldr r1, [r1, #0] ");
3151 asm("bl TReal64GetTInt "); // do the conversion
3152 asm("str r0, [r3] "); // store result in aTrg
3153 asm("mov r0, r12 "); // return error code in r0
3156 // Convert double in r0,r1 to int in r0
3157 // Return error code in r12
3158 // Registers r0,r1,r2,r12 modified
// Helper shared by the TInt16/TInt32 Int() overloads.
// NaN -> result 0, KErrArgument; out-of-range -> saturate to
// 0x7FFFFFFF/0x80000000 with KErrOverflow/KErrUnderflow; -2^31 itself
// is accepted as in range.
3159 asm("TReal64GetTInt: ");
3160 asm("mov r2, r0, lsr #20 ");
3161 asm("bic r2, r2, #0x800 "); // r2=exponent
3162 asm("add r12, r2, #1 ");
3163 asm("cmp r12, #0x800 "); // check for NaN
3164 asm("bne TReal64GetTInt1 ");
3165 asm("movs r12, r0, lsl #12 "); // exponent=7FF, check mantissa
3166 asm("cmpeq r1, #0 ");
3167 asm("movne r0, #0 "); // if non-zero, input is a NaN so return 0
3168 asm("mvnne r12, #5 "); // and return KErrArgument
3170 asm("TReal64GetTInt1: ");
3171 asm("mov r12, #0x400 ");
3172 asm("orr r12, r12, #0x1E "); // r12=0x41E (exponent of 2^31)
3173 asm("subs r2, r12, r2 "); // r2=number of shifts to produce integer
3174 asm("mov r12, #0 "); // set return code to KErrNone
3175 asm("ble TReal64GetTInt2 "); // if <=0, saturate result
3176 asm("cmp r2, #31 "); // check if more than 31 shifts needed
3177 asm("movhi r0, #0 "); // if so, underflow result to 0
3179 asm("cmp r0, #0 "); // check sign bit
3180 asm("orr r0, r0, #0x00100000 "); // set implicit integer bit
3181 asm("mov r0, r0, lsl #11 "); // shift mantissa up so MSB is in MSB of r0
3182 asm("orr r0, r0, r1, lsr #21 "); // put in bits from r1
3183 asm("mov r0, r0, lsr r2 "); // r0=absolute integer
3184 asm("rsbmi r0, r0, #0 "); // if negative, negate
3186 asm("TReal64GetTInt2: ");
3187 asm("blt TReal64GetTInt3 "); // if exponent>0x41E, definitely an overflow
3188 asm("cmp r0, #0 "); // check sign bit
3189 asm("bpl TReal64GetTInt3 "); // if positive, definitely an overflow
3190 asm("orr r0, r0, #0x00100000 "); // set implicit integer bit
3191 asm("mov r0, r0, lsl #11 "); // shift mantissa up so MSB is in MSB of r0
3192 asm("orr r0, r0, r1, lsr #21 "); // put in bits from r1
3193 asm("cmp r0, #0x80000000 "); // check if value is = -2^31
3195 asm("TReal64GetTInt3: ");
3196 asm("cmp r0, #0 "); // check sign
3197 asm("mov r0, #0x80000000 ");
3198 asm("subpl r0, r0, #1 "); // if -ve return 80000000, if +ve return 7FFFFFFF
3199 asm("mvnpl r12, #8 "); // if +ve return KErrOverflow
3200 asm("mvnmi r12, #9 "); // if -ve return KErrUnderflow
3203 #endif // __USE_VFP_MATH
3208 __NAKED__ EXPORT_C TBool Math::IsZero(const TReal& /*aVal*/)
3210 Determines whether a value is zero.
3212 @param aVal A reference to the value to be checked.
3214 @return True, if aVal is zero; false, otherwise.
// +0 and -0 both return true: the sign bit is masked off and both words
// (exponent+mantissa) must then be all-zero.
3217 #ifdef __DOUBLE_WORDS_SWAPPED__
3218 asm("ldmia r0, {r1,r2} "); // input value into r1,r2
3220 asm("ldr r2, [r0, #0] ");
3221 asm("ldr r1, [r0, #4] ");
3223 asm("TReal64IsZero: ");
3224 asm("mov r0, #0 "); // default return value is 0
3225 asm("bics r1, r1, #0x80000000 "); // remove sign bit
3226 asm("cmpeq r2, #0 "); // and check both exponent and mantissa are zero
3227 asm("moveq r0, #1 "); // return 1 if zero
3234 __NAKED__ EXPORT_C TBool Math::IsNaN(const TReal& /*aVal*/)
3236 Determines whether a value is not a number.
3238 @param aVal A reference to the value to be checked.
3240 @return True, if aVal is not a number; false, otherwise.
// NaN = exponent 7FF with a nonzero mantissa (infinity, which also has
// exponent 7FF but zero mantissa, returns false).
3243 #ifdef __DOUBLE_WORDS_SWAPPED__
3244 asm("ldmia r0, {r1,r2} "); // input value into r1,r2
3246 asm("ldr r2, [r0, #0] ");
3247 asm("ldr r1, [r0, #4] ");
3249 asm("TReal64IsNaN: ");
3250 asm("mov r0, #0 "); // default return value is 0
3251 asm("bic r1, r1, #0x80000000 "); // remove sign bit
3252 asm("cmn r1, #0x00100000 "); // check if exponent=7FF
3254 asm("movs r1, r1, lsl #12 "); // exponent=7FF, check mantissa
3255 asm("cmpeq r2, #0 ");
3256 asm("movne r0, #1 "); // if mantissa nonzero, return 1
3263 __NAKED__ EXPORT_C TBool Math::IsInfinite(const TReal& /*aVal*/)
3265 Determines whether a value is infinite.
3267 @param aVal A reference to the value to be checked.
3269 @return True, if aVal is infinite; false, otherwise.
// Infinity of either sign = exponent 7FF, mantissa all-zero.
3272 #ifdef __DOUBLE_WORDS_SWAPPED__
3273 asm("ldmia r0, {r1,r2} "); // input value into r1,r2
3275 asm("ldr r2, [r0, #0] ");
3276 asm("ldr r1, [r0, #4] ");
3278 asm("TReal64IsInfinite: ");
3279 asm("mov r0, #0 "); // default return value is 0
3280 asm("mov r3, #0x00200000 "); // r3 == - (0x7ff00000 << 1)
3282 asm("cmneq r3, r1, lsl #1 "); // check exp=7FF && mant=0 (sign bit shifted out)
3283 asm("moveq r0, #1 "); // if so, return 1
3290 __NAKED__ EXPORT_C TBool Math::IsFinite(const TReal& /*aVal*/)
3292 Determines whether a value is finite.
3294 In this context, a value is finite if it is a valid number and
3297 @param aVal A reference to the value to be checked.
3299 @return True, if aVal is finite; false, otherwise.
// Finite = exponent != 7FF; only the exponent word is examined, so NaNs
// and infinities both return false.
3302 #ifdef __DOUBLE_WORDS_SWAPPED__
3303 asm("ldr r1, [r0, #0] "); // only need exponent - get it into r1
3305 asm("ldr r1, [r0, #4] "); // only need exponent - get it into r1
3307 asm("TReal64IsFinite: ");
3308 asm("mov r0, #0 "); // default return value is 0
3309 asm("bic r1, r1, #0x80000000 "); // remove sign bit
3310 asm("cmn r1, #0x00100000 "); // check if exponent=7FF
3311 asm("movpl r0, #1 "); // else return 1
3318 __NAKED__ EXPORT_C void Math::SetZero(TReal& /*aVal*/, TInt /*aSign*/)
3320 // Constructs zeros, assuming default sign is positive
// Writes +0.0 into aVal, or -0.0 if aSign is nonzero.
// r0=&aVal, r1=aSign.
3323 asm("cmp r1, #0 "); // test aSign
3324 asm("movne r1, #0x80000000 "); // if nonzero, set sign bit
3325 asm("mov r2, #0 "); // mantissa=0
3326 #ifdef __DOUBLE_WORDS_SWAPPED__
3327 asm("stmia r0, {r1,r2} ");
3329 asm("str r2, [r0, #0] ");
3330 asm("str r1, [r0, #4] ");
3338 __NAKED__ EXPORT_C void Math::SetNaN(TReal& /*aVal*/)
3340 // Constructs NaN (+ve sign for Java)
// Writes the quiet NaN bit pattern 7FFFFFFF:FFFFFFFF into aVal (r0=&aVal);
// the word order depends on __DOUBLE_WORDS_SWAPPED__.
3343 #ifdef __DOUBLE_WORDS_SWAPPED__
3344 asm("mvn r1, #0x80000000 "); // r1=7FFFFFFF
3345 asm("mvn r2, #0 "); // r2=FFFFFFFF
3347 asm("mvn r2, #0x80000000 "); // r2=7FFFFFFF
3348 asm("mvn r1, #0 "); // r1=FFFFFFFF
3350 asm("stmia r0, {r1,r2} ");
3357 __NAKED__ EXPORT_C void Math::SetInfinite(TReal& /*aVal*/, TInt /*aSign*/)
3359 // Constructs infinities
// Writes +infinity into aVal, or -infinity if aSign is nonzero:
// exponent 7FF, mantissa zero. r0=&aVal, r1=aSign.
3362 asm("cmp r1, #0 "); // test aSign
3363 asm("movne r1, #0x80000000 "); // if nonzero, set sign bit
3364 asm("orr r1, r1, #0x70000000 "); // set exponent to 7FF
3365 asm("orr r1, r1, #0x0FF00000 ");
3366 asm("mov r2, #0 "); // mantissa=0
3367 #ifdef __DOUBLE_WORDS_SWAPPED__
3368 asm("stmia r0, {r1,r2} ");
3370 asm("str r2, [r0, #0] ");
3371 asm("str r1, [r0, #4] ");
3378 #ifndef __USE_VFP_MATH
3379 __NAKED__ EXPORT_C TInt Math::Frac(TReal& /*aTrg*/, const TReal& /*aSrc*/)
3381 Calculates the fractional part of a number.
3383 The fractional part is that after a decimal point.
3384 Truncation is toward zero, so that
3385 Frac(2.4)=0.4, Frac(2)=0, Frac(-1)=0, Frac(-1.4)=0.4.
3387 @param aTrg A reference containing the result.
3388 @param aSrc The number whose fractional part is required.
3390 @return KErrNone if successful, otherwise another of
3391 the system-wide error codes.
// Strategy: push the sign bit onto the stack, shift the integer bits of
// the mantissa out of the top, then renormalise what remains (counting
// leading zeros either with CLZ or a binary search, depending on
// __CPU_ARM_HAS_CLZ) and reassemble sign:exponent:mantissa.
// Infinity stores +/-0 and returns KErrOverflow; NaN is stored unchanged
// and returns KErrArgument.
3394 // on entry r0=aTrg, r1=&Src
3395 // on exit r0=return code
3396 #ifdef __DOUBLE_WORDS_SWAPPED__
3397 asm("ldmia r1, {r1,r2} "); // r1,r2=aSrc
3399 asm("ldr r2, [r1, #0] ");
3400 asm("ldr r1, [r1, #4] ");
3402 asm("and r3, r1, #0x80000000 ");
3403 asm("str r3, [sp, #-4]! "); // save sign
3404 asm("mov r3, r1, lsr #20 ");
3405 asm("bic r3, r3, #0x800 "); // r3=exponent of aSrc
3406 asm("mov r12, #0x300 ");
3407 asm("orr r12, r12, #0xFE "); // r12=0x3FE
3408 asm("subs r3, r3, r12 "); // r3=exponent of aSrc-0x3FE=number of integer bits
3409 asm("ble MathFrac0 "); // if <=0, return aSrc unaltered
3410 asm("cmp r3, #53 ");
3411 asm("bge MathFrac1 "); // if >=53 integer bits, there is no fractional part
3412 asm("mov r1, r1, lsl #11 "); // left-justify mantissa in r1,r2
3413 asm("orr r1, r1, r2, lsr #21 ");
3414 asm("mov r2, r2, lsl #11 ");
3415 asm("cmp r3, #32 "); // check for >=32 integer bits
3416 asm("bge MathFrac2 ");
3417 asm("rsb r12, r3, #32 ");
3418 asm("mov r1, r1, lsl r3 "); // shift mantissa left by number of integer bits
3419 asm("orrs r1, r1, r2, lsr r12 ");
3420 asm("mov r2, r2, lsl r3 ");
3421 asm("mov r3, #0x300 "); // r3 holds exponent = 0x3FE initially
3422 asm("orr r3, r3, #0xFE ");
3423 asm("beq MathFrac3 "); // branch if >=32 shifts to normalise
3424 #ifdef __CPU_ARM_HAS_CLZ
3426 asm("mov r1, r1, lsl r12 ");
3427 asm("rsb r12, r12, #32 ");
3428 asm("orr r1, r1, r2, lsr r12 ");
3429 asm("rsb r12, r12, #32 ");
3431 asm("mov r12, #32 "); // else r12=32-number of shifts needed
3432 asm("cmp r1, #0x10000 "); // calculate shift count (binary search for top set bit)
3433 asm("movcc r1, r1, lsl #16 ");
3434 asm("subcc r12, r12, #16 ");
3435 asm("cmp r1, #0x1000000 ");
3436 asm("movcc r1, r1, lsl #8 ");
3437 asm("subcc r12, r12, #8 ");
3438 asm("cmp r1, #0x10000000 ");
3439 asm("movcc r1, r1, lsl #4 ");
3440 asm("subcc r12, r12, #4 ");
3441 asm("cmp r1, #0x40000000 ");
3442 asm("movcc r1, r1, lsl #2 ");
3443 asm("subcc r12, r12, #2 ");
3444 asm("cmp r1, #0x80000000 ");
3445 asm("movcc r1, r1, lsl #1 ");
3446 asm("subcc r12, r12, #1 ");
3447 asm("orr r1, r1, r2, lsr r12 "); // normalise
3448 asm("rsb r12, r12, #32 "); // r12=shift count
3450 asm("mov r2, r2, lsl r12 ");
3451 asm("sub r3, r3, r12 "); // exponent-=shift count
3452 asm("b MathFrac4 "); // branch to assemble and store result
3454 // come here if >=32 shifts to normalise
3456 asm("sub r3, r3, #32 "); // decrement exponent by 32
3457 asm("movs r1, r2 "); // shift left by 32, set Z if result zero
3459 asm("bne MathFrac6 "); // if result nonzero, normalise
3460 asm("beq MathFrac5 "); // branch if result zero
3462 // come here if >=32 integer bits
3464 asm("sub r3, r3, #32 ");
3465 asm("movs r1, r2, lsl r3 "); // shift left by number of integer bits, set Z if result zero
3467 asm("mov r3, #0x300 "); // r3 holds exponent = 0x3FE initially
3468 asm("orr r3, r3, #0xFE ");
3469 asm("beq MathFrac5 "); // branch if result zero
3471 asm("cmp r1, #0x10000 "); // else normalise
3472 asm("movcc r1, r1, lsl #16 ");
3473 asm("subcc r3, r3, #16 ");
3474 asm("cmp r1, #0x1000000 ");
3475 asm("movcc r1, r1, lsl #8 ");
3476 asm("subcc r3, r3, #8 ");
3477 asm("cmp r1, #0x10000000 ");
3478 asm("movcc r1, r1, lsl #4 ");
3479 asm("subcc r3, r3, #4 ");
3480 asm("cmp r1, #0x40000000 ");
3481 asm("movcc r1, r1, lsl #2 ");
3482 asm("subcc r3, r3, #2 ");
3483 asm("cmp r1, #0x80000000 ");
3484 asm("movcc r1, r1, lsl #1 ");
3485 asm("subcc r3, r3, #1 ");
3487 // come here to assemble and store result
3489 asm("bic r1, r1, #0x80000000 "); // remove integer bit
3490 asm("mov r2, r2, lsr #11 "); // shift mantissa right by 11
3491 asm("orr r2, r2, r1, lsl #21 ");
3492 asm("mov r1, r1, lsr #11 ");
3493 asm("ldr r12, [sp] "); // recover saved sign
3494 asm("orr r1, r1, r3, lsl #20 "); // exponent into r1 bits 20-30
3495 asm("orr r1, r1, r12 "); // sign bit into r1 bit 31
3497 // come here to return source unaltered
3499 asm("add sp, sp, #4 "); // discard saved sign
3500 asm("MathFrac_ok: ");
3501 #ifdef __DOUBLE_WORDS_SWAPPED__
3502 asm("stmia r0, {r1,r2} "); // store result
3504 asm("str r2, [r0, #0] ");
3505 asm("str r1, [r0, #4] ");
3507 asm("mov r0, #0 "); // return KErrNone
3510 // come here if infinity, NaN or >=53 integer bits
3512 asm("cmp r3, #0x400 "); // check for infinity/NaN
3513 asm("bhi MathFrac7 "); // branch if so
3515 // come here to return zero
3517 asm("ldr r1, [sp], #4 "); // r1 bit 31=sign, rest zero
3519 asm("b MathFrac_ok ");
3521 // come here if infinity/NaN
3523 asm("movs r12, r1, lsl #12 "); // check for infinity
3524 asm("cmpeq r2, #0 ");
3525 asm("bne MathFrac8 "); // branch if NaN
3526 asm("ldr r1, [sp], #4 "); // r1 bit 31=sign, rest zero
3528 #ifdef __DOUBLE_WORDS_SWAPPED__
3529 asm("stmia r0, {r1,r2} "); // store zero result
3531 asm("str r2, [r0, #0] ");
3532 asm("str r1, [r0, #4] ");
3534 asm("mvn r0, #8 "); // return KErrOverflow
3536 asm("MathFrac8: "); // NaN
3537 asm("add sp, sp, #4 ");
3538 #ifdef __DOUBLE_WORDS_SWAPPED__
3539 asm("stmia r0, {r1,r2} "); // store NaN unchanged
3541 asm("str r2, [r0, #0] ");
3542 asm("str r1, [r0, #4] ");
3544 asm("mvn r0, #5 "); // return KErrArgument
3547 #endif // __USE_VFP_MATH
3550 #ifdef __REALS_MACHINE_CODED__
3554 extern "C" void __math_exception(TInt aErrType);
3555 __NAKED__ EXPORT_C TReal32 __addsf3(TReal32 /*a1*/, TReal32 /*a2*/)
// gcc soft-float runtime routine: single-precision addition.
// Both operands are widened to TRealX, added with TRealXAdd, then
// narrowed back; on a nonzero error code from the narrowing step
// __math_exception is raised with that code before returning the
// (already computed) result.
3560 // a1 is in r0, a2 in r1 on entry; return with answer in r0
3561 asm("stmfd sp!, {r4-r8,lr} ");
3562 asm("bl ConvertTReal32ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3563 asm("mov r4, r1 "); // move into r4,r5,r6
3566 asm("mov r1, r0 "); // a1 into r1
3567 asm("bl ConvertTReal32ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3568 asm("bl TRealXAdd "); // add a1+a2, result in r1,r2,r3
3569 asm("bl TRealXGetTReal32 "); // convert result to TReal32 in r0, error code in r12
3570 asm("cmp r12, #0 "); // check error code
3571 __CPOPRET(eq,"r4-r8,");
3572 asm("stmfd sp!, {r0} "); // save result
3573 asm("mov r0, r12 "); // error code into r0
3574 asm("bl __math_exception "); // raise exception
3575 __POPRET("r0,r4-r8,");
3578 __NAKED__ EXPORT_C TReal64 __adddf3(TReal64 /*a1*/, TReal64 /*a2*/)
// gcc soft-float runtime routine: double-precision addition via TRealX,
// mirroring __addsf3 above but with two-word operands.
3583 // a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
3584 asm("stmfd sp!, {r4-r8,lr} ");
3585 asm("mov r7, r2 "); // save a2
3587 asm("mov r2, r1 "); // a1 into r1,r2
3589 asm("bl ConvertTReal64ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3590 asm("mov r4, r1 "); // move into r4,r5,r6
3593 asm("mov r1, r7 "); // a2 into r1,r2
3595 asm("bl ConvertTReal64ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3596 asm("bl TRealXAdd "); // add a1+a2, result in r1,r2,r3
3597 asm("bl TRealXGetTReal64 "); // convert result to TReal64 in r0,r1 error code in r12
3598 asm("cmp r12, #0 "); // check error code
3599 __CPOPRET(eq,"r4-r8,");
3600 asm("stmfd sp!, {r0,r1} "); // save result
3601 asm("mov r0, r12 "); // error code into r0
3602 asm("bl __math_exception "); // raise exception
3603 __POPRET("r0,r1,r4-r8,");
3606 __NAKED__ EXPORT_C TReal32 __subsf3(TReal32 /*a1*/, TReal32 /*a2*/)
3608 // Subtract two floats
// gcc soft-float runtime routine: single-precision a1-a2 via TRealX.
// TRealXSubtract takes the subtrahend (a2) in r4,r5,r6 and the minuend
// (a1) in r1,r2,r3; errors from the narrowing conversion are reported
// through __math_exception as in __addsf3.
3611 // a1 is in r0, a2 in r1 on entry; return with answer in r0
3612 asm("stmfd sp!, {r4-r8,lr} ");
3613 asm("bl ConvertTReal32ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3614 asm("mov r4, r1 "); // move into r4,r5,r6
3617 asm("mov r1, r0 "); // a1 into r1
3618 asm("bl ConvertTReal32ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3619 asm("bl TRealXSubtract "); // subtract a1-a2, result in r1,r2,r3
3620 asm("bl TRealXGetTReal32 "); // convert result to TReal32 in r0, error code in r12
3621 asm("cmp r12, #0 "); // check error code
3622 __CPOPRET(eq,"r4-r8,");
3623 asm("stmfd sp!, {r0} "); // save result
3624 asm("mov r0, r12 "); // error code into r0
3625 asm("bl __math_exception "); // raise exception
3626 __POPRET("r0,r4-r8,");
3629 __NAKED__ EXPORT_C TReal64 __subdf3(TReal64 /*a1*/, TReal64 /*a2*/)
3631 // Subtract two doubles
// gcc soft-float runtime routine: double-precision a1-a2 via TRealX.
// Note the conversion order differs from __adddf3: a2 is converted first
// (into r4,r5,r6) and a1 second, matching TRealXSubtract's operand order.
3634 // a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
3635 asm("stmfd sp!, {r4-r8,lr} ");
3636 asm("mov r7, r0 "); // save a1
3638 asm("mov r1, r2 "); // a2 into r1,r2
3640 asm("bl ConvertTReal64ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3641 asm("mov r4, r1 "); // move into r4,r5,r6
3644 asm("mov r1, r7 "); // a1 into r1,r2
3646 asm("bl ConvertTReal64ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3647 asm("bl TRealXSubtract "); // subtract a1-a2, result in r1,r2,r3
3648 asm("bl TRealXGetTReal64 "); // convert result to TReal64 in r0,r1 error code in r12
3649 asm("cmp r12, #0 "); // check error code
3650 __CPOPRET(eq,"r4-r8,");
3651 asm("stmfd sp!, {r0,r1} "); // save result
3652 asm("mov r0, r12 "); // error code into r0
3653 asm("bl __math_exception "); // raise exception
3654 __POPRET("r0,r1,r4-r8,");
3657 __NAKED__ EXPORT_C TInt __cmpsf3(TReal32 /*a1*/, TReal32 /*a2*/)
3659 // Compare two floats
// gcc soft-float runtime routine: three-way float comparison.
// CompareTReal32 returns a one-hot code (1/2/4/8) which is moved into
// the CPSR flag bits so conditional moves can pick the -1/0/+1 result.
3662 // a1 in r0, a2 in r1 on entry
3663 asm("stmfd sp!, {lr} ");
3664 asm("bl CompareTReal32 "); // compare the two numbers
3665 asm("mov r0, r0, lsl #28 ");
3666 asm("msr cpsr_flg, r0 "); // N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
3668 asm("mvnvs r0, #0 "); // if a1<a2 r0=-1
3669 asm("moveq r0, #1 "); // if a1>a2 r0=+1
3672 // Compare two TReal32s in r0, r1.
3673 // Return 1 if r0<r1, 2 if r0=r1, 4 if r0>r1, 8 if unordered
3674 // Registers r0,r1,r12 modified
// Helper: treats +0 and -0 as equal by canonicalising both to +0, handles
// NaNs as "unordered", then compares the raw bit patterns (valid because
// IEEE754 values of the same sign order the same way as their bits).
3675 asm("CompareTReal32: ");
3676 asm("mov r12, r0, lsr #23 ");
3677 asm("and r12, r12, #0xFF "); // r12=r0 exponent
3678 asm("cmp r12, #0xFF "); // check if r0 is a NaN
3679 asm("bne CompareTReal32a ");
3680 asm("movs r12, r0, lsl #9 "); // exponent=FF, check mantissa
3681 asm("movne r0, #8 "); // if not zero, r0 is a NaN so result is unordered
3683 asm("CompareTReal32a: ");
3684 asm("mov r12, r1, lsr #23 ");
3685 asm("and r12, r12, #0xFF "); // r12=r1 exponent
3686 asm("cmp r12, #0xFF "); // check if r1 is a NaN
3687 asm("bne CompareTReal32b ");
3688 asm("movs r12, r1, lsl #9 "); // exponent=FF, check mantissa
3689 asm("movne r0, #8 "); // if not zero, r1 is a NaN so result is unordered
3691 asm("CompareTReal32b: ");
3692 asm("bics r12, r0, #0x80000000 "); // check if r0=0 (can be +0 or -0)
3693 asm("moveq r0, #0 "); // if it is, make it +0
3694 asm("bics r12, r1, #0x80000000 "); // check if r1=0 (can be +0 or -0)
3695 asm("moveq r1, #0 "); // if it is, make it +0
3696 asm("teq r0, r1 "); // test if signs different
3697 asm("bmi CompareTReal32c "); // branch if different
3698 asm("cmp r0, r1 "); // if same, check exponents + mantissas
3699 asm("moveq r0, #2 "); // if equal, return 2
3701 asm("movhi r0, #4 "); // if r0>r1, r0=4
3702 asm("movcc r0, #1 "); // if r0<r1, r0=1
3703 asm("cmp r1, #0 "); // check signs
3704 asm("eormi r0, r0, #5 "); // if negative, switch 1 and 4
3706 asm("CompareTReal32c: "); // come here if signs different
3707 asm("cmp r0, #0 "); // check sign of r0
3708 asm("movpl r0, #4 "); // if r0 nonnegative, then r0 is greater so return 4
3709 asm("movmi r0, #1 "); // if r0 negative, return 1
3713 __NAKED__ EXPORT_C TInt __cmpdf3(TReal64 /*a1*/,TReal64 /*a2*/)
3715 // Compare two doubles
// GCC soft-float comparison helper. Calls the internal CompareTReal64
// routine (defined below, inside this naked function), which returns a
// one-hot code: 1 = a1<a2, 2 = a1==a2, 4 = a1>a2, 8 = unordered (NaN).
// The code is shifted into the CPSR flag bits so conditional instructions
// can build the integer result.
// NOTE(review): some instructions (default result value, return sequence)
// are elided from this view of the file.
3718 // a1 in r0,r1, a2 in r2,r3 on entry
3719 asm("stmfd sp!, {lr} ");
3720 asm("bl CompareTReal64 "); // compare the two numbers
3721 asm("mov r0, r0, lsl #28 "); // shift result code into the top four (flag) bits
3722 asm("msr cpsr_flg, r0 "); // N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
3724 asm("mvnvs r0, #0 "); // if a1<a2 r0=-1
3725 asm("moveq r0, #1 "); // if a1>a2 r0=+1
3728 // Compare two TReal64s in r0,r1 and r2,r3.
3729 // Return 1 if r0,r1<r2,r3
3730 // Return 2 if r0,r1=r2,r3
3731 // Return 4 if r0,r1>r2,r3
3732 // Return 8 if unordered
3733 // Registers r0,r1,r12 modified
3734 asm("CompareTReal64: ");
3735 #ifndef __DOUBLE_WORDS_SWAPPED__
// NOTE(review): word-swap sequence; some instructions are elided from this
// view. Appears to put the sign/exponent word of each operand into the
// even register before the comparison proper — confirm against full source.
3736 asm("mov r12, r0 ");
3738 asm("mov r1, r12 ");
3739 asm("mov r12, r2 ");
3741 asm("mov r3, r12 ");
3743 asm("mov r12, r0, lsr #20 ");
3744 asm("bic r12, r12, #0x800 "); // r12=first operand exponent
3745 asm("add r12, r12, #1 "); // add 1 to get usable compare value
3746 asm("cmp r12, #0x800 "); // check if first operand is a NaN
3747 asm("bne CompareTReal64a ");
3748 asm("movs r12, r0, lsl #12 "); // exponent=7FF, check mantissa
3749 asm("cmpeq r1, #0 ");
3750 asm("movne r0, #8 "); // if not zero, 1st op is a NaN so result is unordered
3752 asm("CompareTReal64a: ");
3753 asm("mov r12, r2, lsr #20 ");
3754 asm("bic r12, r12, #0x800 "); // r12=second operand exponent
3755 asm("add r12, r12, #1 "); // add 1 to get usable compare value
3756 asm("cmp r12, #0x800 "); // check if second operand is a NaN
3757 asm("bne CompareTReal64b ");
3758 asm("movs r12, r2, lsl #12 "); // exponent=7FF, check mantissa
3759 asm("cmpeq r3, #0 ");
3760 asm("movne r0, #8 "); // if not zero, 2nd op is a NaN so result is unordered
3762 asm("CompareTReal64b: ");
// Canonicalise -0 to +0 so that +0 and -0 compare equal.
3763 asm("bics r12, r0, #0x80000000 "); // check if first operand is zero (can be +0 or -0)
3764 asm("cmpeq r1, #0 ");
3765 asm("moveq r0, #0 "); // if it is, make it +0
3766 asm("bics r12, r2, #0x80000000 "); // check if second operand is zero (can be +0 or -0)
3767 asm("cmpeq r3, #0 ");
3768 asm("moveq r2, #0 "); // if it is, make it +0
// Same sign: IEEE bit patterns compare like sign-magnitude integers, so a
// 64-bit unsigned compare of {exp,mantissa} gives the magnitude ordering;
// for negative numbers the ordering is then inverted (eor with 5 swaps
// result codes 1 and 4).
3769 asm("teq r0, r2 "); // test if signs different
3770 asm("bmi CompareTReal64c "); // branch if different
3771 asm("cmp r0, r2 "); // if same, check exponents + mantissas
3772 asm("cmpeq r1, r3 ");
3773 asm("moveq r0, #2 "); // if equal, return 2
3775 asm("movhi r0, #4 "); // if 1st operand > 2nd operand, r0=4
3776 asm("movcc r0, #1 "); // if 1st operand < 2nd operand, r0=1
3777 asm("cmp r2, #0 "); // check signs
3778 asm("eormi r0, r0, #5 "); // if negative, switch 1 and 4
3780 asm("CompareTReal64c: "); // come here if signs different
3781 asm("cmp r0, #0 "); // check sign of r0
3782 asm("movpl r0, #4 "); // if first operand nonnegative, return 4
3783 asm("movmi r0, #1 "); // if first operand negative, return 1
3787 __NAKED__ EXPORT_C TInt __eqsf2(TReal32 /*a1*/, TReal32 /*a2*/)
3789 // Compare if two floats are equal
// GCC soft-float helper: returns 0 if a1==a2 (ordered), nonzero otherwise.
// Uses CompareTReal32's one-hot result code (1=<, 2===, 4=>, 8=unordered).
// NOTE(review): the flag-setting test between the call and the conditional
// moves, and the return sequence, are elided from this view.
3792 // a1 in r0, a2 in r1 on entry
3793 asm("stmfd sp!, {lr} ");
3794 asm("bl CompareTReal32 "); // compare the two numbers
3796 asm("movne r0, #0 "); // if ordered and equal return 0
3797 asm("moveq r0, #1 "); // else return 1
3801 __NAKED__ EXPORT_C TInt __eqdf2(TReal64 /*a1*/, TReal64 /*a2*/)
3803 // Compare if two doubles are equal
// GCC soft-float helper: returns 0 if a1==a2 (ordered), nonzero otherwise.
// Uses CompareTReal64's one-hot result code (1=<, 2===, 4=>, 8=unordered).
// NOTE(review): the flag-setting test between the call and the conditional
// moves, and the return sequence, are elided from this view.
3806 // a1 in r0,r1, a2 in r2,r3 on entry
3807 asm("stmfd sp!, {lr} ");
3808 asm("bl CompareTReal64 "); // compare the two numbers
3810 asm("movne r0, #0 "); // if ordered and equal return 0
3811 asm("moveq r0, #1 "); // else return 1
3815 __NAKED__ EXPORT_C TInt __nesf2(TReal32 /*a1*/, TReal32 /*a2*/)
3817 // Compare if two floats are not equal
// GCC soft-float helper: returns 1 if a1!=a2 and both operands are ordered,
// 0 if equal or unordered. Bit mask 5 = (1|4) = less-than or greater-than.
3820 // a1 in r0, a2 in r1 on entry
3821 asm("stmfd sp!, {lr} ");
3822 asm("bl CompareTReal32 "); // compare the two numbers
3823 asm("tst r0, #5 "); // test if ordered and unequal
3824 asm("moveq r0, #0 "); // if equal or unordered return 0
3825 asm("movne r0, #1 "); // if ordered and unequal return 1
3829 __NAKED__ EXPORT_C TInt __nedf2(TReal64 /*a1*/, TReal64 /*a2*/)
3831 // Compare if two doubles are not equal
// GCC soft-float helper: returns 1 if a1!=a2 and both operands are ordered,
// 0 if equal or unordered. Bit mask 5 = (1|4) = less-than or greater-than.
3834 // a1 in r0,r1, a2 in r2,r3 on entry
3835 asm("stmfd sp!, {lr} ");
3836 asm("bl CompareTReal64 "); // compare the two numbers
3837 asm("tst r0, #5 "); // test if ordered and unequal
3838 asm("moveq r0, #0 "); // if equal or unordered return 0
3839 asm("movne r0, #1 "); // if ordered and unequal return 1
3843 __NAKED__ EXPORT_C TInt __gtsf2(TReal32 /*a1*/, TReal32 /*a2*/)
3845 // Compare if one float is greater than another
// GCC soft-float helper: returns +1 if a1>a2 (ordered), otherwise -1
// (including the unordered/NaN case). Bit 4 = greater-than.
3848 // a1 in r0, a2 in r1 on entry
3849 asm("stmfd sp!, {lr} ");
3850 asm("bl CompareTReal32 "); // compare the two numbers
3851 asm("tst r0, #4 "); // test if ordered and a1>a2
3852 asm("movne r0, #1 "); // if ordered and a1>a2 return +1
3853 asm("mvneq r0, #0 "); // else return -1
3857 __NAKED__ EXPORT_C TInt __gtdf2(TReal64 /*a1*/, TReal64 /*a2*/)
3859 // Compare if one double is greater than another
// GCC soft-float helper: returns +1 if a1>a2 (ordered), otherwise -1
// (including the unordered/NaN case). Bit 4 = greater-than.
3862 // a1 in r0,r1, a2 in r2,r3 on entry
3863 asm("stmfd sp!, {lr} ");
3864 asm("bl CompareTReal64 "); // compare the two numbers
3865 asm("tst r0, #4 "); // test if ordered and a1>a2
3866 asm("movne r0, #1 "); // if ordered and a1>a2 return +1
3867 asm("mvneq r0, #0 "); // else return -1
3871 __NAKED__ EXPORT_C TInt __gesf2(TReal32 /*a1*/, TReal32 /*a2*/)
3873 // Compare if one float is greater than or equal to another
// GCC soft-float helper: returns +1 if a1>=a2 (ordered), otherwise -1
// (including the unordered/NaN case). Mask 6 = (2|4) = equal or greater.
3876 // a1 in r0, a2 in r1 on entry
3877 asm("stmfd sp!, {lr} ");
3878 asm("bl CompareTReal32 "); // compare the two numbers
3879 asm("tst r0, #6 "); // test if ordered and a1>=a2
3880 asm("movne r0, #1 "); // if ordered and a1>=a2 return +1
3881 asm("mvneq r0, #0 "); // else return -1
3885 __NAKED__ EXPORT_C TInt __gedf2(TReal64 /*a1*/, TReal64 /*a2*/)
3887 // Compare if one double is greater than or equal to another
// GCC soft-float helper: returns +1 if a1>=a2 (ordered), otherwise -1
// (including the unordered/NaN case). Mask 6 = (2|4) = equal or greater.
3890 // a1 in r0,r1, a2 in r2,r3 on entry
3891 asm("stmfd sp!, {lr} ");
3892 asm("bl CompareTReal64 "); // compare the two numbers
3893 asm("tst r0, #6 "); // test if ordered and a1>=a2
3894 asm("movne r0, #1 "); // if ordered and a1>=a2 return +1
3895 asm("mvneq r0, #0 "); // else return -1
3899 __NAKED__ EXPORT_C TInt __ltsf2(TReal32 /*a1*/, TReal32 /*a2*/)
3901 // Compare if one float is less than another
// GCC soft-float helper: returns -1 if a1<a2 (ordered), otherwise +1
// (including the unordered/NaN case). Bit 1 = less-than.
3904 // a1 in r0, a2 in r1 on entry
3905 asm("stmfd sp!, {lr} ");
3906 asm("bl CompareTReal32 "); // compare the two numbers
3907 asm("tst r0, #1 "); // test if ordered and a1<a2
3908 asm("mvnne r0, #0 "); // if ordered and a1<a2 return -1
3909 asm("moveq r0, #1 "); // else return +1
3913 __NAKED__ EXPORT_C TInt __ltdf2(TReal64 /*a1*/, TReal64 /*a2*/)
3915 // Compare if one double is less than another
// GCC soft-float helper: returns -1 if a1<a2 (ordered), otherwise +1
// (including the unordered/NaN case). Bit 1 = less-than.
3918 // a1 in r0,r1, a2 in r2,r3 on entry
3919 asm("stmfd sp!, {lr} ");
3920 asm("bl CompareTReal64 "); // compare the two numbers
3921 asm("tst r0, #1 "); // test if ordered and a1<a2
3922 asm("mvnne r0, #0 "); // if ordered and a1<a2 return -1
3923 asm("moveq r0, #1 "); // else return +1
3927 __NAKED__ EXPORT_C TInt __lesf2(TReal32 /*a1*/, TReal32 /*a2*/)
3929 // Compare if one float is less than or equal to another
// GCC soft-float helper: returns -1 if a1<=a2 (ordered), otherwise +1
// (including the unordered/NaN case). Mask 3 = (1|2) = less or equal.
3932 // a1 in r0, a2 in r1 on entry
3933 asm("stmfd sp!, {lr} ");
3934 asm("bl CompareTReal32 "); // compare the two numbers
3935 asm("tst r0, #3 "); // test if ordered and a1<=a2
3936 asm("mvnne r0, #0 "); // if ordered and a1<=a2 return -1
3937 asm("moveq r0, #1 "); // else return +1
3941 __NAKED__ EXPORT_C TInt __ledf2(TReal64 /*a1*/, TReal64 /*a2*/)
3943 // Compare if one double is less than or equal to another
// GCC soft-float helper: returns -1 if a1<=a2 (ordered), otherwise +1
// (including the unordered/NaN case). Mask 3 = (1|2) = less or equal.
3946 // a1 in r0,r1, a2 in r2,r3 on entry
3947 asm("stmfd sp!, {lr} ");
3948 asm("bl CompareTReal64 "); // compare the two numbers
3949 asm("tst r0, #3 "); // test if ordered and a1<=a2
3950 asm("mvnne r0, #0 "); // if ordered and a1<=a2 return -1
3951 asm("moveq r0, #1 "); // else return +1
3955 __NAKED__ EXPORT_C TReal32 __mulsf3(TReal32 /*a1*/,TReal32 /*a2*/)
3957 // Multiply two floats
// GCC soft-float helper. Widens both operands to TRealX (the extended
// internal format held in three registers), multiplies, then narrows back
// to TReal32. If the narrowing conversion reports an error in r12, the
// result is saved on the stack and __math_exception is raised with that
// error code before returning the saved result.
3960 // a1 is in r0, a2 in r1 on entry; return with answer in r0
3961 asm("stmfd sp!, {r4-r7,lr} ");
3962 asm("bl ConvertTReal32ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3963 asm("mov r4, r1 "); // move into r4,r5,r6
// NOTE(review): the companion moves of r2,r3 into r5,r6 are elided from
// this view.
3966 asm("mov r1, r0 "); // a1 into r1
3967 asm("bl ConvertTReal32ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3968 asm("bl TRealXMultiply "); // multiply a1*a2, result in r1,r2,r3
3969 asm("bl TRealXGetTReal32 "); // convert result to TReal32 in r0, error code in r12
3970 asm("cmp r12, #0 "); // check error code
3971 __CPOPRET(eq,"r4-r7,"); // no error: restore r4-r7 and return
3972 asm("stmfd sp!, {r0} "); // save result
3973 asm("mov r0, r12 "); // error code into r0
3974 asm("bl __math_exception "); // raise exception
3975 __POPRET("r0,r4-r7,"); // pop saved result into r0 and return
3978 __NAKED__ EXPORT_C TReal64 __muldf3(TReal64 /*a1*/, TReal64 /*a2*/)
3980 // Multiply two doubles
// GCC soft-float helper. Widens both operands to TRealX, multiplies, then
// narrows back to TReal64. If the narrowing conversion reports an error in
// r12, the result is saved and __math_exception is raised with that code
// before returning the saved result.
3983 // a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
3984 asm("stmfd sp!, {r4-r8,lr} ");
3985 asm("mov r7, r2 "); // save a2
// NOTE(review): the companion save of r3 (into r8) is elided from this view.
3987 asm("mov r2, r1 "); // a1 into r1,r2
3989 asm("bl ConvertTReal64ToTRealX "); // convert a1 to TRealX in r1,r2,r3
3990 asm("mov r4, r1 "); // move into r4,r5,r6
// NOTE(review): the companion moves of r2,r3 into r5,r6 are elided from
// this view.
3993 asm("mov r1, r7 "); // a2 into r1,r2
3995 asm("bl ConvertTReal64ToTRealX "); // convert a2 to TRealX in r1,r2,r3
3996 asm("bl TRealXMultiply "); // multiply a1*a2, result in r1,r2,r3
3997 asm("bl TRealXGetTReal64 "); // convert result to TReal64 in r0,r1 error code in r12
3998 asm("cmp r12, #0 "); // check error code
3999 __CPOPRET(eq,"r4-r8,"); // no error: restore r4-r8 and return
4000 asm("stmfd sp!, {r0,r1} "); // save result
4001 asm("mov r0, r12 "); // error code into r0
4002 asm("bl __math_exception "); // raise exception
4003 __POPRET("r0,r1,r4-r8,"); // pop saved result into r0,r1 and return
4006 __NAKED__ EXPORT_C TReal32 __divsf3(TReal32 /*a1*/, TReal32 /*a2*/)
4008 // Divide two floats
// GCC soft-float helper. Widens both operands to TRealX, divides, then
// narrows back to TReal32. The divide's own error code is preserved in r9
// so that a divide-by-zero error (KErrDivideByZero == -41) takes priority
// over any error reported by the narrowing conversion.
4011 // a1 is in r0, a2 in r1 on entry; return with answer in r0
4012 asm("stmfd sp!, {r4-r9,lr} ");
4013 asm("bl ConvertTReal32ToTRealX "); // convert a2 to TRealX in r1,r2,r3
4014 asm("mov r4, r1 "); // move into r4,r5,r6
// NOTE(review): the companion moves of r2,r3 into r5,r6 are elided from
// this view.
4017 asm("mov r1, r0 "); // a1 into r1
4018 asm("bl ConvertTReal32ToTRealX "); // convert a1 to TRealX in r1,r2,r3
4019 asm("bl TRealXDivide "); // divide a1/a2, result in r1,r2,r3 error code in r12
4020 asm("mov r9, r12 "); // save error code in case it's division by zero
4021 asm("bl TRealXGetTReal32 "); // convert result to TReal32 in r0, error code in r12
4022 asm("cmn r9, #41 "); // check for KErrDivideByZero
4023 asm("moveq r12, r9 "); // divide-by-zero overrides the conversion's error code
4024 asm("cmp r12, #0 "); // check error code
4025 __CPOPRET(eq,"r4-r9,"); // no error: restore r4-r9 and return
4026 asm("stmfd sp!, {r0} "); // save result
4027 asm("mov r0, r12 "); // error code into r0
4028 asm("bl __math_exception "); // raise exception
4029 __POPRET("r0,r4-r9,"); // pop saved result into r0 and return
4032 __NAKED__ EXPORT_C TReal64 __divdf3(TReal64 /*a1*/, TReal64 /*a2*/)
4034 // Divide two doubles
// GCC soft-float helper. Widens both operands to TRealX, divides, then
// narrows back to TReal64. The divide's own error code is preserved in r9
// so that a divide-by-zero error (KErrDivideByZero == -41) takes priority
// over any error reported by the narrowing conversion.
4037 // a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
4038 asm("stmfd sp!, {r4-r9,lr} ");
4039 asm("mov r7, r0 "); // save a1
// NOTE(review): the companion save of r1 (into r8) is elided from this view.
4041 asm("mov r1, r2 "); // a2 into r1,r2
4043 asm("bl ConvertTReal64ToTRealX "); // convert a2 to TRealX in r1,r2,r3
4044 asm("mov r4, r1 "); // move into r4,r5,r6
// NOTE(review): the companion moves of r2,r3 into r5,r6 are elided from
// this view.
4047 asm("mov r1, r7 "); // a1 into r1,r2
4049 asm("bl ConvertTReal64ToTRealX "); // convert a1 to TRealX in r1,r2,r3
4050 asm("bl TRealXDivide "); // divide a1/a2, result in r1,r2,r3
4051 asm("mov r9, r12 "); // save error code in case it's division by zero
4052 asm("bl TRealXGetTReal64 "); // convert result to TReal64 in r0,r1 error code in r12
4053 asm("cmn r9, #41 "); // check for KErrDivideByZero
4054 asm("moveq r12, r9 "); // divide-by-zero overrides the conversion's error code
4055 asm("cmp r12, #0 "); // check error code
4056 __CPOPRET(eq,"r4-r9,"); // no error: restore r4-r9 and return
4057 asm("stmfd sp!, {r0,r1} "); // save result
4058 asm("mov r0, r12 "); // error code into r0
4059 asm("bl __math_exception "); // raise exception
4060 __POPRET("r0,r1,r4-r9,"); // pop saved result into r0,r1 and return
4063 __NAKED__ EXPORT_C TReal32 __negsf2(TReal32 /*a1*/)
// Negate a float by toggling the IEEE sign bit (bit 31); no other bits
// change, so zeros, infinities and NaNs are handled identically.
// NOTE(review): the return instruction is elided from this view.
4068 // a1 in r0 on entry, return value in r0
4069 asm("eor r0, r0, #0x80000000 "); // change sign bit
4073 __NAKED__ EXPORT_C TReal64 __negdf2(TReal64 /*a1*/)
// Negate a double by toggling the IEEE sign bit in the exponent word; no
// other bits change, so zeros, infinities and NaNs are handled identically.
// NOTE(review): the return instruction is elided from this view; r0 is
// presumed to hold the sign/exponent word here — confirm against full source.
4078 // a1 in r0,r1 on entry, return value in r0,r1
4079 asm("eor r0, r0, #0x80000000 "); // change sign bit
4083 __NAKED__ EXPORT_C TReal32 __floatsisf(TInt /*a1*/)
4085 // Convert int to float
// GCC soft-float helper. Takes the absolute value, normalises it so the
// leading 1 sits in bit 31 while tracking the binary exponent, then rounds
// the 24-bit mantissa to nearest (ties to even) and packs sign/exponent/
// mantissa into IEEE single format.
// NOTE(review): the early return for a zero input and the final return
// instruction are elided from this view.
4088 // a1 in r0 on entry, return value in r0
4089 asm("cmp r0, #0 "); // test for zero or negative
4091 asm("and ip, r0, #0x80000000 "); // ip=bit 31 of r0 (sign bit)
4092 asm("rsbmi r0, r0, #0 "); // if negative, negate it
4093 asm("mov r2, #0x9E "); // r2=0x9E=exponent of 2^31
// Binary-search normalisation: shift left by 16/8/4/2/1 while the value
// fits, decrementing the exponent by the same amount each time.
4094 asm("cmp r0, #0x00010000 "); // normalise integer, adjusting exponent
4095 asm("movcc r0, r0, lsl #16 ");
4096 asm("subcc r2, r2, #16 ");
4097 asm("cmp r0, #0x01000000 ");
4098 asm("movcc r0, r0, lsl #8 ");
4099 asm("subcc r2, r2, #8 ");
4100 asm("cmp r0, #0x10000000 ");
4101 asm("movcc r0, r0, lsl #4 ");
4102 asm("subcc r2, r2, #4 ");
4103 asm("cmp r0, #0x40000000 ");
4104 asm("movcc r0, r0, lsl #2 ");
4105 asm("subcc r2, r2, #2 ");
4106 asm("cmp r0, #0x80000000 ");
4107 asm("movcc r0, r0, lsl #1 ");
4108 asm("subcc r2, r2, #1 ");
// Round to nearest, ties to even: on an exact tie (rounding bits == 0x80)
// the carry used for the increment is taken from the mantissa LSB itself.
4109 asm("and r1, r0, #0xFF "); // r1=bottom 8 bits=rounding bits
4110 asm("cmp r1, #0x80 "); // check if we need to round up (carry=1 if we do)
4111 asm("moveqs r1, r0, lsr #9 "); // if bottom 8 bits=0x80, set carry=LSB of mantissa
4112 asm("addcss r0, r0, #0x100 "); // round up if necessary
4113 asm("addcs r2, r2, #1 "); // if carry, increment exponent
4114 asm("bic r0, r0, #0x80000000 "); // remove top bit (integer bit of mantissa implicit)
4115 asm("mov r0, r0, lsr #8 "); // mantissa into r0 bits 0-22
4116 asm("orr r0, r0, r2, lsl #23 "); // exponent into r0 bits 23-30
4117 asm("orr r0, r0, ip "); // sign bit into r0 bit 31
4121 __NAKED__ EXPORT_C TReal64 __floatsidf(TInt /*a1*/)
4123 // Convert int to double
// GCC soft-float helper. Same normalisation scheme as __floatsisf, but no
// rounding step is needed: every 32-bit integer is exactly representable
// in a double's 53-bit mantissa.
// NOTE(review): the early return for a zero input and the final return
// sequence (including the __DOUBLE_WORDS_SWAPPED__ variant) are elided
// from this view.
4126 // a1 in r0 on entry, return value in r0,r1
4127 asm("cmp r0, #0 "); // test for zero or negative
4128 asm("moveq r1, #0 "); // if zero, return 0
4130 asm("and ip, r0, #0x80000000 "); // ip=bit 31 of r0 (sign bit)
4131 asm("rsbmi r0, r0, #0 "); // if negative, negate it
4132 asm("mov r2, #0x400 "); // r2=0x41E=exponent of 2^31
4133 asm("orr r2, r2, #0x1E "); // (0x41E won't fit in one immediate, so build it in two)
// Binary-search normalisation: shift left by 16/8/4/2/1 while the value
// fits, decrementing the exponent by the same amount each time.
4134 asm("cmp r0, #0x00010000 "); // normalise integer, adjusting exponent
4135 asm("movcc r0, r0, lsl #16 ");
4136 asm("subcc r2, r2, #16 ");
4137 asm("cmp r0, #0x01000000 ");
4138 asm("movcc r0, r0, lsl #8 ");
4139 asm("subcc r2, r2, #8 ");
4140 asm("cmp r0, #0x10000000 ");
4141 asm("movcc r0, r0, lsl #4 ");
4142 asm("subcc r2, r2, #4 ");
4143 asm("cmp r0, #0x40000000 ");
4144 asm("movcc r0, r0, lsl #2 ");
4145 asm("subcc r2, r2, #2 ");
4146 asm("cmp r0, #0x80000000 ");
4147 asm("movcc r0, r0, lsl #1 ");
4148 asm("subcc r2, r2, #1 ");
4149 asm("bic r0, r0, #0x80000000 "); // remove top bit (integer bit of mantissa implicit)
4150 asm("mov r1, r0, lsl #21 "); // low 11 bits of mantissa into r1
4151 asm("mov r0, r0, lsr #11 "); // high 20 bits of mantissa into r0 bits 0-19
4152 asm("orr r0, r0, r2, lsl #20 "); // exponent into r0 bits 20-30
4153 asm("orr r0, r0, ip "); // sign bit into r0 bit 31
4154 #ifndef __DOUBLE_WORDS_SWAPPED__
4162 __NAKED__ EXPORT_C TInt __fixsfsi(TReal32 /*a1*/)
4164 // Convert float to int
// GCC soft-float helper (truncating conversion). NaN inputs return 0;
// values too large for a signed 32-bit int saturate to 0x7FFFFFFF (positive)
// or 0x80000000 (negative).
// NOTE(review): the return instructions and the fixsfsi1/fixsfsi2 label
// lines themselves are elided from this view of the file.
4167 // a1 in r0 on entry, return value in r0
4168 asm("mov r1, r0, lsr #23 ");
4169 asm("and r1, r1, #0xFF "); // r1=exponent of a1
4170 asm("cmp r1, #0xFF "); // check for NaN
4171 asm("bne fixsfsi1 ");
4172 asm("movs r2, r0, lsl #9 "); // exponent=FF, check mantissa
4173 asm("movne r0, #0 "); // if non-zero, a1 is a NaN so return 0
4176 asm("rsbs r1, r1, #0x9E "); // r1=number of shifts to produce integer
4177 asm("ble fixsfsi2 "); // if <=0, saturate result
4178 asm("cmp r0, #0 "); // check sign bit
4179 asm("orr r0, r0, #0x00800000 "); // set implicit integer bit
4180 asm("mov r0, r0, lsl #8 "); // shift mantissa up so MSB is in MSB of r0
4181 asm("mov r0, r0, lsr r1 "); // r0=absolute integer
4182 asm("rsbmi r0, r0, #0 "); // if negative, negate
4185 asm("cmp r0, #0 "); // check sign
4186 asm("mov r0, #0x80000000 ");
4187 asm("subpl r0, r0, #1 "); // if -ve return 80000000, if +ve return 7FFFFFFF
4191 __NAKED__ EXPORT_C TInt __fixdfsi(TReal64 /*a1*/)
4193 // Convert double to int
// GCC soft-float helper (truncating conversion). NaN inputs return 0;
// values too large for a signed 32-bit int saturate to 0x7FFFFFFF (positive)
// or 0x80000000 (negative); values too small underflow to 0.
// NOTE(review): the word-swap sequence for __DOUBLE_WORDS_SWAPPED__, the
// return instructions, and the fixdfsi1/fixdfsi2 label lines themselves are
// elided from this view of the file.
4196 // a1 in r0,r1 on entry, return value in r0
4197 #ifndef __DOUBLE_WORDS_SWAPPED__
4202 asm("mov r2, r0, lsr #20 ");
4203 asm("bic r2, r2, #0x800 "); // r2=exponent of a1
4204 asm("add r3, r2, #1 ");
4205 asm("cmp r3, #0x800 "); // check for NaN
4206 asm("bne fixdfsi1 ");
4207 asm("movs r3, r0, lsl #12 "); // exponent=7FF, check mantissa
4208 asm("cmpeq r1, #0 ");
4209 asm("movne r0, #0 "); // if non-zero, a1 is a NaN so return 0
4212 asm("mov r3, #0x400 ");
4213 asm("orr r3, r3, #0x1E "); // r3=0x41E (exponent of 2^31)
4214 asm("subs r2, r3, r2 "); // r2=number of shifts to produce integer
4215 asm("ble fixdfsi2 "); // if <=0, saturate result
4216 asm("cmp r2, #31 "); // check if more than 31 shifts needed
4217 asm("movhi r0, #0 "); // if so, underflow result to 0
4219 asm("cmp r0, #0 "); // check sign bit
4220 asm("orr r0, r0, #0x00100000 "); // set implicit integer bit
4221 asm("mov r0, r0, lsl #11 "); // shift mantissa up so MSB is in MSB of r0
4222 asm("orr r0, r0, r1, lsr #21 "); // put in bits from r1
4223 asm("mov r0, r0, lsr r2 "); // r0=absolute integer
4224 asm("rsbmi r0, r0, #0 "); // if negative, negate
4227 asm("cmp r0, #0 "); // check sign
4228 asm("mov r0, #0x80000000 ");
4229 asm("subpl r0, r0, #1 "); // if -ve return 80000000, if +ve return 7FFFFFFF
4233 __NAKED__ EXPORT_C TReal64 __extendsfdf2(TReal32 /*a1*/)
4235 // Convert a float to a double
// GCC soft-float helper. The conversion is exact: normal numbers get the
// exponent rebased (+0x380 = 1023-127), infinities/NaNs keep exponent 7FF,
// and single-precision denormals are renormalised, since the double's wider
// exponent range can represent them as normal numbers.
// NOTE(review): the return sequences (including the __DOUBLE_WORDS_SWAPPED__
// variant) and the all-zero early-out are elided from this view.
4238 // a1 in r0, return in r0,r1
4239 asm("mov r3, r0, lsr #3 ");
4240 asm("ands r3, r3, #0x0FF00000 "); // r3 bits 20-27 hold exponent, Z=1 if zero/denormal
4241 asm("mov r1, r0, lsl #9 "); // r1 = TReal32 mantissa << 9
4242 asm("and r0, r0, #0x80000000 "); // leave only sign bit in r0
4243 asm("beq extendsfdf2a "); // branch if zero/denormal
4244 asm("cmp r3, #0x0FF00000 "); // check for infinity or NaN
4245 asm("orrcs r3, r3, #0x70000000 "); // if infinity or NaN, exponent = 7FF
4246 asm("addcc r3, r3, #0x38000000 "); // else exponent = TReal32 exponent + 380
4247 asm("orr r0, r0, r1, lsr #12 "); // top 20 mantissa bits into r0 bits 0-19
4248 asm("mov r1, r1, lsl #20 "); // remaining mantissa bits in r1 bits 29-31
4249 asm("orr r0, r0, r3 "); // exponent into r0 bits 20-30
4251 asm("extendsfdf2a: "); // come here if zero or denormal
4252 asm("cmp r1, #0 "); // check for zero
4254 asm("mov r3, #0x38000000 "); // else exponent = 380 (highest denormal exponent)
// Binary-search normalisation of the denormal mantissa: shift left by
// 16/8/4/2/1, decrementing the exponent (held in bits 20-30 position,
// hence steps of 1<<20) by the same amount each time.
4255 asm("cmp r1, #0x10000 "); // normalise mantissa, decrementing exponent as needed
4256 asm("movcc r1, r1, lsl #16 ");
4257 asm("subcc r3, r3, #0x01000000 ");
4258 asm("cmp r1, #0x1000000 ");
4259 asm("movcc r1, r1, lsl #8 ");
4260 asm("subcc r3, r3, #0x00800000 ");
4261 asm("cmp r1, #0x10000000 ");
4262 asm("movcc r1, r1, lsl #4 ");
4263 asm("subcc r3, r3, #0x00400000 ");
4264 asm("cmp r1, #0x40000000 ");
4265 asm("movcc r1, r1, lsl #2 ");
4266 asm("subcc r3, r3, #0x00200000 ");
4267 asm("cmp r1, #0x80000000 ");
4268 asm("movcc r1, r1, lsl #1 ");
4269 asm("subcc r3, r3, #0x00100000 ");
4270 asm("add r1, r1, r1 "); // shift mantissa left one more to remove integer bit
4271 asm("orr r0, r0, r1, lsr #12 "); // top 20 mantissa bits into r0 bits 0-19
4272 asm("mov r1, r1, lsl #20 "); // remaining mantissa bits in r1 bits 29-31
4273 asm("orr r0, r0, r3 "); // exponent into r0 bits 20-30
4275 #ifndef __DOUBLE_WORDS_SWAPPED__
4283 __NAKED__ EXPORT_C TReal32 __truncdfsf2(TReal64 /*a1*/)
4285 // Convert a double to a float
4286 // Raises an exception if conversion results in an error
// GCC soft-float helper. Delegates to TReal64GetTReal32 (below); if that
// reports an error in r12, the result is saved and __math_exception is
// raised with the error code before returning the saved result.
// NOTE(review): the no-error return path and the final return are elided
// from this view.
4289 asm("stmfd sp!, {lr} ");
4290 asm("bl TReal64GetTReal32 "); // do the conversion
4291 asm("cmp r12, #0 "); // check error code
4293 asm("stmfd sp!, {r0} "); // else save result
4294 asm("mov r0, r12 "); // error code into r0
4295 asm("bl __math_exception "); // raise exception
4298 // Convert TReal64 in r0,r1 to TReal32 in r0
4299 // Return error code in r12
4300 // r0-r3, r12 modified
4301 // NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
// Rounds to nearest, ties to even. Error codes: 0 (KErrNone), -9
// (KErrUnderflow), -6 (KErrArgument, NaN input), or -9/-10 style negative
// codes built with mvn — see the mvn immediates below (mvn #8 = -9 =
// KErrOverflow, mvn #9 = -10 = KErrUnderflow, mvn #5 = -6 = KErrArgument).
// Bit 0 of lr is used as a "rounded down / bits lost" sticky flag.
4302 asm("TReal64GetTReal32: ");
4303 #ifndef __DOUBLE_WORDS_SWAPPED__
// NOTE(review): the word-swap sequence for this configuration is elided
// from this view.
4308 asm("mov r12, r0, lsr #20 ");
4309 asm("bic r12, r12, #0x800 "); // r12=a1 exponent
4310 asm("sub r12, r12, #0x380 "); // r12=exp in - 380 = result exponent if in range
4311 asm("cmp r12, #0xFF "); // check if input exponent too big for TReal32
4312 asm("bge TReal64GetTReal32a "); // branch if it is
4313 asm("mov r2, r0, lsl #11 "); // left justify mantissa in r2:r1
4314 asm("orr r2, r2, r1, lsr #21 ");
4315 asm("mov r1, r1, lsl #11 ");
4316 asm("orr r2, r2, #0x80000000 "); // set implied integer bit in mantissa
4317 asm("cmp r12, #0 ");
4318 asm("bgt TReal64GetTReal32b "); // branch if normalised result
4319 asm("cmn r12, #23 "); // check for total underflow or zero
4320 asm("bge TReal64GetTReal32e "); // skip if not
4321 asm("bics r2, r0, #0x80000000 "); // check if input value zero
4322 asm("cmpeq r1, #0 ");
4323 asm("moveq r12, #0 "); // if zero return KErrNone
4324 asm("mvnne r12, #9 "); // else return KErrUnderflow
4325 asm("and r0, r0, #0x80000000 "); // return zero of appropriate sign
4328 asm("TReal64GetTReal32e: "); // result will be a denormal
4329 asm("add r12, r12, #31 "); // r12=32-mantissa shift required = 32-(1-r12)
4330 asm("movs r3, r1, lsl r12 "); // r3=lost bits when r2:r1 is shifted
4331 asm("orrne lr, lr, #1 "); // if these are not zero, set rounded down flag
4332 asm("rsb r3, r12, #32 ");
4333 asm("mov r1, r1, lsr r3 ");
4334 asm("orr r1, r1, r2, lsl r12 ");
4335 asm("mov r2, r2, lsr r3 "); // r2 top 24 bits now give unrounded result mantissa
4336 asm("mov r12, #0 "); // result exponent will be zero
4337 asm("TReal64GetTReal32b: ");
// Round to nearest, ties to even: round up if the discarded bits exceed
// half an LSB, or equal half an LSB with either the sticky flag set or an
// odd mantissa LSB.
4338 asm("movs r3, r2, lsl #24 "); // top 8 truncated bits into top byte of r3
4339 asm("bpl TReal64GetTReal32c "); // if top bit clear, truncate
4340 asm("cmp r3, #0x80000000 ");
4341 asm("cmpeq r1, #0 "); // compare rounding bits to 1000...
4342 asm("bhi TReal64GetTReal32d "); // if >, round up
4343 asm("tst lr, #1 "); // check rounded-down flag
4344 asm("bne TReal64GetTReal32d "); // if rounded down, round up
4345 asm("tst r2, #0x100 "); // else round to even - test LSB of result mantissa
4346 asm("beq TReal64GetTReal32c "); // if zero, truncate, else round up
4347 asm("TReal64GetTReal32d: "); // come here to round up
4348 asm("adds r2, r2, #0x100 "); // increment the mantissa
4349 asm("movcs r2, #0x80000000 "); // if carry, mantissa=800000
4350 asm("addcs r12, r12, #1 "); // and increment exponent
4351 asm("cmpmi r12, #1 "); // if mantissa normalised, check exponent>0
4352 asm("movmi r12, #1 "); // if normalised and exponent=0, set exponent to 1
4353 asm("TReal64GetTReal32c: "); // come here to truncate
4354 asm("and r0, r0, #0x80000000 "); // leave only sign bit in r0
4355 asm("orr r0, r0, r12, lsl #23 "); // exponent into r0 bits 23-30
4356 asm("bic r2, r2, #0x80000000 "); // remove integer bit from mantissa
4357 asm("orr r0, r0, r2, lsr #8 "); // non-integer mantissa bits into r0 bits 0-22
4358 asm("cmp r12, #0xFF "); // check for overflow
4359 asm("mvneq r12, #8 "); // if overflow, return KErrOverflow
4360 asm("biceq pc, lr, #3 "); // return (ARM-mode return per NB above, clearing mode/flag bits)
4361 asm("bics r1, r0, #0x80000000 "); // check for underflow
4362 asm("mvneq r12, #9 "); // if underflow return KErrUnderflow
4363 asm("movne r12, #0 "); // else return KErrNone
4364 asm("bic pc, lr, #3 "); // return (ARM-mode return per NB above)
4365 asm("TReal64GetTReal32a: "); // come here if overflow, infinity or NaN
4366 asm("add r3, r12, #1 ");
4367 asm("cmp r3, #0x480 "); // check for infinity or NaN (input exponent 7FF)
4368 asm("movne r1, #0 "); // if not, set mantissa to 0 for infinity result
4369 asm("movne r0, r0, lsr #20 ");
4370 asm("movne r0, r0, lsl #20 ");
4371 asm("mov r1, r1, lsr #29 "); // assemble 23 bit mantissa in r1
4372 asm("orr r1, r1, r0, lsl #3 ");
4373 asm("bic r1, r1, #0xFF000000 ");
4374 asm("and r0, r0, #0x80000000 "); // leave only sign in r0
4375 asm("orr r0, r0, #0x7F000000 "); // r0 bits 23-30 = FF = exponent
4376 asm("orr r0, r0, #0x00800000 ");
4377 asm("orr r0, r0, r1 "); // r0 bits 0-22 = result mantissa
4378 asm("movs r12, r0, lsl #9 "); // check if result is infinity or NaN
4379 asm("mvneq r12, #8 "); // if infinity return KErrOverflow
4380 asm("mvnne r12, #5 "); // else return KErrArgument
4381 asm("bic pc, lr, #3 "); // return (ARM-mode return per NB above)
4383 } // end of extern "C" declaration