// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\euser\epoc\arm\uc_realx.cia
//
//

#include <e32cia.h>
#include <u32std.h>
#include <e32math.h>
#ifdef __USE_VFP_MATH
#include <arm_vfp.h>
#endif

#if defined(__USE_VFP_MATH) && !defined(__CPU_HAS_VFP)
#error	__USE_VFP_MATH was defined but not __CPU_HAS_VFP - impossible combination, check variant.mmh
#endif

#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX()
/**
Constructs a default extended precision object.

This sets the value to zero.
*/
	{
	asm("mov r1, #0 ");
	asm("str r1, [r0] ");
	asm("str r1, [r0, #4] ");
	asm("str r1, [r0, #8] ");
	__JUMP(,lr);
	}
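
// Illustrative usage (not part of the original source): a default-constructed
// TRealX is exactly zero; all three 32-bit words of the object are cleared.
//
//   TRealX x;               // x == 0
//   TBool z = x.IsZero();   // ETrue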


__NAKED__ EXPORT_C TRealX::TRealX(TUint /*anExp*/, TUint /*aMantHi*/, TUint /*aMantLo*/)
/**
Constructs an extended precision object from an explicit exponent and
a 64 bit mantissa.

@param anExp   The exponent
@param aMantHi The high order 32 bits of the 64 bit mantissa
@param aMantLo The low order 32 bits of the 64 bit mantissa
*/
	{
	asm("str r1, [r0, #8] ");
	asm("str r2, [r0, #4] ");
	asm("str r3, [r0, #0] ");
	__JUMP(,lr);
	}
#endif
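
// Storage layout, as implied by the constructor above and the conversion
// routines below (offsets from the start of the object; any field names are
// an illustrative assumption - the asm only ever uses the offsets):
//
//   offset 0 - low 32 bits of the 64-bit mantissa
//   offset 4 - high 32 bits of the mantissa (bit 31 = explicit integer bit)
//   offset 8 - exponent in bits 16-31, rounding flags in bits 8-9,
//              sign in bit 0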


__NAKED__ EXPORT_C TInt TRealX::Set(TInt /*anInt*/)
/**
Gives this extended precision object a new value taken
from a signed integer.

@param anInt The signed integer value.

@return KErrNone, always.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("mov r2, r1 ");
	asm("bl ConvertIntToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, #0 ");				// return KErrNone
	__POPRET("");
	}


#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX(TInt /*anInt*/)
/**
Constructs an extended precision object from a signed integer value.

@param anInt The signed integer value.
*/
	{
	// fall through
	}
#endif


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TInt /*anInt*/)
/**
Assigns the specified signed integer value to this extended precision object.

@param anInt The signed integer value.

@return A reference to this extended precision object.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("mov r2, r1 ");
	asm("bl ConvertIntToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	__POPRET("");

	asm("ConvertIntToTRealX: ");
	asm("cmp r2, #0 ");
	asm("movpl r3, #0 ");				// if int>0, r3=0
	asm("beq ConvertIntToTRealX0 ");	// if int=0, return 0
	asm("movmi r3, #1 ");				// if int<0, r3=1
	asm("rsbmi r2, r2, #0 ");			// if int -ve, negate it
	asm("orr r3, r3, #0x001E0000 ");
	asm("orr r3, r3, #0x80000000 ");	// r3=exponent 801E + sign bit
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,2);
	asm("mov r2, r2, lsl r12 ");
	asm("sub r3, r3, r12, lsl #16 ");
#else
	asm("cmp r2, #0x10000 ");			// normalise mantissa, decrementing exponent as needed
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r3, r3, #0x100000 ");
	asm("cmp r2, #0x1000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r3, r3, #0x080000 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r3, r3, #0x040000 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r3, r3, #0x020000 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("subcc r3, r3, #0x010000 ");
#endif
	asm("ConvertIntToTRealX0: ");
	asm("mov r1, #0 ");					// low order word of mantissa = 0
	__JUMP(,lr);
	}
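
// The #else branch above is a branch-free substitute for CLZ: the five
// cmp/movcc/subcc steps shift the mantissa up by 16, 8, 4, 2 and 1 bits
// whenever its top bit is still clear, debiting the exponent (bits 16-31
// of r3) by the same amount each time. A C sketch of the idea, for
// illustration only:
//
//   while (!(m & 0x80000000u))   // same effect, expressed as a loop
//       { m <<= 1; --exp; }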


__NAKED__ EXPORT_C TInt TRealX::Set(const TInt64& /*anInt*/)
/**
Gives this extended precision object a new value taken from
a 64 bit integer.

@param anInt The 64 bit integer value.

@return KErrNone, always.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("ldmia r1, {r1,r2} ");
	asm("bl ConvertInt64ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, #0 ");					// return KErrNone
	__POPRET("");
	}


#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX(const TInt64& /*anInt*/)
/**
Constructs an extended precision object from a 64 bit integer.

@param anInt A reference to a 64 bit integer.
*/
	{
	// fall through
	}
#endif


__NAKED__ EXPORT_C TRealX& TRealX::operator=(const TInt64& /*anInt*/)
/**
Assigns the specified 64 bit integer value to this extended precision object.

@param anInt A reference to a 64 bit integer.

@return A reference to this extended precision object.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("ldmia r1, {r1,r2} ");
	asm("bl ConvertInt64ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	__POPRET("");

	asm("ConvertInt64ToTRealX: ");
	asm("movs r3, r2, lsr #31 ");		// sign bit into r3 bit 0
	asm("beq ConvertInt64ToTRealX1 ");	// skip if plus
	asm("rsbs r1, r1, #0 ");			// take absolute value
	asm("rsc r2, r2, #0 ");
	asm("ConvertInt64ToTRealX1: ");
	asm("cmp r2, #0 ");					// does it fit into 32 bits?
	asm("moveq r2, r1 ");				// if it does, do 32 bit conversion
	asm("beq ConvertUintToTRealX1 ");
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,2);
	asm("mov r2, r2, lsl r12 ");
	asm("rsb r12, r12, #32 ");
	asm("orr r2, r2, r1, lsr r12 ");
	asm("rsb r12, r12, #32 ");
#else
	asm("mov r12, #32 ");				// 32-number of left-shifts needed to normalise
	asm("cmp r2, #0x10000 ");			// calculate number required
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r12, r12, #16 ");
	asm("cmp r2, #0x1000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r12, r12, #8 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r12, r12, #4 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r12, r12, #2 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("subcc r12, r12, #1 ");			// r2 is now normalised
	asm("orr r2, r2, r1, lsr r12 ");	// shift r1 left into r2
	asm("rsb r12, r12, #32 ");
#endif
	asm("mov r1, r1, lsl r12 ");
	asm("add r3, r3, #0x80000000 ");	// exponent = 803E-r12
	asm("add r3, r3, #0x003E0000 ");
	asm("sub r3, r3, r12, lsl #16 ");
	__JUMP(,lr);
	}
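
// Worked example (illustrative): converting TInt64(1) << 40. The high word
// is 0x00000100, so 23 left shifts normalise the mantissa; the code then
// computes exponent = 0x803E - 23 = 0x8027, which is the TRealX encoding of
// 2^40 (bias 0x7FFF plus 40).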


__NAKED__ EXPORT_C TInt TRealX::Set(TUint /*anInt*/)
/**
Gives this extended precision object a new value taken from
an unsigned integer.

@param anInt The unsigned integer value.

@return KErrNone, always.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("mov r2, r1 ");
	asm("bl ConvertUintToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, #0 ");				// return KErrNone
	__POPRET("");
	}


#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX(TUint /*anInt*/)
/**
Constructs an extended precision object from an unsigned integer value.

@param anInt The unsigned integer value.
*/
	{
	// fall through
	}
#endif


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TUint /*anInt*/)
/**
Assigns the specified unsigned integer value to this extended precision object.

@param anInt The unsigned integer value.

@return A reference to this extended precision object.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("mov r2, r1 ");
	asm("bl ConvertUintToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	__POPRET("");

	asm("ConvertUintToTRealX: ");
	asm("mov r3, #0 ");
	asm("ConvertUintToTRealX1: ");
	asm("cmp r2, #0 ");					// check for zero
	asm("beq ConvertUintToTRealX0 ");
	asm("orr r3, r3, #0x001E0000 ");
	asm("orr r3, r3, #0x80000000 ");	// r3=exponent 801E
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,2);
	asm("mov r2, r2, lsl r12 ");
	asm("sub r3, r3, r12, lsl #16 ");
#else
	asm("cmp r2, #0x10000 ");			// normalise mantissa, decrementing exponent as needed
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r3, r3, #0x100000 ");
	asm("cmp r2, #0x1000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r3, r3, #0x080000 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r3, r3, #0x040000 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r3, r3, #0x020000 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("subcc r3, r3, #0x010000 ");
#endif
	asm("ConvertUintToTRealX0: ");
	asm("mov r1, #0 ");					// low order word of mantissa = 0
	__JUMP(,lr);
	}
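
// Usage sketch for the integer conversions above (illustrative):
//
//   TRealX x;
//   x.Set(-3);      // mantissa 0xC0000000:00000000, exponent 0x8000, sign 1
//   x.Set(0u);      // exponent word 0, i.e. TRealX zero
//
// Every 32- or 64-bit integer converts exactly, since the mantissa itself is
// 64 bits wide; that is why these Set() overloads always return KErrNone.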


__NAKED__ EXPORT_C void TRealX::SetZero(TBool /*aNegative*/)
/**
Sets the value of this extended precision object to zero.

@param aNegative ETrue, the value is a negative zero;
                 EFalse, the value is a positive zero, this is the default.
*/
	{
	asm("mov r3, #0 ");
	asm("cmp r1, #0 ");
	asm("movne r3, #1 ");
	asm("mov r2, #0 ");
	asm("mov r1, #0 ");
	asm("stmia r0, {r1,r2,r3} ");
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C void TRealX::SetNaN()
/**
Sets the value of this extended precision object to 'not a number'.
*/
	{
	asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] ");
	asm("mov r2, #0xC0000000 ");
	asm("mov r1, #0 ");
	asm("stmia r0, {r1,r2,r3} ");
	__JUMP(,lr);
	asm("__RealIndefiniteExponent: ");
	asm(".word 0xFFFF0001 ");
	}


__NAKED__ EXPORT_C void TRealX::SetInfinite(TBool /*aNegative*/)
/**
Sets the value of this extended precision object to infinity.

@param aNegative ETrue, the value is negative infinity;
                 EFalse, the value is positive infinity.
*/
	{
	asm("ldr r3, [pc, #__InfiniteExponent-.-8] ");
	asm("cmp r1, #0 ");
	asm("orrne r3, r3, #1 ");
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("stmia r0, {r1,r2,r3} ");
	__JUMP(,lr);
	asm("__InfiniteExponent: ");
	asm(".word 0xFFFF0000 ");
	}
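
// Special-value encodings written by SetZero()/SetNaN()/SetInfinite() and
// tested by the predicates below (taken from the literal words above):
//
//   zero      exponent word < 0x10000 (exponent field 0)
//   infinity  exponent 0xFFFF, mantissa 0x80000000:00000000 (integer bit only)
//   NaN       exponent 0xFFFF with any other mantissa; 'real indefinite' is
//             mantissa 0xC0000000:00000000 with the sign bit set (0xFFFF0001)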


__NAKED__ EXPORT_C TBool TRealX::IsZero() const
/**
Determines whether the extended precision value is zero.

@return True, if the extended precision value is zero; false, otherwise.
*/
	{
	asm("ldr r1, [r0, #8] ");	// get exponent word
	asm("mov r0, #0 ");			// default return value is 0
	asm("cmp r1, #0x10000 ");	// is exponent=0 ?
	asm("movcc r0, #1 ");		// if so return 1
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool TRealX::IsNaN() const
/**
Determines whether the extended precision value is 'not a number'.

@return True, if the extended precision value is 'not a number';
        false, otherwise.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");
	asm("mov r0, #0 ");					// default return value is 0
	asm("cmn r3, #0x10000 ");			// check for exponent 65535
	asm("bcc 1f ");						// branch if not
	asm("cmp r2, #0x80000000 ");		// check if infinity
	asm("cmpeq r1, #0 ");
	asm("movne r0, #1 ");				// if not, return 1
	asm("1: ");
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool TRealX::IsInfinite() const
/**
Determines whether the extended precision value is infinite.

@return True, if the extended precision value is infinite;
        false, otherwise.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");
	asm("mov r0, #0 ");						// default return value is 0
	asm("cmn r3, #0x10000 ");				// check for exponent 65535
	asm("bcc 1f ");							// branch if not
	asm("cmp r2, #0x80000000 ");			// check if infinity
	asm("cmpeq r1, #0 ");
	asm("moveq r0, #1 ");					// if it is, return 1
	asm("1: ");
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool TRealX::IsFinite() const
/**
Determines whether the extended precision value has a finite value.

@return True, if the extended precision value is finite;
        false, if the value is 'not a number' or is infinite.
*/
	{
	asm("ldr r1, [r0, #8] ");	// get exponent word
	asm("mov r0, #0 ");			// default return value is 0
	asm("cmn r1, #0x10000 ");	// is exponent=65535 (infinity or NaN) ?
	asm("movcc r0, #1 ");		// if not return 1
	__JUMP(,lr);
	}
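
// Classification sketch (illustrative): the predicates above partition every
// TRealX into zero / finite non-zero / infinite / NaN.
//
//   TRealX x;
//   x.SetInfinite(ETrue);
//   // x.IsInfinite() -> ETrue,  x.IsFinite() -> EFalse
//   x.SetNaN();
//   // x.IsNaN() -> ETrue; IsFinite() is also EFalse for a NaN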


#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX(TReal32 /*aReal*/) __SOFTFP
/**
Constructs an extended precision object from
a single precision floating point number.

@param aReal The single precision floating point value.
*/
	{
	// fall through
	}
#endif


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal32 /*aReal*/) __SOFTFP
/**
Assigns the specified single precision floating point number to
this extended precision object.

@param aReal The single precision floating point value.

@return A reference to this extended precision object.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("bl ConvertTReal32ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	__POPRET("");
	}


__NAKED__ EXPORT_C TInt TRealX::Set(TReal32 /*aReal*/) __SOFTFP
/**
Gives this extended precision object a new value taken from
a single precision floating point number.

@param aReal The single precision floating point value.

@return KErrNone, if a valid number;
        KErrOverflow, if the number is infinite;
        KErrArgument, if not a number.
*/
	{
	// aReal is in r1 on entry
	// sign in bit 31, exponent in 30-23, mantissa (non-integer bits) in 22-0
	asm("stmfd sp!, {lr} ");
	asm("bl ConvertTReal32ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmn r3, #0x10000 ");			// check for infinity or NaN
	asm("movcc r0, #0 ");				// if neither, return KErrNone
	asm("bcc trealx_set_treal32_0 ");
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("mvneq r0, #8 ");				// if so, return KErrOverflow
	asm("mvnne r0, #5 ");				// else return KErrArgument
	asm("trealx_set_treal32_0: ");
	__POPRET("");

	// Convert 32-bit real in r1 to TRealX in r1,r2,r3
	// r0 unmodified, r1,r2,r3,r12 modified
	asm("ConvertTReal32ToTRealX: ");
	asm("mov r3, r1, lsr #7 ");			// r3 bits 16-31 = TReal32 exponent
	asm("ands r3, r3, #0x00FF0000 ");
	asm("mov r2, r1, lsl #8 ");			// r2 = TReal32 mantissa << 8, bit 31 not yet in
	asm("orrne r2, r2, #0x80000000 ");	// if not zero/denormal, put in implied integer bit
	asm("orr r3, r3, r1, lsr #31 ");	// r3 bit 0 = sign bit
	asm("mov r1, #0 ");					// low word of mantissa = 0
	asm("beq ConvertTReal32ToTRealX0 ");	// branch if zero/denormal
	asm("cmp r3, #0x00FF0000 ");		// check for infinity or NaN
	asm("orrcs r3, r3, #0xFF000000 ");	// if infinity or NaN, exponent = FFFF
	asm("addcc r3, r3, #0x7F000000 ");	// else exponent = TReal32 exponent + 7F80
	asm("addcc r3, r3, #0x00800000 ");
	__JUMP(,lr);
	asm("ConvertTReal32ToTRealX0: ");	// come here if zero or denormal
	asm("adds r2, r2, r2 ");			// shift mantissa left one more and check if zero
	__JUMP(eq,lr);
	asm("add r3, r3, #0x7F000000 ");	// else exponent = 7F80 (highest denormal exponent)
	asm("add r3, r3, #0x00800000 ");
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,2);
	asm("mov r2, r2, lsl r12 ");
	asm("sub r3, r3, r12, lsl #16 ");
#else
	asm("cmp r2, #0x10000 ");			// normalise mantissa, decrementing exponent as needed
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r3, r3, #0x100000 ");
	asm("cmp r2, #0x1000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r3, r3, #0x080000 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r3, r3, #0x040000 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r3, r3, #0x020000 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("subcc r3, r3, #0x010000 ");
#endif
	__JUMP(,lr);
	}
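
// Exponent mapping check (illustrative): IEEE single precision is biased by
// 0x7F, TRealX by 0x7FFF, so the conversion adds 0x7FFF - 0x7F = 0x7F80 (the
// 0x7F000000 + 0x00800000 constants above, with the exponent sitting in bits
// 16-31). For example 1.0f (exponent field 0x7F) becomes TRealX exponent
// 0x7F + 0x7F80 = 0x7FFF, the encoding of 2^0.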


#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TRealX::TRealX(TReal64 /*aReal*/) __SOFTFP
/**
Constructs an extended precision object from
a double precision floating point number.

@param aReal The double precision floating point value.
*/
	{
	// fall through
	}
#endif


__NAKED__ EXPORT_C TRealX& TRealX::operator=(TReal64 /*aReal*/) __SOFTFP
/**
Assigns the specified double precision floating point number to
this extended precision object.

@param aReal The double precision floating point value.

@return A reference to this extended precision object.
*/
	{
	asm("stmfd sp!, {lr} ");
	asm("bl ConvertTReal64ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	__POPRET("");
	}


__NAKED__ EXPORT_C TInt TRealX::Set(TReal64 /*aReal*/) __SOFTFP
/**
Gives this extended precision object a new value taken from
a double precision floating point number.

@param aReal The double precision floating point value.

@return KErrNone, if a valid number;
        KErrOverflow, if the number is infinite;
        KErrArgument, if not a number.
*/
	{
	// aReal is in r1,r2 on entry
	// sign in bit 31 of r1, exponent in 30-20 of r1
	// mantissa (non-integer bits) in 19-0 of r1 (high) and r2 (low)
	asm("stmfd sp!, {lr} ");
	asm("bl ConvertTReal64ToTRealX ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmn r3, #0x10000 ");			// check for infinity or NaN
	asm("movcc r0, #0 ");				// if neither, return KErrNone
	asm("bcc trealx_set_treal64_0 ");
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("mvneq r0, #8 ");				// if so, return KErrOverflow
	asm("mvnne r0, #5 ");				// else return KErrArgument
	asm("trealx_set_treal64_0: ");
	__POPRET("");

	// Convert a TReal64 (in r1,r2 under GCC, or r2,r3 under RVCT):
	// if __DOUBLE_WORDS_SWAPPED__: r1=sign,exp,high mant, r2=low mant
	// else: r1 unused, r2=low mant, r3=sign,exp,high mant (due to EABI alignment requirements)
	// into a TRealX in r1,r2,r3 (r2,r1 = mantissa high,low; r3 = exp,flags,sign)
	// r0 unmodified, r1,r2,r3,r12 modified
	asm("ConvertTReal64ToTRealX: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("mov r12, r2 ");				// ls word of mantissa into r12
#else
	asm("mov r12, r2 ");				// ls word of mantissa into r12
	asm("mov r1, r3 ");
#endif
	asm("mov r3, r1, lsr #20 ");		// sign and exp into bottom 12 bits of r3
	asm("mov r2, r1, lsl #11 ");		// left justify mantissa in r2,r1
	asm("mov r3, r3, lsl #16 ");		// and into bits 16-27
	asm("bics r3, r3, #0x08000000 ");	// remove sign, leaving exponent in bits 16-26
	asm("orr r2, r2, r12, lsr #21 ");
	asm("orrne r2, r2, #0x80000000 ");	// if not zero/denormal, put in implied integer bit
	asm("orr r3, r3, r1, lsr #31 ");	// sign bit into bit 0 of r3
	asm("mov r1, r12, lsl #11 ");
	asm("beq ConvertTReal64ToTRealX0 ");	// branch if zero or denormal
	asm("mov r12, r3, lsl #5 ");		// exponent into bits 21-31 of r12
	asm("cmn r12, #0x00200000 ");		// check if exponent=7FF (infinity or NaN)
	asm("addcs r3, r3, #0xF8000000 ");	// if so, result exponent=FFFF
	asm("addcc r3, r3, #0x7C000000 ");	// else result exponent = TReal64 exponent + 7C00
	__JUMP(,lr);
	asm("ConvertTReal64ToTRealX0: ");	// come here if zero or denormal
	asm("adds r1, r1, r1 ");			// shift mantissa left one more bit
	asm("adcs r2, r2, r2 ");
	asm("cmpeq r1, #0 ");				// and test for zero
	__JUMP(eq,lr);
	asm("add r3, r3, #0x7C000000 ");	// else exponent=7C00 (highest denormal exponent)
	asm("cmp r2, #0 ");					// normalise - first check if r2=0
	asm("moveq r2, r1 ");				// if so, shift up by 32
	asm("moveq r1, #0 ");
	asm("subeq r3, r3, #0x200000 ");	// and subtract 32 from exponent
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,2);
	asm("mov r2, r2, lsl r12 ");
	asm("rsb r12, r12, #32 ");
	asm("orr r2, r2, r1, lsr r12 ");
	asm("rsb r12, r12, #32 ");
#else
	asm("mov r12, #32 ");				// 32-number of left-shifts needed to normalise
	asm("cmp r2, #0x10000 ");			// calculate number required
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r12, r12, #16 ");
	asm("cmp r2, #0x1000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r12, r12, #8 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r12, r12, #4 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r12, r12, #2 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("subcc r12, r12, #1 ");			// r2 is now normalised
	asm("orr r2, r2, r1, lsr r12 ");	// shift r1 left into r2
	asm("rsb r12, r12, #32 ");
#endif
	asm("mov r1, r1, lsl r12 ");
	asm("sub r3, r3, r12, lsl #16 ");	// exponent -= number of left shifts
	__JUMP(,lr);
	}
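
// As in the single precision case, the double precision bias 0x3FF maps onto
// the TRealX bias 0x7FFF by adding 0x7FFF - 0x3FF = 0x7C00 (the 0x7C000000
// constants above, the exponent being held in bits 16-26 at that point).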


__NAKED__ EXPORT_C TRealX::operator TInt() const
/**
Gets the extended precision value as a signed integer value.

The operator returns:

1. zero, if the extended precision value is not a number

2. 0x7FFFFFFF, if the value is positive and too big to fit into a TInt.

3. 0x80000000, if the value is negative and too big to fit into a TInt.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");		// get value into r1,r2,r3

	asm("ConvertTRealXToInt: ");
	asm("mov r12, #0x8000 ");			// r12=0x801E
	asm("orr r12, r12, #0x001E ");
	asm("subs r12, r12, r3, lsr #16 ");	// r12=801E-exponent
	asm("bls ConvertTRealXToInt1 ");	// branch if exponent>=801E
	asm("cmp r12, #31 ");				// test if exponent<7FFF
	asm("movhi r0, #0 ");				// if so, underflow result to zero
	__JUMP(hi,lr);
	asm("mov r0, r2, lsr r12 ");		// shift mantissa right to form integer
	asm("tst r3, #1 ");					// check sign bit
	asm("rsbne r0, r0, #0 ");			// if negative, r0=-r0
	__JUMP(,lr);
	asm("ConvertTRealXToInt1: ");
	asm("cmn r3, #0x10000 ");			// check for infinity or NaN
	asm("bcc ConvertTRealXToInt2 ");	// branch if neither
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("movne r0, #0 ");				// if NaN, return 0
	__JUMP(ne,lr);
	asm("ConvertTRealXToInt2: ");
	asm("mov r0, #0x80000000 ");		// return 0x80000000 if -ve overflow, 0x7FFFFFFF if +ve
	asm("movs r3, r3, lsr #1 ");
	asm("sbc r0, r0, #0 ");
	__JUMP(,lr);
	}
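
// The saturation at ConvertTRealXToInt2 is branch-free: "movs r3, r3, lsr #1"
// moves the sign bit into the carry flag, and "sbc r0, r0, #0" then yields
// r0 = 0x80000000 for a negative value (carry set, nothing subtracted) or
// 0x7FFFFFFF for a positive one (carry clear, one subtracted).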


__NAKED__ EXPORT_C TRealX::operator TUint() const
/**
Returns the extended precision value as an unsigned integer value.

The operator returns:

1. zero, if the extended precision value is not a number

2. 0xFFFFFFFF, if the value is positive and too big to fit into a TUint.

3. zero, if the value is negative and too big to fit into a TUint.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");		// get value into r1,r2,r3

	asm("ConvertTRealXToUint: ");
	asm("mov r12, #0x8000 ");			// r12=0x801E
	asm("orr r12, r12, #0x001E ");
	asm("subs r12, r12, r3, lsr #16 ");	// r12=801E-exponent
	asm("bcc ConvertTRealXToUint1 ");	// branch if exponent>801E
	asm("cmp r12, #31 ");				// test if exponent<7FFF
	asm("movhi r0, #0 ");				// if so, underflow result to zero
	__JUMP(hi,lr);
	asm("tst r3, #1 ");					// check sign bit
	asm("moveq r0, r2, lsr r12 ");		// if +ve, shift mantissa right to form integer
	asm("movne r0, #0 ");				// if negative, r0=0
	__JUMP(,lr);
	asm("ConvertTRealXToUint1: ");
	asm("mov r0, #0 ");					// r0=0 initially
	asm("cmn r3, #0x10000 ");			// check for infinity or NaN
	asm("bcc ConvertTRealXToUint2 ");	// branch if neither
	asm("cmp r2, #0x80000000 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	__JUMP(ne,lr);
	asm("ConvertTRealXToUint2: ");
	asm("movs r3, r3, lsr #1 ");		// sign bit into carry
	asm("sbc r0, r0, #0 ");				// r0=0 if -ve, 0xFFFFFFFF if +ve
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TRealX::operator TInt64() const
/**
Returns the extended precision value as a 64 bit integer value.

The operator returns:

1. zero, if the extended precision value is not a number

2. 0x7FFFFFFF FFFFFFFF, if the value is positive and too big to fit
   into a TInt64.

3. 0x80000000 00000000, if the value is negative and too big to fit
   into a TInt64.
*/
	{
	// r0 = this, result in r1:r0
	asm("ldmia r0, {r0,r1,r2} ");		// get value into r0,r1,r2
	asm("ConvertTRealXToInt64: ");
	asm("mov r3, #0x8000 ");			// r3=0x803E
	asm("orr r3, r3, #0x003E ");
	asm("subs r3, r3, r2, lsr #16 ");	// r3=803E-exponent
	asm("bls ConvertTRealXToInt64a ");	// branch if exponent>=803E
	asm("cmp r3, #63 ");				// test if exponent<7FFF
	asm("movhi r1, #0 ");				// if so, underflow result to zero
	asm("movhi r0, #0 ");
	__JUMP(hi,lr);
	asm("cmp r3, #32 ");				// >=32 shifts required?
	asm("subcs r3, r3, #32 ");			// if so, r3-=32
	asm("movcs r0, r1, lsr r3 ");		// r1:r0 >>= (r3+32)
	asm("movcs r1, #0 ");
	asm("movcc r0, r0, lsr r3 ");		// else r1:r0>>=r3
	asm("rsbcc r3, r3, #32 ");
	asm("orrcc r0, r0, r1, lsl r3 ");
	asm("rsbcc r3, r3, #32 ");
	asm("movcc r1, r1, lsr r3 ");		// r1:r0 = absolute integer
	asm("tst r2, #1 ");					// check sign bit
	__JUMP(eq,lr);
	asm("rsbs r0, r0, #0 ");			// else negate answer
	asm("rsc r1, r1, #0 ");
	__JUMP(,lr);
	asm("ConvertTRealXToInt64a: ");
	asm("cmn r2, #0x10000 ");			// check for infinity or NaN
	asm("bcc ConvertTRealXToInt64b ");	// branch if neither
	asm("cmp r1, #0x80000000 ");		// check for infinity
	asm("cmpeq r0, #0 ");
	asm("movne r1, #0 ");				// if NaN, return 0
	asm("movne r0, #0 ");
	__JUMP(ne,lr);
	asm("ConvertTRealXToInt64b: ");
	asm("mov r1, #0x80000000 ");		// return KMaxTInt64/KMinTInt64 depending on sign
	asm("mov r0, #0 ");
	asm("movs r2, r2, lsr #1 ");
	asm("sbcs r0, r0, #0 ");
	asm("sbc r1, r1, #0 ");
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TRealX::operator TReal32() const __SOFTFP
/**
Returns the extended precision value as
a single precision floating point value.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");			// r1,r2,r3=input value

	// Convert TRealX in r1,r2,r3 to TReal32 in r0
	asm("ConvertTRealXToTReal32: ");
	asm("mov r12, #0x8000 ");
	asm("orr r12, r12, #0x007F ");			// r12=0x807F
	asm("cmp r3, r12, lsl #16 ");			// check if exponent>=807F
	asm("bcs ConvertTRealXToTReal32a ");	// branch if it is
	asm("sub r12, r12, #0x00FF ");			// r12=0x7F80
	asm("rsbs r12, r12, r3, lsr #16 ");		// r12=exp in - 7F80 = result exponent if in range
	asm("bgt ConvertTRealXToTReal32b ");	// branch if normalised result
	asm("cmn r12, #23 ");					// check for total underflow or zero
	asm("movlt r0, r3, lsl #31 ");			// in this case, return zero with appropriate sign
	__JUMP(lt,lr);
	asm("add r12, r12, #31 ");				// r12=32-mantissa shift required = 32-(1-r12)
	asm("movs r0, r1, lsl r12 ");			// r0=lost bits when r2:r1 is shifted
	asm("bicne r3, r3, #0x300 ");			// if these are not zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");
	asm("mov r1, r1, lsr r0 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r0 ");				// r2 top 24 bits now give unrounded result mantissa
	asm("mov r12, #0 ");					// result exponent will be zero
	asm("ConvertTRealXToTReal32b: ");
	asm("movs r0, r2, lsl #24 ");			// top 8 truncated bits into top byte of r0
	asm("bpl ConvertTRealXToTReal32c ");	// if top bit clear, truncate
	asm("cmp r0, #0x80000000 ");
	asm("cmpeq r1, #0 ");					// compare rounding bits to 1000...
	asm("bhi ConvertTRealXToTReal32d ");	// if >, round up
	asm("movs r0, r3, lsl #23 ");			// round up flag into C, round down flag into N
	asm("bcs ConvertTRealXToTReal32c ");	// if rounded up, truncate
	asm("bmi ConvertTRealXToTReal32d ");	// if rounded down, round up
	asm("tst r2, #0x100 ");					// else round to even - test LSB of result mantissa
	asm("beq ConvertTRealXToTReal32c ");	// if zero, truncate, else round up
	asm("ConvertTRealXToTReal32d: ");		// come here to round up
	asm("adds r2, r2, #0x100 ");			// increment the mantissa
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa=800000
	asm("addcs r12, r12, #1 ");				// and increment exponent
	asm("cmpmi r12, #1 ");					// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");					// if normalised and exponent=0, set exponent to 1
	asm("ConvertTRealXToTReal32c: ");		// come here to truncate
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, r12, lsl #23 ");		// exponent into r0 bits 23-30
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #8 ");			// non-integer mantissa bits into r0 bits 0-22
	__JUMP(,lr);
	asm("ConvertTRealXToTReal32a: ");		// come here if overflow, infinity or NaN
	asm("cmn r3, #0x10000 ");				// check for infinity or NaN
	asm("movcc r2, #0 ");					// if not, set mantissa to 0 for infinity result
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, #0x7F000000 ");		// r0 bits 23-30 = FF = exponent
	asm("orr r0, r0, #0x00800000 ");
	asm("orr r0, r0, r2, lsr #8 ");			// r0 bits 0-22 = result mantissa
	__JUMP(,lr);
	}
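
// Rounding above is IEEE round-to-nearest with ties broken by the TRealX
// rounding flags (bits 8-9 of the exponent word record whether the stored
// value was itself already rounded up or down):
//
//   guard bits >  10000...  -> round up
//   guard bits == 10000...  -> exact tie: honour the flags if set, otherwise
//                              round so the result's LSB becomes 0 (to even)
//   guard bits <  10000...  -> truncate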


__NAKED__ EXPORT_C TRealX::operator TReal64() const __SOFTFP
/**
Returns the extended precision value as
a double precision floating point value.
*/
	{
	asm("ldmia r0, {r1,r2,r3} ");			// r1,r2,r3=input value

	// Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
	// if __DOUBLE_WORDS_SWAPPED__ r0=sign,exp,high mant, r1=low mant
	// else r0, r1 reversed
	asm("ConvertTRealXToTReal64: ");
	asm("mov r12, #0x8300 ");
	asm("orr r12, r12, #0x00FF ");			// r12=0x83FF
	asm("cmp r3, r12, lsl #16 ");			// check if exponent>=83FF
	asm("bcs ConvertTRealXToTReal64a ");	// branch if it is
	asm("mov r12, #0x7C00 ");
	asm("rsbs r12, r12, r3, lsr #16 ");		// r12=exp in - 7C00 = result exponent if in range
	asm("bgt ConvertTRealXToTReal64b ");	// branch if normalised result
	asm("cmn r12, #52 ");					// check for total underflow or zero
	asm("movlt r0, r3, lsl #31 ");			// in this case, return zero with appropriate sign
	asm("movlt r1, #0 ");
	asm("blt ConvertTRealXToTReal64_end ");

	asm("adds r12, r12, #31 ");				// check if >=32 shifts needed, r12=32-shift count
	asm("ble ConvertTRealXToTReal64e ");	// branch if >=32 shifts needed
	asm("movs r0, r1, lsl r12 ");			// r0=lost bits when r2:r1 is shifted
	asm("bicne r3, r3, #0x300 ");			// if these are not zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");				// r0=shift count
	asm("mov r1, r1, lsr r0 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r0 ");				// r2:r1 top 53 bits = unrounded result mantissa
	asm("b ConvertTRealXToTReal64f ");
	asm("ConvertTRealXToTReal64e: ");
	asm("add r12, r12, #32 ");				// r12=64-shift count
	asm("cmp r1, #0 ");						// r1 bits are all lost - test them
	asm("moveqs r0, r2, lsl r12 ");			// if zero, test lost bits from r2
	asm("bicne r3, r3, #0x300 ");			// if lost bits not all zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");				// r0=shift count-32
	asm("mov r1, r2, lsr r0 ");				// shift r2:r1 right
	asm("mov r2, #0 ");
	asm("ConvertTRealXToTReal64f: ");
	asm("mov r12, #0 ");					// result exponent will be zero for denormals
	asm("ConvertTRealXToTReal64b: ");
	asm("movs r0, r1, lsl #21 ");			// 11 rounding bits to top of r0
	asm("bpl ConvertTRealXToTReal64c ");	// if top bit clear, truncate
	asm("cmp r0, #0x80000000 ");			// compare rounding bits to 10000000000
	asm("bhi ConvertTRealXToTReal64d ");	// if >, round up
	asm("movs r0, r3, lsl #23 ");			// round up flag into C, round down flag into N
	asm("bcs ConvertTRealXToTReal64c ");	// if rounded up, truncate
	asm("bmi ConvertTRealXToTReal64d ");	// if rounded down, round up
	asm("tst r1, #0x800 ");					// else round to even - test LSB of result mantissa
	asm("beq ConvertTRealXToTReal64c ");	// if zero, truncate, else round up
	asm("ConvertTRealXToTReal64d: ");		// come here to round up
	asm("adds r1, r1, #0x800 ");			// increment the mantissa
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa=10000...0
	asm("addcs r12, r12, #1 ");				// and increment exponent
	asm("cmpmi r12, #1 ");					// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");					// if normalised and exponent=0, set exponent to 1
	asm("ConvertTRealXToTReal64c: ");		// come here to truncate
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, r12, lsl #20 ");		// exponent into r0 bits 20-30
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #11 ");		// non-integer mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsr #11 ");			// and r1
	asm("orr r1, r1, r2, lsl #21 ");
	asm("b ConvertTRealXToTReal64_end ");

	asm("ConvertTRealXToTReal64a: ");		// come here if overflow, infinity or NaN
	asm("cmn r3, #0x10000 ");				// check for infinity or NaN
	asm("movcc r2, #0 ");					// if not, set mantissa to 0 for infinity result
	asm("movcc r1, #0 ");
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, #0x7F000000 ");		// r0 bits 20-30 = 7FF = exponent
	asm("orr r0, r0, #0x00F00000 ");
	asm("orr r0, r0, r2, lsr #11 ");		// r0 bits 0-19 = result mantissa high bits
	asm("mov r1, r1, lsr #11 ");			// and r1=result mantissa low bits
	asm("orr r1, r1, r2, lsl #21 ");
	asm("ConvertTRealXToTReal64_end: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal32& /*aVal*/) const
/**
Extracts the extended precision value as
a single precision floating point value.

@param aVal A reference to a single precision object which contains
            the result of the operation.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r4,lr} ");
	asm("mov r4, r1 ");
	asm("ldmia r0, {r1,r2,r3} ");			// r1,r2,r3=input value
	asm("bl TRealXGetTReal32 ");
	asm("str r0, [r4] ");					// store converted TReal32
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4,");

	// Convert TRealX in r1,r2,r3 to TReal32 in r0
	// Return error code in r12
	// r0-r3, r12 modified
	asm("TRealXGetTReal32: ");
	asm("mov r12, #0x8000 ");
	asm("orr r12, r12, #0x007F ");			// r12=0x807F
	asm("cmp r3, r12, lsl #16 ");			// check if exponent>=807F
	asm("bcs TRealXGetTReal32a ");			// branch if it is
	asm("sub r12, r12, #0x00FF ");			// r12=0x7F80
	asm("rsbs r12, r12, r3, lsr #16 ");		// r12=exp in - 7F80 = result exponent if in range
	asm("bgt TRealXGetTReal32b ");			// branch if normalised result
	asm("cmn r12, #23 ");					// check for total underflow or zero
	asm("bge TRealXGetTReal32e ");			// skip if not
	asm("mov r0, r3, lsl #31 ");			// else return zero with appropriate sign
	asm("mov r1, #0 ");
	asm("cmp r3, #0x10000 ");				// check for zero
	asm("movcc r12, #0 ");					// if zero return KErrNone
	asm("mvncs r12, #9 ");					// else return KErrUnderflow
	__JUMP(,lr);
	asm("TRealXGetTReal32e: ");
	asm("add r12, r12, #31 ");				// r12=32-mantissa shift required = 32-(1-r12)
	asm("movs r0, r1, lsl r12 ");			// r0=lost bits when r2:r1 is shifted
	asm("bicne r3, r3, #0x300 ");			// if these are not zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");
	asm("mov r1, r1, lsr r0 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r0 ");				// r2 top 24 bits now give unrounded result mantissa
	asm("mov r12, #0 ");					// result exponent will be zero
	asm("TRealXGetTReal32b: ");
	asm("movs r0, r2, lsl #24 ");			// top 8 truncated bits into top byte of r0
	asm("bpl TRealXGetTReal32c ");			// if top bit clear, truncate
	asm("cmp r0, #0x80000000 ");
	asm("cmpeq r1, #0 ");					// compare rounding bits to 1000...
	asm("bhi TRealXGetTReal32d ");			// if >, round up
	asm("movs r0, r3, lsl #23 ");			// round up flag into C, round down flag into N
	asm("bcs TRealXGetTReal32c ");			// if rounded up, truncate
	asm("bmi TRealXGetTReal32d ");			// if rounded down, round up
	asm("tst r2, #0x100 ");					// else round to even - test LSB of result mantissa
	asm("beq TRealXGetTReal32c ");			// if zero, truncate, else round up
	asm("TRealXGetTReal32d: ");				// come here to round up
	asm("adds r2, r2, #0x100 ");			// increment the mantissa
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa=800000
	asm("addcs r12, r12, #1 ");				// and increment exponent
	asm("cmpmi r12, #1 ");					// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");					// if normalised and exponent=0, set exponent to 1
	asm("TRealXGetTReal32c: ");				// come here to truncate
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, r12, lsl #23 ");		// exponent into r0 bits 23-30
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #8 ");			// non-integer mantissa bits into r0 bits 0-22
	asm("cmp r12, #0xFF ");					// check for overflow
	asm("mvneq r12, #8 ");					// if overflow, return KErrOverflow
	__JUMP(eq,lr);
	asm("bics r1, r0, #0x80000000 ");		// check for underflow
	asm("mvneq r12, #9 ");					// if underflow return KErrUnderflow
	asm("movne r12, #0 ");					// else return KErrNone
	__JUMP(,lr);
	asm("TRealXGetTReal32a: ");				// come here if overflow, infinity or NaN
	asm("cmn r3, #0x10000 ");				// check for infinity or NaN
	asm("movcc r2, #0 ");					// if not, set mantissa to 0 for infinity result
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, #0x7F000000 ");		// r0 bits 23-30 = FF = exponent
	asm("orr r0, r0, #0x00800000 ");
	asm("orr r0, r0, r2, lsr #8 ");			// r0 bits 0-22 = result mantissa
	asm("movs r12, r0, lsl #9 ");			// check if result is infinity or NaN
	asm("mvneq r12, #8 ");					// if infinity return KErrOverflow
	asm("mvnne r12, #5 ");					// else return KErrArgument
	__JUMP(,lr);
	}
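
// Usage sketch (illustrative): unlike the cast operators, GetTReal() reports
// range problems through its return code while still storing a best-effort
// result.
//
//   TRealX x;
//   x.Set(1);
//   TReal32 f;
//   TInt r = x.GetTReal(f);   // r == KErrNone, f == 1.0f
//   // a TRealX holding 2^200 would yield r == KErrOverflow and f == +Inf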


__NAKED__ EXPORT_C TInt TRealX::GetTReal(TReal64& /*aVal*/) const
/**
Extracts the extended precision value as
a double precision floating point value.

@param aVal A reference to a double precision object which
            contains the result of the operation.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r4,lr} ");
	asm("mov r4, r1 ");
	asm("ldmia r0, {r1,r2,r3} ");			// r1,r2,r3=input value
	asm("bl TRealXGetTReal64 ");
	asm("stmia r4, {r0,r1} ");				// store converted TReal64
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4,");

	// Convert TRealX in r1,r2,r3 to TReal64 in r0,r1
	// Return error code in r12
	// r0-r3, r12 modified
	asm("TRealXGetTReal64: ");
	asm("mov r12, #0x8300 ");
	asm("orr r12, r12, #0x00FF ");			// r12=0x83FF
	asm("cmp r3, r12, lsl #16 ");			// check if exponent>=83FF
	asm("bcs TRealXGetTReal64a ");			// branch if it is
	asm("mov r12, #0x7C00 ");
	asm("rsbs r12, r12, r3, lsr #16 ");		// r12=exp in - 7C00 = result exponent if in range
	asm("bgt TRealXGetTReal64b ");			// branch if normalised result
	asm("cmn r12, #52 ");					// check for total underflow or zero
	asm("bge TRealXGetTReal64g ");			// skip if not
	asm("mov r0, r3, lsl #31 ");			// else return zero with appropriate sign
	asm("mov r1, #0 ");
	asm("cmp r3, #0x10000 ");				// check for zero
	asm("movcc r12, #0 ");					// if zero return KErrNone
	asm("mvncs r12, #9 ");					// else return KErrUnderflow
	asm("b TRealXGetTReal64_end ");

	asm("TRealXGetTReal64g: ");
	asm("adds r12, r12, #31 ");				// check if >=32 shifts needed, r12=32-shift count
	asm("ble TRealXGetTReal64e ");			// branch if >=32 shifts needed
	asm("movs r0, r1, lsl r12 ");			// r0=lost bits when r2:r1 is shifted
	asm("bicne r3, r3, #0x300 ");			// if these are not zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");				// r0=shift count
	asm("mov r1, r1, lsr r0 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r0 ");				// r2:r1 top 53 bits = unrounded result mantissa
	asm("b TRealXGetTReal64f ");
	asm("TRealXGetTReal64e: ");
	asm("add r12, r12, #32 ");				// r12=64-shift count
	asm("cmp r1, #0 ");						// r1 bits are all lost - test them
	asm("moveqs r0, r2, lsl r12 ");			// if zero, test lost bits from r2
	asm("bicne r3, r3, #0x300 ");			// if lost bits not all zero, set rounded down flag
	asm("orrne r3, r3, #0x100 ");
	asm("rsb r0, r12, #32 ");				// r0=shift count-32
	asm("mov r1, r2, lsr r0 ");				// shift r2:r1 right
	asm("mov r2, #0 ");
	asm("TRealXGetTReal64f: ");
	asm("mov r12, #0 ");					// result exponent will be zero for denormals
	asm("TRealXGetTReal64b: ");
	asm("movs r0, r1, lsl #21 ");			// 11 rounding bits to top of r0
	asm("bpl TRealXGetTReal64c ");			// if top bit clear, truncate
	asm("cmp r0, #0x80000000 ");			// compare rounding bits to 10000000000
	asm("bhi TRealXGetTReal64d ");			// if >, round up
	asm("movs r0, r3, lsl #23 ");			// round up flag into C, round down flag into N
	asm("bcs TRealXGetTReal64c ");			// if rounded up, truncate
	asm("bmi TRealXGetTReal64d ");			// if rounded down, round up
	asm("tst r1, #0x800 ");					// else round to even - test LSB of result mantissa
	asm("beq TRealXGetTReal64c ");			// if zero, truncate, else round up
	asm("TRealXGetTReal64d: ");				// come here to round up
	asm("adds r1, r1, #0x800 ");			// increment the mantissa
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa=10000...0
	asm("addcs r12, r12, #1 ");				// and increment exponent
	asm("cmpmi r12, #1 ");					// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");					// if normalised and exponent=0, set exponent to 1
	asm("TRealXGetTReal64c: ");				// come here to truncate
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, r12, lsl #20 ");		// exponent into r0 bits 20-30
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #11 ");		// non-integer mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsr #11 ");			// and r1
	asm("orr r1, r1, r2, lsl #21 ");
	asm("add r12, r12, #1 ");
	asm("cmp r12, #0x800 ");				// check for overflow
	asm("mvneq r12, #8 ");					// if overflow, return KErrOverflow
	asm("beq TRealXGetTReal64_end ");

	asm("bics r12, r0, #0x80000000 ");		// check for underflow
	asm("cmpeq r1, #0 ");
	asm("mvneq r12, #9 ");					// if underflow return KErrUnderflow
	asm("movne r12, #0 ");					// else return KErrNone
	asm("b TRealXGetTReal64_end ");

	asm("TRealXGetTReal64a: ");				// come here if overflow, infinity or NaN
	asm("cmn r3, #0x10000 ");				// check for infinity or NaN
	asm("movcc r2, #0 ");					// if not, set mantissa to 0 for infinity result
	asm("movcc r1, #0 ");
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("mov r0, r3, lsl #31 ");			// r0 bit 31 = sign bit
	asm("orr r0, r0, #0x7F000000 ");		// r0 bits 20-30 = 7FF = exponent
	asm("orr r0, r0, #0x00F00000 ");
	asm("orr r0, r0, r2, lsr #11 ");		// r0 bits 0-19 = result mantissa high bits
	asm("mov r1, r1, lsr #11 ");			// and r1=result mantissa low bits
	asm("orr r1, r1, r2, lsl #21 ");
	asm("movs r12, r0, lsl #12 ");			// check if result is infinity or NaN
	asm("cmpeq r1, #0 ");
	asm("mvneq r12, #8 ");					// if infinity return KErrOverflow
	asm("mvnne r12, #5 ");					// else return KErrArgument
	asm("TRealXGetTReal64_end: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TRealX TRealX::operator+() const
/**
Returns this extended precision number unchanged.

Note that this may also be referred to as a unary plus operator.

@return The extended precision number.
*/
	{
	asm("ldmia r1, {r2,r3,r12} ");
	asm("stmia r0, {r2,r3,r12} ");
	__JUMP(,lr);
	}




__NAKED__ EXPORT_C TRealX TRealX::operator-() const
/**
Negates this extended precision number.

This may also be referred to as a unary minus operator.

@return The negative of the extended precision number.
*/
	{
	asm("ldmia r1, {r2,r3,r12} ");
	asm("eor r12, r12, #1 ");			// unary - changes sign bit
	asm("stmia r0, {r2,r3,r12} ");
	__JUMP(,lr);
	}
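
// Sign, exponent and rounding flags all live in the third word of a TRealX,
// which is why negation is a single EOR of bit 0. A hedged sketch of the
// layout the register triples assume (field names illustrative, inferred
// from the assembler rather than quoted from e32math.h); not compiled.
#if 0
#include <stdint.h>

struct TRealXBits
	{
	uint32_t iMantLo;		// mantissa bits 0-31
	uint32_t iMantHi;		// mantissa bits 32-63; bit 63 is the explicit integer bit
	uint32_t iExpSign;		// bits 16-31 exponent, bits 8-9 rounding flags, bit 0 sign
	};

static void Negate(TRealXBits& a)
	{
	a.iExpSign ^= 1u;		// the "eor r12, r12, #1" above: flip only the sign bit
	}
#endif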




__NAKED__ EXPORT_C TRealX::TRealXOrder TRealX::Compare(const TRealX& /*aVal*/) const
/**
Compares this extended precision number with the specified extended
precision value.

@param aVal The extended precision value to be compared.

@return The result of the comparison, as a TRealXOrder type.
*/
	{
	asm("stmfd sp!, {r4,r5,r6,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXCompare ");
	__POPRET("r4-r6,");

	// Compare TRealX in r1,r2,r3 to TRealX in r4,r5,r6
	// Return TRealXOrder result in r0
	asm("TRealXCompare: ");
	asm("cmn r3, #0x10000 ");				// check for NaNs/infinity
	asm("bcs TRealXCompare1 ");
	asm("TRealXCompare6: ");				// will come back here if infinity
	asm("cmn r6, #0x10000 ");
	asm("bcs TRealXCompare2 ");
	asm("TRealXCompare7: ");				// will come back here if infinity
	asm("cmp r3, #0x10000 ");				// check for zeros
	asm("bcc TRealXCompare3 ");
	asm("cmp r6, #0x10000 ");
	asm("bcc TRealXCompare4 ");
	asm("mov r12, r6, lsl #31 ");
	asm("cmp r12, r3, lsl #31 ");			// compare signs
	asm("movne r0, #4 ");
	asm("bne TRealXCompare5 ");				// branch if signs different
	asm("mov r12, r3, lsr #16 ");			// r12=first exponent
	asm("cmp r12, r6, lsr #16 ");			// compare exponents
	asm("cmpeq r2, r5 ");					// if equal compare high words of mantissa
	asm("cmpeq r1, r4 ");					// if equal compare low words of mantissa
	asm("moveq r0, #2 ");					// if equal return 2
	__JUMP(eq,lr);
	asm("movhi r0, #4 ");					// r0=4 if first exp bigger
	asm("movcc r0, #1 ");					// else r0=1
	asm("TRealXCompare5: ");
	asm("tst r3, #1 ");						// if signs negative
	asm("eorne r0, r0, #5 ");				// then switch 1 and 4
	__JUMP(,lr);
	asm("TRealXCompare3: ");				// first operand zero
	asm("cmp r6, #0x10000 ");				// check if second also zero
	asm("movcc r0, #2 ");					// if so, return 2
	__JUMP(cc,lr);
	asm("tst r6, #1 ");						// else check sign of operand 2
	asm("moveq r0, #1 ");					// if +, return 1
	asm("movne r0, #4 ");					// else return 4
	__JUMP(,lr);
	asm("TRealXCompare4: ");				// second operand zero, first nonzero
	asm("tst r3, #1 ");						// check sign of operand 1
	asm("moveq r0, #4 ");					// if +, return 4
	asm("movne r0, #1 ");					// else return 1
	__JUMP(,lr);
	asm("TRealXCompare1: ");				// first operand NaN or infinity
	asm("cmp r2, #0x80000000 ");			// check for infinity
	asm("cmpeq r1, #0 ");
	asm("beq TRealXCompare6 ");				// if infinity, can handle normally
	asm("mov r0, #8 ");						// if NaN, return 8 (unordered)
	__JUMP(,lr);
	asm("TRealXCompare2: ");				// second operand NaN or infinity
	asm("cmp r5, #0x80000000 ");			// check for infinity
	asm("cmpeq r4, #0 ");
	asm("beq TRealXCompare7 ");				// if infinity, can handle normally
	asm("mov r0, #8 ");						// if NaN, return 8 (unordered)
	__JUMP(,lr);
	}
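
// The flag values returned in r0 are 1 (less than), 2 (equal), 4 (greater
// than) and 8 (unordered, i.e. at least one NaN); for two negative operands
// the assembler swaps "less" and "greater" with a single XOR, since
// 1 ^ 5 = 4 and 4 ^ 5 = 1. A hedged, portable sketch of the decision tree
// (names illustrative); not compiled.
#if 0
enum TOrderSketch { ELessSk = 1, EEqualSk = 2, EGreaterSk = 4, EUnorderedSk = 8 };

static TOrderSketch CompareSketch(double a, double b)
	{
	if (a != a || b != b)			// a NaN never compares equal to itself
		return EUnorderedSk;
	if (a == b)
		return EEqualSk;
	return (a < b) ? ELessSk : EGreaterSk;
	}
#endif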




__NAKED__ EXPORT_C TInt TRealX::SubEq(const TRealX& /*aVal*/)
/**
Subtracts an extended precision value from this extended precision number.

@param aVal The extended precision value to be subtracted.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, r12 ");
	__JUMP(,lr);
	}




__NAKED__ EXPORT_C TInt TRealX::AddEq(const TRealX& /*aVal*/)
/**
Adds an extended precision value to this extended precision number.

@param aVal The extended precision value to be added.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, r12 ");
	__JUMP(,lr);

	// TRealX subtraction r1,r2,r3 - r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r8,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TRealXSubtract: ");
	asm("eor r6, r6, #1 ");					// negate second operand and add

	// TRealX addition r1,r2,r3 + r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r8,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	// Note:	+0 + +0 = +0, -0 + -0 = -0, +0 + -0 = -0 + +0 = +0,
	//			+/-0 + X = X + +/-0 = X, X + -X = -X + X = +0
	asm("TRealXAdd: ");
	asm("mov r12, #0 ");					// initialise return value to KErrNone
	asm("bic r3, r3, #0x300 ");				// clear rounding flags
	asm("bic r6, r6, #0x300 ");				// clear rounding flags
	asm("cmn r3, #0x10000 ");				// check if first operand is NaN or infinity
	asm("bcs TRealXAdd1 ");					// branch if it is
	asm("cmn r6, #0x10000 ");				// check if second operand is NaN or infinity
	asm("bcs TRealXAdd2 ");					// branch if it is
	asm("cmp r6, #0x10000 ");				// check if second operand zero
	asm("bcc TRealXAdd3a ");				// branch if it is
	asm("cmp r3, #0x10000 ");				// check if first operand zero
	asm("bcc TRealXAdd3 ");					// branch if it is
	asm("mov r7, #0 ");						// r7 will be rounding word
	asm("mov r0, r3, lsr #16 ");			// r0 = first operand exponent
	asm("subs r0, r0, r6, lsr #16 ");		// r0 = first exponent - second exponent
	asm("beq TRealXAdd8 ");					// if equal, no mantissa shifting needed
	asm("bhi TRealXAdd4 ");					// skip if first exponent bigger
	asm("rsb r0, r0, #0 ");					// need to shift first mantissa right by r0 to align
	asm("mov r8, r1 ");						// swap the numbers so the one to be shifted is 2nd
	asm("mov r1, r4 ");
	asm("mov r4, r8 ");
	asm("mov r8, r2 ");
	asm("mov r2, r5 ");
	asm("mov r5, r8 ");
	asm("mov r8, r3 ");
	asm("mov r3, r6 ");
	asm("mov r6, r8 ");
	asm("TRealXAdd4: ");					// need to shift 2nd mantissa right by r0 to align
	asm("cmp r0, #64 ");					// more than 64 shifts needed?
	asm("bhi TRealXAdd6 ");					// if so, smaller number cannot affect larger
	asm("cmp r0, #32 ");
	asm("bhi TRealXAdd7 ");					// branch if shift count>32
	asm("rsb r8, r0, #32 ");
	asm("mov r7, r4, lsl r8 ");				// shift r5:r4 right into r7
	asm("mov r4, r4, lsr r0 ");
	asm("orr r4, r4, r5, lsl r8 ");
	asm("mov r5, r5, lsr r0 ");
	asm("b TRealXAdd8 ");
	asm("TRealXAdd7: ");					// 64 >= shift count > 32
	asm("sub r0, r0, #32 ");
	asm("rsb r8, r0, #32 ");
	asm("movs r7, r4, lsl r8 ");			// test bits lost in shift
	asm("orrne r6, r6, #0x100 ");			// if not all zero, flag 2nd mantissa rounded down
	asm("mov r7, r4, lsr r0 ");				// shift r5:r4 right into r7 by 32+r0
	asm("orr r7, r7, r5, lsl r8 ");
	asm("mov r4, r5, lsr r0 ");
	asm("mov r5, #0 ");
	asm("TRealXAdd8: ");					// mantissas are now aligned
	asm("mov r8, r3, lsl #31 ");			// r8=sign of first operand
	asm("cmp r8, r6, lsl #31 ");			// compare signs
	asm("bne TRealXSub1 ");					// if different, need to do a subtraction
	asm("adds r1, r1, r4 ");				// signs the same - add mantissas
	asm("adcs r2, r2, r5 ");
	asm("bcc TRealXAdd9 ");					// skip if no carry
	asm(".word 0xE1B02062 ");				// movs r2, r2, rrx shift carry into mantissa
	asm(".word 0xE1B01061 ");				// movs r1, r1, rrx
	asm(".word 0xE1B07067 ");				// movs r7, r7, rrx
	asm("orrcs r6, r6, #0x100 ");			// if 1 shifted out, flag 2nd mantissa rounded down
	asm("add r3, r3, #0x10000 ");			// increment exponent
	asm("TRealXAdd9: ");
	asm("cmp r7, #0x80000000 ");			// check rounding word
	asm("bcc TRealXAdd10 ");				// if <0x80000000 round down
	asm("bhi TRealXAdd11 ");				// if >0x80000000 round up
	asm("tst r6, #0x100 ");					// if =0x80000000 check if 2nd mantissa rounded down
	asm("bne TRealXAdd11 ");				// if so, round up
	asm("tst r6, #0x200 ");					// if =0x80000000 check if 2nd mantissa rounded up
	asm("bne TRealXAdd10 ");				// if so, round down
	asm("tst r1, #1 ");						// else round to even - check LSB
	asm("beq TRealXAdd10 ");				// if zero, round down
	asm("TRealXAdd11: ");					// come here to round up
	asm("adds r1, r1, #1 ");				// increment mantissa
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa = 80000000 00000000
	asm("addcs r3, r3, #0x10000 ");			// and increment exponent
	asm("cmn r3, #0x10000 ");				// check overflow
	asm("orrcc r3, r3, #0x200 ");			// if no overflow, set rounded-up flag ...
	__JUMP(cc,lr);
	asm("b TRealXAdd12 ");					// if overflow, return infinity
	asm("TRealXAdd10: ");					// come here to round down
	asm("cmn r3, #0x10000 ");				// check overflow
	asm("bcs TRealXAdd12 ");				// if overflow, return infinity
	asm("cmp r7, #0 ");						// if no overflow check if rounding word is zero
	asm("orrne r3, r3, #0x100 ");			// if not, set rounded-down flag ...
	__JUMP(ne,lr);
	asm("and r6, r6, #0x300 ");				// else transfer 2nd mantissa rounding flags
	asm("orr r3, r3, r6 ");					// to result
	__JUMP(,lr);

	asm("TRealXAdd12: ");					// come here if overflow - return infinity
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #8 ");					// and return KErrOverflow
	__JUMP(,lr);

	asm("TRealXSub1: ");					// come here if operand signs differ
	asm("tst r6, #0x300 ");					// check if 2nd mantissa rounded
	asm("eorne r6, r6, #0x300 ");			// if so, change rounding
	asm("rsbs r7, r7, #0 ");				// subtract mantissas r2:r1:0 -= r5:r4:r7
	asm("sbcs r1, r1, r4 ");
	asm("sbcs r2, r2, r5 ");
	asm("bcs TRealXSub2 ");					// skip if no borrow
	asm("tst r6, #0x300 ");					// check if 2nd mantissa rounded
	asm("eorne r6, r6, #0x300 ");			// if so, change rounding
	asm("rsbs r7, r7, #0 ");				// negate result
	asm("rscs r1, r1, #0 ");
	asm("rscs r2, r2, #0 ");
	asm("eor r3, r3, #1 ");					// and change result sign
	asm("TRealXSub2: ");
	asm("bne TRealXSub3 ");					// skip if mantissa top word is not zero
	asm("movs r2, r1 ");					// else shift up by 32
	asm("mov r1, r7 ");
	asm("mov r7, #0 ");
	asm("bne TRealXSub3a ");				// skip if mantissa top word is not zero now
	asm("movs r2, r1 ");					// else shift up by 32 again
	asm("mov r1, #0 ");
	asm("moveq r3, #0 ");					// if r2 still zero, result is zero - return +0
	__JUMP(eq,lr);
	asm("subs r3, r3, #0x00400000 ");		// else, decrement exponent by 64
	asm("bcs TRealXSub3 ");					// if no borrow, proceed
	asm("b TRealXSub4 ");					// if borrow, underflow
	asm("TRealXSub3a: ");					// needed one 32-bit shift
	asm("subs r3, r3, #0x00200000 ");		// so decrement exponent by 32
	asm("bcc TRealXSub4 ");					// if borrow, underflow
	asm("TRealXSub3: ");					// r2 is now non-zero; still may need up to 31 shifts
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(0,2);
	asm("mov r2, r2, lsl r0 ");
#else
	asm("mov r0, #0 ");						// r0 will be shift count
	asm("cmp r2, #0x00010000 ");
	asm("movcc r2, r2, lsl #16 ");
	asm("addcc r0, r0, #16 ");
	asm("cmp r2, #0x01000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("addcc r0, r0, #8 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("addcc r0, r0, #4 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("addcc r0, r0, #2 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");
	asm("addcc r0, r0, #1 ");
#endif
	asm("rsb r8, r0, #32 ");
	asm("subs r3, r3, r0, lsl #16 ");		// subtract shift count from exponent
	asm("bcc TRealXSub4 ");					// if borrow, underflow
	asm("orr r2, r2, r1, lsr r8 ");			// else shift mantissa up
	asm("mov r1, r1, lsl r0 ");
	asm("orr r1, r1, r7, lsr r8 ");
	asm("mov r7, r7, lsl r0 ");
	asm("cmp r3, #0x10000 ");				// check for underflow
	asm("bcs TRealXAdd9 ");					// if no underflow, branch to round result

	asm("TRealXSub4: ");					// come here if underflow
	asm("and r3, r3, #1 ");					// set exponent to zero, leave sign
	asm("mov r2, #0 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #9 ");					// return KErrUnderflow
	__JUMP(,lr);

	asm("TRealXAdd6: ");					// come here if exponents differ by more than 64
	asm("mov r8, r3, lsl #31 ");			// r8=sign of first operand
	asm("cmp r8, r6, lsl #31 ");			// compare signs
	asm("orreq r3, r3, #0x100 ");			// if same, result has been rounded down
	asm("orrne r3, r3, #0x200 ");			// else result has been rounded up
	__JUMP(,lr);

	asm("TRealXAdd3a: ");					// come here if second operand zero
	asm("cmp r3, #0x10000 ");				// check if first operand also zero
	asm("andcc r3, r3, r6 ");				// if so, result is negative iff both zeros negative
	asm("andcc r3, r3, #1 ");
	__JUMP(,lr);

	asm("TRealXAdd3: ");					// come here if first operand zero, second nonzero
	asm("mov r1, r4 ");						// return second operand unchanged
	asm("mov r2, r5 ");
	asm("mov r3, r6 ");
	__JUMP(,lr);

	asm("TRealXAdd1: ");					// come here if first operand NaN or infinity
	asm("cmp r2, #0x80000000 ");			// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("cmn r6, #0x10000 ");				// check 2nd operand for NaN/infinity
	asm("mvncc r12, #8 ");					// if neither, return KErrOverflow
	__JUMP(cc,lr);
	asm("cmp r5, #0x80000000 ");			// check 2nd operand for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("mov r0, r3, lsl #31 ");			// both operands are infinity - check signs
	asm("cmp r0, r6, lsl #31 ");
	asm("mvneq r12, #8 ");					// if same, return KErrOverflow
	__JUMP(eq,lr);

	// Return 'real indefinite'
	asm("TRealXRealIndefinite: ");
	asm("ldr r3, [pc, #__RealIndefiniteExponent-.-8] ");
	asm("mov r2, #0xC0000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #5 ");					// return KErrArgument
	__JUMP(,lr);

	asm("TRealXAdd2: ");					// come here if 2nd operand NaN/infinity, first finite
	asm("cmp r5, #0x80000000 ");			// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("mov r1, r4 ");						// else return 2nd operand (infinity)
	asm("mov r2, r5 ");
	asm("mov r3, r6 ");
	asm("mvn r12, #8 ");					// return KErrOverflow
	__JUMP(,lr);

	asm("TRealXBinOpNan: ");				// generic routine to process NaNs in binary
											// operations
	asm("cmn r3, #0x10000 ");				// check if first operand is NaN
	asm("movcc r0, r1 ");					// if not, swap the operands
	asm("movcc r1, r4 ");
	asm("movcc r4, r0 ");
	asm("movcc r0, r2 ");
	asm("movcc r2, r5 ");
	asm("movcc r5, r0 ");
	asm("movcc r0, r3 ");
	asm("movcc r3, r6 ");
	asm("movcc r6, r0 ");
	asm("cmn r6, #0x10000 ");				// both operands NaNs?
	asm("bcc TRealXBinOpNan1 ");			// skip if not
	asm("cmp r2, r5 ");						// if so, compare the significands
	asm("cmpeq r1, r4 ");
	asm("movcc r1, r4 ");					// r1,r2,r3 will get NaN with larger significand
	asm("movcc r2, r5 ");
	asm("movcc r3, r6 ");
	asm("TRealXBinOpNan1: ");
	asm("orr r2, r2, #0x40000000 ");		// convert an SNaN to a QNaN
	asm("mvn r12, #5 ");					// return KErrArgument
	__JUMP(,lr);
	}
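
// TRealXAdd aligns the two mantissas by shifting the smaller one right, keeps
// the shifted-out bits in a 32-bit rounding word (r7, plus sticky flags for
// anything lost beyond it), then rounds the sum to nearest with ties to even.
// A hedged sketch of that scheme on a single 32-bit mantissa with
// 0 <= aExpDiff < 32 (names illustrative; the carry-out-of-rounding case the
// assembler handles is omitted); not compiled.
#if 0
#include <stdint.h>

static uint32_t AlignedAdd(uint32_t aBig, uint32_t aSmall, unsigned aExpDiff, int& aExp)
	{
	uint32_t guard = aExpDiff ? (aSmall << (32 - aExpDiff)) : 0;	// bits lost aligning
	uint64_t sum = (uint64_t)aBig + (aSmall >> aExpDiff);
	if (sum >> 32)							// carry out: renormalise one place right
		{
		guard = (guard >> 1) | ((uint32_t)sum << 31);
		sum >>= 1;
		++aExp;
		}
	uint32_t m = (uint32_t)sum;
	if (guard > 0x80000000u || (guard == 0x80000000u && (m & 1u)))
		++m;								// round up; an exact tie goes to even
	return m;
	}
#endif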




__NAKED__ EXPORT_C TInt TRealX::MultEq(const TRealX& /*aVal*/)
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// Version for ARM 3M or later
	// Uses umull/umlal
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, r12 ");
	__JUMP(,lr);

	// TRealX multiplication r1,r2,r3 * r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r7,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TRealXMultiply: ");
	asm("mov r12, #0 ");					// initialise return value to KErrNone
	asm("bic r3, r3, #0x300 ");				// clear rounding flags
	asm("tst r6, #1 ");
	asm("eorne r3, r3, #1 ");				// Exclusive-OR signs
	asm("cmn r3, #0x10000 ");				// check if first operand is NaN or infinity
	asm("bcs TRealXMultiply1 ");			// branch if it is
	asm("cmn r6, #0x10000 ");				// check if second operand is NaN or infinity
	asm("bcs TRealXMultiply2 ");			// branch if it is
	asm("cmp r3, #0x10000 ");				// check if first operand zero
	__JUMP(cc,lr);							// if so, exit

	// Multiply mantissas in r2:r1 and r5:r4, result in r2:r1:r12:r7
	asm("umull r7, r12, r1, r4 ");			// r7:r12=m1.low*m2.low
	asm("movs r0, r6, lsr #16 ");			// r0=2nd operand exponent
	asm("beq TRealXMultiply3 ");			// if zero, return zero
	asm("mov r6, #0 ");						// clear r6 initially
	asm("umlal r12, r6, r1, r5 ");			// r6:r12:r7=m1.low*m2, r1 no longer needed
	asm("add r0, r0, r3, lsr #16 ");		// r0=sum of exponents
	asm("tst r3, #1 ");
	asm("mov r3, #0 ");						// clear r3 initially
	asm("umlal r6, r3, r2, r5 ");			// r3:r6:r12:r7=m2.low*m1+m2.high*m1.high<<64
											// r1,r5 no longer required
	asm("orrne lr, lr, #1 ");				// save sign in bottom bit of lr
	asm("sub r0, r0, #0x7F00 ");
	asm("sub r0, r0, #0x00FE ");			// r0 now contains result exponent
	asm("umull r1, r5, r2, r4 ");			// r5:r1=m2.high*m1.low
	asm("adds r12, r12, r1 ");				// shift left by 32 and add to give final result
	asm("adcs r1, r6, r5 ");
	asm("adcs r2, r3, #0 ");				// final result now in r2:r1:r12:r7
											// set flags on final value of r2 (ms word of result)

	// normalise the result mantissa
	asm("bmi TRealXMultiply4 ");			// skip if already normalised
	asm("adds r7, r7, r7 ");				// else shift left (will only ever need one shift)
	asm("adcs r12, r12, r12 ");
	asm("adcs r1, r1, r1 ");
	asm("adcs r2, r2, r2 ");
	asm("sub r0, r0, #1 ");					// and decrement exponent by one

	// round the result mantissa
	asm("TRealXMultiply4: ");
	asm("and r3, lr, #1 ");					// result sign bit back into r3
	asm("orrs r4, r7, r12 ");				// check for exact result
	asm("beq TRealXMultiply5 ");			// skip if exact
	asm("cmp r12, #0x80000000 ");			// compare bottom 64 bits to 80000000 00000000
	asm("cmpeq r7, #0 ");
	asm("moveqs r4, r1, lsr #1 ");			// if exactly equal, set carry=lsb of result
											// so we round up if lsb=1
	asm("orrcc r3, r3, #0x100 ");			// if rounding down, set rounded-down flag
	asm("orrcs r3, r3, #0x200 ");			// if rounding up, set rounded-up flag
	asm("adcs r1, r1, #0 ");				// increment mantissa if necessary
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");			// if carry, set mantissa to 80000000 00000000
	asm("addcs r0, r0, #1 ");				// and increment result exponent

	// check for overflow or underflow and assemble final result
	asm("TRealXMultiply5: ");
	asm("add r4, r0, #1 ");					// need to add 1 to get usable threshold
	asm("cmp r4, #0x10000 ");				// check if exponent >= 0xFFFF
	asm("bge TRealXMultiply6 ");			// if so, overflow
	asm("cmp r0, #0 ");						// check for underflow
	asm("orrgt r3, r3, r0, lsl #16 ");		// if no underflow, result exponent into r3, ...
	asm("movgt r12, #0 ");					// ... return KErrNone ...
	asm("bicgt pc, lr, #3 ");

	// underflow
	asm("mvn r12, #9 ");					// return KErrUnderflow
	asm("bic pc, lr, #3 ");

	// overflow
	asm("TRealXMultiply6: ");
	asm("bic r3, r3, #0x0000FF00 ");		// clear rounding flags
	asm("orr r3, r3, #0xFF000000 ");		// make exponent FFFF for infinity
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");			// mantissa = 80000000 00000000
	asm("mov r1, #0 ");
	asm("mvn r12, #8 ");					// return KErrOverflow
	asm("bic pc, lr, #3 ");

	// come here if second operand zero
	asm("TRealXMultiply3: ");
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");					// zero exponent, keep xor sign
	asm("mov r12, #0 ");					// return KErrNone
	asm("bic pc, lr, #3 ");

	// First operand NaN or infinity
	asm("TRealXMultiply1: ");
	asm("cmp r2, #0x80000000 ");			// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("cmn r6, #0x10000 ");				// check 2nd operand for NaN/infinity
	asm("bcs TRealXMultiply1a ");			// branch if it is
	asm("cmp r6, #0x10000 ");				// else check if second operand zero
	asm("mvncs r12, #8 ");					// if not, return infinity and KErrOverflow
	asm("biccs pc, lr, #3 ");
	asm("b TRealXRealIndefinite ");			// else return 'real indefinite'

	asm("TRealXMultiply1a: ");
	asm("cmp r5, #0x80000000 ");			// check 2nd operand for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("mvn r12, #8 ");					// else (infinity), return KErrOverflow
	asm("bic pc, lr, #3 ");

	// Second operand NaN or infinity, first operand finite
	asm("TRealXMultiply2: ");
	asm("cmp r5, #0x80000000 ");			// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("cmp r3, #0x10000 ");				// if infinity, check if first operand zero
	asm("bcc TRealXRealIndefinite ");		// if it is, return 'real indefinite'
	asm("orr r3, r3, #0xFF000000 ");		// else return infinity with xor sign
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #8 ");					// return KErrOverflow
	asm("bic pc, lr, #3 ");
	}
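
// The umull/umlal sequence above forms the full 128-bit product of the two
// 64-bit mantissas from four 32x32->64 partial products. A hedged C++ sketch
// of the same decomposition (helper name illustrative, mapping to the
// instructions shown in the comments); not compiled.
#if 0
#include <stdint.h>

static void Mul64To128(uint64_t a, uint64_t b, uint32_t aOut[4])
	{
	uint32_t a0 = (uint32_t)a, a1 = (uint32_t)(a >> 32);
	uint32_t b0 = (uint32_t)b, b1 = (uint32_t)(b >> 32);
	uint64_t p00 = (uint64_t)a0 * b0;		// umull r7,r12,r1,r4
	uint64_t p01 = (uint64_t)a0 * b1;		// umlal r12,r6,r1,r5
	uint64_t p11 = (uint64_t)a1 * b1;		// umlal r6,r3,r2,r5
	uint64_t p10 = (uint64_t)a1 * b0;		// umull r1,r5,r2,r4
	uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;
	uint64_t hi  = p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32);
	aOut[0] = (uint32_t)p00;				// product bits 0-31
	aOut[1] = (uint32_t)mid;				// product bits 32-63
	aOut[2] = (uint32_t)hi;					// product bits 64-95
	aOut[3] = (uint32_t)(hi >> 32);			// product bits 96-127
	}
#endif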




__NAKED__ EXPORT_C TInt TRealX::DivEq(const TRealX& /*aVal*/)
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow;
        KErrDivideByZero, if the divisor is zero.
*/
	{
	asm("stmfd sp!, {r0,r4-r9,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {r0,r4-r9,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, r12 ");
	__JUMP(,lr);

	// TRealX division r1,r2,r3 / r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r9,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TRealXDivide: ");
	asm("mov r12, #0 ");					// initialise return value to KErrNone
	asm("bic r3, r3, #0x300 ");				// clear rounding flags
	asm("tst r6, #1 ");
	asm("eorne r3, r3, #1 ");				// Exclusive-OR signs
	asm("cmn r3, #0x10000 ");				// check if dividend is NaN or infinity
	asm("bcs TRealXDivide1 ");				// branch if it is
	asm("cmn r6, #0x10000 ");				// check if divisor is NaN or infinity
	asm("bcs TRealXDivide2 ");				// branch if it is
	asm("cmp r6, #0x10000 ");				// check if divisor zero
	asm("bcc TRealXDivide3 ");				// branch if it is
	asm("cmp r3, #0x10000 ");				// check if dividend zero
	__JUMP(cc,lr);							// if zero, exit
	asm("tst r3, #1 ");
	asm("orrne lr, lr, #1 ");				// save sign in bottom bit of lr

	// calculate result exponent
	asm("mov r0, r3, lsr #16 ");			// r0=dividend exponent
	asm("sub r0, r0, r6, lsr #16 ");		// r0=dividend exponent - divisor exponent
	asm("add r0, r0, #0x7F00 ");
	asm("add r0, r0, #0x00FF ");			// r0 now contains result exponent
	asm("mov r6, r1 ");						// move dividend into r6,r7,r8
	asm("mov r7, r2 ");
	asm("mov r8, #0 ");						// use r8 to hold extra bit shifted up
											// r2:r1 will hold result mantissa
	asm("mov r2, #1 ");						// we will make sure first bit is 1
	asm("cmp r7, r5 ");						// compare dividend mantissa to divisor mantissa
	asm("cmpeq r6, r4 ");
	asm("bcs TRealXDivide4 ");				// branch if dividend >= divisor
	asm("adds r6, r6, r6 ");				// else shift dividend left one
	asm("adcs r7, r7, r7 ");				// ignore carry here
	asm("sub r0, r0, #1 ");					// decrement result exponent by one
	asm("TRealXDivide4: ");
	asm("subs r6, r6, r4 ");				// subtract divisor from dividend
	asm("sbcs r7, r7, r5 ");

	// Main mantissa division code
	// First calculate the top 32 bits of the result
	// Top bit is 1, do 10 lots of 3 bits then one more bit
	asm("mov r12, #10 ");
	asm("TRealXDivide5: ");
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");				// shift in new result bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");				// shift in new result bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");				// shift in new result bit
	asm("subs r12, r12, #1 ");
	asm("bne TRealXDivide5 ");				// iterate the loop
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r2, r2, r2 ");				// shift in new result bit - now have 32 bits

	// Now calculate the bottom 32 bits of the result
	// Do 8 lots of 4 bits
	asm("mov r12, #8 ");
	asm("TRealXDivide5a: ");
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");				// shift in new result bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");				// shift in new result bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");				// shift in new result bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r9, r6, r4 ");				// subtract divisor from accumulator, result in r9,r3
	asm("sbcs r3, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("movcs r6, r9 ");					// if no borrow, replace accumulator with result
	asm("movcs r7, r3 ");
	asm("adcs r1, r1, r1 ");				// shift in new result bit
	asm("subs r12, r12, #1 ");
	asm("bne TRealXDivide5a ");				// iterate the loop

	// r2:r1 now contains a 64-bit normalised mantissa
	// need to do rounding now
	asm("and r3, lr, #1 ");					// result sign back into r3
	asm("orrs r9, r6, r7 ");				// check if accumulator zero
	asm("beq TRealXDivide6 ");				// if it is, result is exact, else generate next bit
	asm("adds r6, r6, r6 ");				// shift accumulator left by one
	asm("adcs r7, r7, r7 ");
	asm("adcs r8, r8, r8 ");
	asm("subs r6, r6, r4 ");				// subtract divisor from accumulator
	asm("sbcs r7, r7, r5 ");
	asm("movccs r8, r8, lsr #1 ");			// if borrow, check for carry from shift
	asm("orrcc r3, r3, #0x100 ");			// if borrow, round down and set round-down flag
	asm("bcc TRealXDivide6 ");
	asm("orrs r9, r6, r7 ");				// if no borrow, check if exactly half-way
	asm("moveqs r9, r1, lsr #1 ");			// if exactly half-way, round to even
	asm("orrcc r3, r3, #0x100 ");			// if C=0, round result down and set round-down flag
	asm("bcc TRealXDivide6 ");
	asm("orr r3, r3, #0x200 ");				// else set round-up flag
	asm("adds r1, r1, #1 ");				// and round mantissa up
	asm("adcs r2, r2, #0 ");
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa = 80000000 00000000
	asm("addcs r0, r0, #1 ");				// and increment exponent

	// check for overflow or underflow and assemble final result
	asm("TRealXDivide6: ");
	asm("add r4, r0, #1 ");					// need to add 1 to get usable threshold
	asm("cmp r4, #0x10000 ");				// check if exponent >= 0xFFFF
	asm("bge TRealXMultiply6 ");			// if so, overflow
	asm("cmp r0, #0 ");						// check for underflow
	asm("orrgt r3, r3, r0, lsl #16 ");		// if no underflow, result exponent into r3, ...
	asm("movgt r12, #0 ");					// ... return KErrNone ...
	asm("bicgt pc, lr, #3 ");

	// underflow
	asm("and r3, r3, #1 ");					// set exponent=0, keep sign
	asm("mvn r12, #9 ");					// return KErrUnderflow
	asm("bic pc, lr, #3 ");

	// come here if divisor is zero, dividend finite
	asm("TRealXDivide3: ");
	asm("cmp r3, #0x10000 ");				// check if dividend also zero
	asm("bcc TRealXRealIndefinite ");		// if so, return 'real indefinite'
	asm("orr r3, r3, #0xFF000000 ");		// else return infinity with xor sign
	asm("orr r3, r3, #0x00FF0000 ");
	asm("mov r2, #0x80000000 ");
	asm("mov r1, #0 ");
	asm("mvn r12, #40 ");					// return KErrDivideByZero
	asm("bic pc, lr, #3 ");

	// Dividend is NaN or infinity
	asm("TRealXDivide1: ");
	asm("cmp r2, #0x80000000 ");			// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("cmn r6, #0x10000 ");				// check 2nd operand for NaN/infinity
	asm("mvncc r12, #8 ");					// if not, return KErrOverflow
	asm("biccc pc, lr, #3 ");

	// Dividend=infinity, divisor=NaN or infinity
	asm("cmp r5, #0x80000000 ");			// check 2nd operand for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("b TRealXRealIndefinite ");			// else return 'real indefinite'

	// Divisor is NaN or infinity, dividend finite
	asm("TRealXDivide2: ");
	asm("cmp r5, #0x80000000 ");			// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("and r3, r3, #1 ");					// else return zero with xor sign
	asm("bic pc, lr, #3 ");
	}
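
// The unrolled loops above perform a classic restoring division: shift the
// remainder accumulator left, trial-subtract the divisor, and shift the
// borrow-complement in as the next quotient bit. A hedged 32-bit sketch of
// one such loop (illustrative helper; the assembler works on a 96-bit
// accumulator and generates 64 quotient bits plus a guard bit); not compiled.
#if 0
#include <stdint.h>

// Assumes both mantissas are normalised (top bit set) and the dividend has
// already been reduced so that aDividend < aDivisor, as in the code above.
static uint32_t DivideMantissa(uint32_t aDividend, uint32_t aDivisor)
	{
	uint64_t rem = aDividend;
	uint32_t quotient = 0;
	for (int i = 0; i < 32; ++i)
		{
		rem <<= 1;						// shift accumulator left by one
		quotient <<= 1;
		if (rem >= aDivisor)			// trial subtraction succeeded...
			{
			rem -= aDivisor;
			quotient |= 1u;				// ...so shift in a 1 bit
			}
		}
	return quotient;
	}
#endif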




__NAKED__ EXPORT_C TInt TRealX::ModEq(const TRealX& /*aVal*/)
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrTotalLossOfPrecision, if precision is lost;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXModulo ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("mov r0, r12 ");
	__JUMP(,lr);

	// TRealX remainder r1,r2,r3 % r4,r5,r6 result in r1,r2,r3
	// Error code returned in r12
	// Registers r0-r7,r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TRealXModulo: ");
	asm("mov r12, #0 ");					// initialise return value to KErrNone
	asm("cmn r3, #0x10000 ");				// check if dividend is NaN or infinity
	asm("bcs TRealXModulo1 ");				// branch if it is
	asm("cmn r6, #0x10000 ");				// check if divisor is NaN or infinity
	asm("bcs TRealXModulo2 ");				// branch if it is
	asm("cmp r6, #0x10000 ");				// check if divisor zero
	asm("bcc TRealXRealIndefinite ");		// if it is, return 'real indefinite'
	asm("mov r0, r3, lsr #16 ");			// r0=dividend exponent
	asm("subs r0, r0, r6, lsr #16 ");		// r0=dividend exponent-divisor exponent
	__JUMP(lt,lr);
	asm("cmp r0, #64 ");					// check if difference >= 64 bits
	asm("bcs TRealXModuloLp ");				// if so, underflow
	asm("b TRealXModulo4 ");				// skip left shift on first iteration

	asm("TRealXModulo3: ");
	asm("adds r1, r1, r1 ");				// shift dividend mantissa left one bit
	asm("adcs r2, r2, r2 ");
	asm("bcs TRealXModulo5 ");				// if one shifted out, override comparison
	asm("TRealXModulo4: ");
	asm("cmp r2, r5 ");						// compare dividend to divisor
	asm("cmpeq r1, r4 ");
	asm("bcc TRealXModulo6 ");				// if dividend<divisor, skip
	asm("TRealXModulo5: ");
	asm("subs r1, r1, r4 ");				// if dividend>=divisor, dividend-=divisor
	asm("sbcs r2, r2, r5 ");
	asm("TRealXModulo6: ");
	asm("subs r0, r0, #1 ");				// decrement loop count
	asm("bpl TRealXModulo3 ");				// if more bits to do, loop

	asm("orrs r0, r1, r2 ");				// test for exact zero result
	asm("andeq r3, r3, #1 ");				// if so, return zero with same sign as dividend
	__JUMP(eq,lr);
	asm("and r7, r3, #1 ");					// dividend sign bit into r7
	asm("mov r3, r6, lsr #16 ");			// r3 lower 16 bits=result exponent=divisor exponent
	asm("cmp r2, #0 ");						// test if upper 32 bits zero
	asm("moveq r2, r1 ");					// if so, shift left by 32
	asm("moveq r1, #0 ");
	asm("subeqs r3, r3, #32 ");				// and subtract 32 from exponent
	asm("bls TRealXModuloUnderflow ");		// if borrow from exponent or exponent 0, underflow
	asm("mov r0, #32 ");					// r0 will hold 32-number of shifts to normalise
	asm("cmp r2, #0x00010000 ");			// normalise
	asm("movcc r2, r2, lsl #16 ");
	asm("subcc r0, r0, #16 ");
	asm("cmp r2, #0x01000000 ");
	asm("movcc r2, r2, lsl #8 ");
	asm("subcc r0, r0, #8 ");
	asm("cmp r2, #0x10000000 ");
	asm("movcc r2, r2, lsl #4 ");
	asm("subcc r0, r0, #4 ");
	asm("cmp r2, #0x40000000 ");
	asm("movcc r2, r2, lsl #2 ");
	asm("subcc r0, r0, #2 ");
	asm("cmp r2, #0x80000000 ");
	asm("movcc r2, r2, lsl #1 ");			// top bit of r2 is now set
	asm("subcc r0, r0, #1 ");
	asm("orr r2, r2, r1, lsr r0 ");			// top bits of r1 into bottom bits of r2
	asm("rsb r0, r0, #32 ");				// r0=number of shifts to normalise
	asm("mov r1, r1, lsl r0 ");				// shift r1 left - mantissa now normalised
	asm("subs r3, r3, r0 ");				// subtract r0 from exponent
	asm("bls TRealXModuloUnderflow ");		// if borrow from exponent or exponent 0, underflow
	asm("orr r3, r7, r3, lsl #16 ");		// else r3=result exponent and sign
	__JUMP(,lr);

	// dividend=NaN or infinity
	asm("TRealXModulo1: ");
	asm("cmp r2, #0x80000000 ");			// check for infinity
	asm("cmpeq r1, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("cmn r6, #0x10000 ");				// check 2nd operand for NaN/infinity
	asm("bcc TRealXRealIndefinite ");		// infinity%finite - return 'real indefinite'
	asm("cmp r5, #0x80000000 ");			// check if divisor=infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	asm("b TRealXRealIndefinite ");			// else infinity%infinity - return 'real indefinite'

	// divisor=NaN or infinity, dividend finite
	asm("TRealXModulo2: ");
	asm("cmp r5, #0x80000000 ");			// check for infinity
	asm("cmpeq r4, #0 ");
	asm("bne TRealXBinOpNan ");				// branch if NaN
	__JUMP(,lr);

	asm("TRealXModuloLp: ");
	asm("mvn r12, #%a0" : : "i" ((TInt)~KErrTotalLossOfPrecision));
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");
	__JUMP(,lr);

	asm("TRealXModuloUnderflow: ");
	asm("mvn r12, #%a0" : : "i" ((TInt)~KErrUnderflow));
	asm("mov r1, #0 ");
	asm("mov r2, #0 ");
	asm("and r3, r3, #1 ");
	__JUMP(,lr);
	}
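
// TRealXModulo computes the remainder by binary long division without ever
// forming a quotient: for each bit of exponent difference it shifts the
// dividend mantissa up and subtracts the divisor mantissa whenever it fits.
// A hedged 32-bit sketch of that loop (illustrative helper; the first
// iteration skips the shift, exactly as the branch to TRealXModulo4 does);
// not compiled.
#if 0
#include <stdint.h>

static uint32_t ModMantissa(uint32_t aDividend, uint32_t aDivisor, int aExpDiff)
	{
	uint64_t rem = aDividend;			// widened so the shift cannot lose a bit
	for (int i = 0; i <= aExpDiff; ++i)	// aExpDiff+1 iterations, like "subs/bpl"
		{
		if (i)
			rem <<= 1;					// no shift on the first iteration
		if (rem >= aDivisor)
			rem -= aDivisor;
		}
	return (uint32_t)rem;				// now < aDivisor, so it fits in 32 bits
	}
#endif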




__NAKED__ EXPORT_C TInt TRealX::Add(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Adds an extended precision value to this extended precision number.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal    The extended precision value to be added.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	asm("stmfd sp!, {r1,r4-r8,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {lr} ");				// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4-r8,");
	}
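
// The non-mutating Add/Sub/Mult/Div/Mod wrappers differ from the ...Eq forms
// only in where the result lands. A hedged usage sketch (values illustrative,
// error handling elided); not compiled.
#if 0
static void ExampleUse()
	{
	TRealX a, b, sum;
	a.Set(2);					// a = 2
	b.Set(3);					// b = 3
	TInt r = a.Add(sum, b);		// sum = a + b; a is unchanged, r = error code
	TInt r2 = a.AddEq(b);		// a += b in place, same error convention
	(void)r; (void)r2;
	}
#endif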




__NAKED__ EXPORT_C TInt TRealX::Sub(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Subtracts an extended precision value from this extended precision number.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal    The extended precision value to be subtracted.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	asm("stmfd sp!, {r1,r4-r8,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {lr} ");				// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4-r8,");
	}




__NAKED__ EXPORT_C TInt TRealX::Mult(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Multiplies this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal    The extended precision value to be used as the multiplier.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	asm("stmfd sp!, {r1,r4-r7,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {lr} ");				// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4-r7,");
	}




__NAKED__ EXPORT_C TInt TRealX::Div(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Divides this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.
@param aVal    The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrOverflow, if the operation results in overflow;
        KErrUnderflow, if the operation results in underflow;
        KErrDivideByZero, if the divisor is zero.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	asm("stmfd sp!, {r1,r4-r9,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {lr} ");				// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4-r9,");
	}




__NAKED__ EXPORT_C TInt TRealX::Mod(TRealX& /*aResult*/,const TRealX& /*aVal*/) const
/**
Modulo-divides this extended precision number by an extended precision value.

@param aResult On return, a reference to an extended precision object
               containing the result of the operation.

@param aVal    The extended precision value to be used as the divisor.

@return KErrNone, if the operation is successful;
        KErrTotalLossOfPrecision, if precision is lost;
        KErrUnderflow, if the operation results in underflow.
*/
	{
	// r0=this, r1=&aResult, r2=&aVal
	asm("stmfd sp!, {r1,r4-r7,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXModulo ");
	asm("ldmfd sp!, {lr} ");				// lr=&aResult
	asm("stmia lr, {r1,r2,r3} ");
	asm("mov r0, r12 ");					// return value into r0
	__POPRET("r4-r7,");
	}

extern void PanicOverUnderflowDividebyZero(const TInt aErr);




__NAKED__ EXPORT_C const TRealX& TRealX::operator+=(const TRealX& /*aVal*/)
/**
Adds an extended precision value to this extended precision number.

@param aVal The extended precision value to be added.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}




__NAKED__ EXPORT_C const TRealX& TRealX::operator-=(const TRealX& /*aVal*/)
/**
Subtracts an extended precision value from this extended precision number.

@param aVal The extended precision value to be subtracted.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}




__NAKED__ EXPORT_C const TRealX& TRealX::operator*=(const TRealX& /*aVal*/)
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}




__NAKED__ EXPORT_C const TRealX& TRealX::operator/=(const TRealX& /*aVal*/)
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
@panic MATHX KErrDivideByZero if the divisor is zero.
*/
	{
	asm("stmfd sp!, {r0,r4-r9,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {r0,r4-r9,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}
sl@0
  2395
sl@0
  2396
sl@0
  2397
sl@0
  2398
sl@0
  2399
__NAKED__ EXPORT_C const TRealX& TRealX::operator%=(const TRealX& /*aVal*/)
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return A reference to this object.

@panic MATHX KErrTotalLossOfPrecision if precision is lost.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r1, {r4,r5,r6} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("bl TRealXModulo ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision));
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}

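// Editorial note: the epilogue above deliberately treats
// KErrTotalLossOfPrecision as non-fatal - the "cmpne" folds that code into
// the normal-return path, so only other error codes reach the panic
// routine. A minimal usage sketch (illustrative, assumes the TRealX(TInt)
// constructor):
//
//	TRealX a(7), b(4);
//	a %= b;						// a==3.0; 7 mod 4

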
__NAKED__ EXPORT_C TRealX& TRealX::operator++()
/**
Increments this extended precision number by one,
and then returns a reference to it.

This is also referred to as a prefix operator.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// pre-increment
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("add r4, pc, #__TRealXOne-.-8 ");
	asm("ldmia r4, {r4,r5,r6} ");			// r4,r5,r6=1.0
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic

	asm("__TRealXOne: ");
	asm(".word 0x00000000 ");
	asm(".word 0x80000000 ");
	asm(".word 0x7FFF0000 ");
	}


__NAKED__ EXPORT_C TRealX TRealX::operator++(TInt)
/**
Returns this extended precision number before incrementing it by one.

This is also referred to as a postfix operator.

@return An extended precision object containing the value of this object
        before the increment.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// post-increment
	// r0=address of return value, r1=this
	asm("stmfd sp!, {r0,r1,r4-r8,lr} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("stmia r0, {r1,r2,r3} ");			// store old value
	asm("add r4, pc, #__TRealXOne-.-8 ");
	asm("ldmia r4, {r4,r5,r6} ");			// r4,r5,r6=1.0
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {r0,lr} ");				// restore r0, lr=this
	asm("stmia lr, {r1,r2,r3} ");			// store incremented value
	asm("ldmfd sp!, {r4-r8,lr} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}

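// Editorial usage sketch (illustrative, assumes the TRealX(TInt)
// constructor): prefix returns the incremented value, postfix returns the
// value held before the increment, matching standard C++ semantics:
//
//	TRealX v(2);
//	TRealX a = ++v;				// v==3, a==3 (prefix)
//	TRealX b = v++;				// v==4, b==3 (postfix)

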
__NAKED__ EXPORT_C TRealX& TRealX::operator--()
/**
Decrements this extended precision number by one,
and then returns a reference to it.

This is also referred to as a prefix operator.

@return A reference to this object.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// pre-decrement
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r0, {r1,r2,r3} ");
	asm("add r4, pc, #__TRealXOne-.-8 ");
	asm("ldmia r4, {r4,r5,r6} ");			// r4,r5,r6=1.0
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator--(TInt)
/**
Returns this extended precision number before decrementing it by one.

This is also referred to as a postfix operator.

@return An extended precision object containing the value of this object
        before the decrement.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// post-decrement
	// r0=address of return value, r1=this
	asm("stmfd sp!, {r0,r1,r4-r8,lr} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("stmia r0, {r1,r2,r3} ");			// store old value
	asm("add r4, pc, #__TRealXOne-.-8 ");
	asm("ldmia r4, {r4,r5,r6} ");			// r4,r5,r6=1.0
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {r0,lr} ");				// restore r0, lr=this
	asm("stmia lr, {r1,r2,r3} ");			// store decremented value
	asm("ldmfd sp!, {r4-r8,lr} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator+(const TRealX& /*aVal*/) const
/**
Adds an extended precision value to this extended precision number.

@param aVal The extended precision value to be added.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// r0=address of return value, r1=this, r2=&aVal
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("bl TRealXAdd ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator-(const TRealX& /*aVal*/) const
/**
Subtracts an extended precision value from this extended precision number.

@param aVal The extended precision value to be subtracted.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// r0=address of return value, r1=this, r2=&aVal
	asm("stmfd sp!, {r0,r4-r8,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("bl TRealXSubtract ");
	asm("ldmfd sp!, {r0,r4-r8,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator*(const TRealX& /*aVal*/) const
/**
Multiplies this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the multiplier.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// r0=address of return value, r1=this, r2=&aVal
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("bl TRealXMultiply ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator/(const TRealX& /*aVal*/) const
/**
Divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return An extended precision object containing the result.

@panic MATHX KErrOverflow if the operation results in overflow.
@panic MATHX KErrUnderflow if the operation results in underflow.
@panic MATHX KErrDivideByZero if the divisor is zero.
*/
	{
	// r0=address of return value, r1=this, r2=&aVal
	asm("stmfd sp!, {r0,r4-r9,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("bl TRealXDivide ");
	asm("ldmfd sp!, {r0,r4-r9,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}


__NAKED__ EXPORT_C TRealX TRealX::operator%(const TRealX& /*aVal*/) const
/**
Modulo-divides this extended precision number by an extended precision value.

@param aVal The extended precision value to be used as the divisor.

@return An extended precision object containing the result.

@panic MATHX KErrTotalLossOfPrecision if precision is lost.
@panic MATHX KErrUnderflow if the operation results in underflow.
*/
	{
	// r0=address of return value, r1=this, r2=&aVal
	asm("stmfd sp!, {r0,r4-r7,lr} ");
	asm("ldmia r2, {r4,r5,r6} ");
	asm("ldmia r1, {r1,r2,r3} ");
	asm("bl TRealXModulo ");
	asm("ldmfd sp!, {r0,r4-r7,lr} ");
	asm("stmia r0, {r1,r2,r3} ");
	asm("cmp r12, #0 ");					// check the error code
	asm("cmpne r12, #%a0" : : "i" ((TInt)KErrTotalLossOfPrecision));
	__JUMP(eq,lr);
	asm("mov r0, r12 ");
	asm("b  " CSM_Z30PanicOverUnderflowDividebyZeroi);	// else panic
	}

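// Editorial usage sketch for the binary operators above (illustrative,
// assumes the TRealX(TInt) constructor):
//
//	TRealX a(6), b(4);
//	TRealX s = a + b;			// 10
//	TRealX q = a / b;			// 1.5
//	TRealX m = a % b;			// 2; KErrTotalLossOfPrecision is non-fatal here too

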
#ifdef __REALS_MACHINE_CODED__
__NAKED__ EXPORT_C TInt Math::Sqrt( TReal &/*aDest*/, const TReal &/*aSrc*/ )
/**
Calculates the square root of a number.

@param aDest A reference containing the result.
@param aSrc  The number whose square-root is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
	{
	// r0=address of aDest, r1=address of aSrc

#ifdef __USE_VFP_MATH
	VFP_FLDD(CC_AL,0,1,0);
	VFP_FSQRTD(,0,0);
	VFP_FMRRD(CC_AL,3,2,0);
	asm("bic r1, r2, #0x80000000 ");	// remove sign bit
	asm("cmn r1, #0x00100000 ");		// check if exp=7FF
	asm("movpl r1, #0 ");				// if not return KErrNone
	asm("bpl donesqrt ");
	asm("movs r1, r1, lsl #12 ");		// if exp=7FF, check mantissa
	asm("cmpeq r3, #0 ");
	asm("moveq r1, #-9 ");				// if exp=7FF, mant=0, return KErrOverflow
	asm("mvnne r2, #0x80000000 ");		// else set NaN
	asm("mvnne r3, #0 ");
	asm("movne r1, #-6 ");				// and return KErrArgument
	asm("donesqrt: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r2,r3} ");			// store the result
#else
	asm("str r2, [r0, #4] ");
	asm("str r3, [r0, #0] ");
#endif
	asm("mov r0, r1 ");
	__JUMP(,lr);
#else // __USE_VFP_MATH
	asm("stmfd sp!, {r4-r10,lr} ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r3,r4} ");			// low mant into r4, sign:exp:high mant into r3
#else
	asm("ldr r3, [r1, #4] ");
	asm("ldr r4, [r1, #0] ");
#endif
	asm("bic r5, r3, #0xFF000000 ");
	asm("bic r5, r5, #0x00F00000 ");	// high word of mantissa into r5
	asm("mov r2, r3, lsr #20 ");
	asm("bics r2, r2, #0x800 ");		// exponent now in r2
	asm("beq fastsqrt1 ");				// branch if exponent zero (zero or denormal)
	asm("mov r6, #0xFF ");
	asm("orr r6, r6, #0x700 ");
	asm("cmp r2, r6 ");					// check for infinity or NaN
	asm("beq fastsqrt2 ");				// branch if infinity or NaN
	asm("movs r3, r3 ");				// test sign
	asm("bmi fastsqrtn ");				// branch if negative
	asm("sub r2, r2, #0xFF ");			// unbias the exponent
	asm("sub r2, r2, #0x300 ");
	asm("fastsqrtd1: ");
	asm("mov r1, #0x40000000 ");		// value for comparison
	asm("mov r3, #27 ");				// loop counter (number of bits/2)
	asm("movs r2, r2, asr #1 ");		// divide exponent by 2, LSB into CF
	asm("movcs r7, r5, lsl #11 ");		// mantissa into r6,r7 with MSB in MSB of r7
	asm("orrcs r7, r7, r4, lsr #21 ");
	asm("movcs r6, r4, lsl #11 ");
	asm("movcs r4, #0 ");				// r4, r5 will hold result mantissa
	asm("orrcs r7, r7, #0x80000000 ");	// if exponent odd, restore MSB of mantissa
	asm("movcc r7, r5, lsl #12 ");		// mantissa into r6,r7 with MSB in MSB of r7
	asm("orrcc r7, r7, r4, lsr #20 ");	// if exponent even, shift mantissa left an extra
	asm("movcc r6, r4, lsl #12 ");		// place, lose top bit, and
	asm("movcc r4, #1 ");				// set MSB of result, and
	asm("mov r5, #0 ");					// r4, r5 will hold result mantissa
	asm("mov r8, #0 ");					// r8, r9 will be comparison accumulator
	asm("mov r9, #0 ");
	asm("bcc fastsqrt4 ");				// if exponent even, calculate one less bit
										// as result MSB already known

	// Main mantissa square-root loop
	asm("fastsqrt3: ");					// START OF MAIN LOOP
	asm("subs r10, r7, r1 ");			// subtract result:01 from acc:mant
	asm("sbcs r12, r8, r4 ");			// result into r14:r12:r10
	asm("sbcs r14, r9, r5 ");
	asm("movcs r7, r10 ");				// if no borrow replace accumulator with result
	asm("movcs r8, r12 ");
	asm("movcs r9, r14 ");
	asm("adcs r4, r4, r4 ");			// shift result left one, putting in next bit
	asm("adcs r5, r5, r5 ");
	asm("mov r9, r9, lsl #2 ");			// shift acc:mant left by 2 bits
	asm("orr r9, r9, r8, lsr #30 ");
	asm("mov r8, r8, lsl #2 ");
	asm("orr r8, r8, r7, lsr #30 ");
	asm("mov r7, r7, lsl #2 ");
	asm("orr r7, r7, r6, lsr #30 ");
	asm("mov r6, r6, lsl #2 ");
	asm("fastsqrt4: ");					// Come in here if we need to do one less iteration
	asm("subs r10, r7, r1 ");			// subtract result:01 from acc:mant
	asm("sbcs r12, r8, r4 ");			// result into r14:r12:r10
	asm("sbcs r14, r9, r5 ");
	asm("movcs r7, r10 ");				// if no borrow replace accumulator with result
	asm("movcs r8, r12 ");
	asm("movcs r9, r14 ");
	asm("adcs r4, r4, r4 ");			// shift result left one, putting in next bit
	asm("adcs r5, r5, r5 ");
	asm("mov r9, r9, lsl #2 ");			// shift acc:mant left by 2 bits
	asm("orr r9, r9, r8, lsr #30 ");
	asm("mov r8, r8, lsl #2 ");
	asm("orr r8, r8, r7, lsr #30 ");
	asm("mov r7, r7, lsl #2 ");
	asm("orr r7, r7, r6, lsr #30 ");
	asm("mov r6, r6, lsl #2 ");
	asm("subs r3, r3, #1 ");			// decrement loop counter
	asm("bne fastsqrt3 ");				// do necessary number of iterations

	asm("movs r4, r4, lsr #1 ");		// shift result mantissa right 1 place
	asm("orr r4, r4, r5, lsl #31 ");	// LSB (=rounding bit) into carry
	asm("mov r5, r5, lsr #1 ");
	asm("adcs r4, r4, #0 ");			// round the mantissa to 53 bits
	asm("adcs r5, r5, #0 ");
	asm("cmp r5, #0x00200000 ");		// check for mantissa overflow
	asm("addeq r2, r2, #1 ");			// if so, increment exponent - can never overflow
	asm("bic r5, r5, #0x00300000 ");	// remove top bit of mantissa - it is implicit
	asm("add r2, r2, #0xFF ");			// re-bias the exponent
	asm("add r3, r2, #0x300 ");			// and move into r3
	asm("orr r3, r5, r3, lsl #20 ");	// r3 now contains exponent + top of mantissa
	asm("fastsqrt_ok: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r3,r4} ");			// store the result
#else
	asm("str r3, [r0, #4] ");
	asm("str r4, [r0, #0] ");
#endif
	asm("mov r0, #0 ");					// error code KErrNone
	__POPRET("r4-r10,");

	asm("fastsqrt1: ");
	asm("orrs r6, r5, r4 ");			// exponent zero - test mantissa
	asm("beq fastsqrt_ok ");			// if zero, return 0

	asm("movs r3, r3 ");				// denormal - test sign
	asm("bmi fastsqrtn ");				// branch out if negative
	asm("sub r2, r2, #0xFE ");			// unbias the exponent
	asm("sub r2, r2, #0x300 ");
	asm("fastsqrtd: ");
	asm("adds r4, r4, r4 ");			// shift mantissa left
	asm("adcs r5, r5, r5 ");
	asm("sub r2, r2, #1 ");				// and decrement exponent
	asm("tst r5, #0x00100000 ");		// test if normalised
	asm("beq fastsqrtd ");				// loop until normalised
	asm("b fastsqrtd1 ");				// now treat as a normalised number
	asm("fastsqrt2: ");					// get here if infinity or NaN
	asm("orrs r6, r5, r4 ");			// if mantissa zero, infinity
	asm("bne fastsqrtnan ");			// branch if not - must be NaN
	asm("movs r3, r3 ");				// test sign of infinity
	asm("bmi fastsqrtn ");				// branch if -ve
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r3,r4} ");			// store the result
#else
	asm("str r3, [r0, #4] ");
	asm("str r4, [r0, #0] ");
#endif
	asm("mov r0, #-9 ");				// return KErrOverflow
	asm("b fastsqrt_end ");

	asm("fastsqrtn: ");					// get here if negative or QNaN operand
	asm("mov r3, #0xFF000000 ");		// generate "real indefinite" QNaN
	asm("orr r3, r3, #0x00F80000 ");	// sign=1, exp=7FF, mantissa = 1000...0
	asm("mov r4, #0 ");
	asm("fastsqrtxa: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r3,r4} ");			// store the result
#else
	asm("str r3, [r0, #4] ");
	asm("str r4, [r0, #0] ");
#endif
	asm("mov r0, #-6 ");				// return KErrArgument
	asm("fastsqrt_end: ");
	__POPRET("r4-r10,");

	asm("fastsqrtnan: ");				// operand is a NaN
	asm("tst r5, #0x00080000 ");		// test MSB of mantissa
	asm("bne fastsqrtn ");				// if set it is a QNaN - so return "real indefinite"
	asm("bic r3, r3, #0x00080000 ");	// else convert SNaN to QNaN
	asm("b fastsqrtxa ");				// and return KErrArgument
#endif // __USE_VFP_MATH
	}

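// Editorial usage sketch for Math::Sqrt (illustrative):
//
//	TReal root;
//	TInt r = Math::Sqrt(root, 2.0);		// r==KErrNone, root ~ 1.4142135623730951
//	r = Math::Sqrt(root, -1.0);			// r==KErrArgument, root is set to a quiet NaN

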
__NAKED__ EXPORT_C TReal Math::Poly(TReal /*aX*/,const SPoly* /*aPoly*/) __SOFTFP
/**
Evaluates the polynomial:
{a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.

@param aX    The value of the x-variable
@param aPoly A pointer to the structure containing the set of coefficients
             in the order: a[0], a[1], ..., a[n-1], a[n].

@return The result of the evaluation.
*/
//
// Evaluate a power series in x for a P_POLY coefficient table.
// Changed to use TRealX throughout the calculation
//
	{
	// On entry r0,r1=aX, r2=aPoly
	asm("stmfd sp!, {r4-r11,lr} ");
	asm("mov r11, r2 ");
	asm("ldr r10, [r11], #4 ");			// r10=number of coefficients, r11=first coeff addr
	asm("add r11, r11, r10, lsl #3 ");	// r11=address of last coefficient+8
	asm("mov r2, r1 ");					// aX into r1,r2
	asm("mov r1, r0 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("ldmdb r11!, {r1,r2} ");		// last coefficient into r1,r2
	asm("bl ConvertTReal64ToTRealX ");	// convert to TRealX in r1,r2,r3
	asm("subs r10, r10, #1 ");
	asm("beq polynomial0 ");			// if no more coefficients, exit

	asm("polynomial1: ");
	asm("stmfd sp!, {r4,r5,r6} ");		// save value of aX
	asm("bl TRealXMultiply ");			// r *= aX
	asm("mov r4, r1 ");					// move result into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("ldmdb r11!, {r1,r2} ");		// next coefficient into r1,r2
	asm("bl ConvertTReal64ToTRealX ");	// convert to TRealX in r1,r2,r3
	asm("bl TRealXAdd ");				// r += *--pR
	asm("ldmfd sp!, {r4,r5,r6} ");		// aX back into r4,r5,r6
	asm("subs r10, r10, #1 ");			// iterate until all coefficients processed
	asm("bne polynomial1 ");

	asm("polynomial0: ");				// result now in r1,r2,r3
	asm("bl ConvertTRealXToTReal64 ");	// convert back to TReal64
	__POPRET("r4-r11,");
	}

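// Editorial usage sketch for Math::Poly (illustrative; assumes the SPoly
// layout from e32math.h - a coefficient count followed by the coefficient
// array - and uses a hypothetical fixed-size mirror of SPoly so that the
// coefficients can be initialised statically):
//
//	struct SPoly3 { TInt iNum; TReal iCoef[3]; };		// hypothetical mirror of SPoly
//	static const SPoly3 p = { 3, { 1.0, 2.0, 3.0 } };	// 3x^2 + 2x + 1
//	TReal y = Math::Poly(2.0, (const SPoly*)&p);		// y == 17.0

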
__NAKED__ EXPORT_C void Math::PolyX(TRealX& /*aY*/,const TRealX& /*aX*/,TInt /*aDeg*/,const TRealX* /*aCoef*/)
/**
Evaluates the polynomial:
{a[n]X^n + a[n-1]X^(n-1) + ... + a[2]X^2 + a[1]X^1 + a[0]}.

@param aY      A reference containing the result.
@param aX      The value of the x-variable.
@param aDeg    The degree of the polynomial (the highest power of x
               which is present).
@param aCoef   A pointer to a contiguous set of TRealX values containing
               the coefficients.
               They must be in the order: a[0], a[1], ..., a[n-1], a[n].
*/
//
// Evaluate a polynomial with TRealX argument, coefficients and result
//
	{
	// On entry r0=&aY, r1=&aX, r2=aDeg, r3=aCoef
	asm("stmfd sp!, {r0,r4-r11,lr} ");
	asm("add r11, r3, r2, lsl #3 ");	// r11=address of last coefficient
	asm("add r11, r11, r2, lsl #2 ");
	asm("mov r9, r1 ");					// r9=address of argument
	asm("movs r10, r2 ");				// r10=number of coefficients-1
	asm("ldmia r11, {r1,r2,r3} ");		// last coefficient into r1,r2,r3
	asm("beq polyx0 ");					// if no more coefficients, exit

	asm("polyx1: ");
	asm("ldmia r9, {r4,r5,r6} ");		// aX into r4,r5,r6
	asm("bl TRealXMultiply ");			// result *= aX
	asm("ldmdb r11!, {r4,r5,r6} ");		// next coefficient into r4,r5,r6
	asm("bl TRealXAdd ");				// result += next coeff
	asm("subs r10, r10, #1 ");			// iterate until all coefficients processed
	asm("bne polyx1 ");

	asm("polyx0: ");					// result now in r1,r2,r3
	asm("ldmfd sp!, {r0,r4-r11,lr} ");	// restore registers, including destination address in r0
	asm("stmia r0, {r1,r2,r3} ");		// store result
	__JUMP(,lr);
	}

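// Editorial usage sketch for Math::PolyX (illustrative):
//
//	TRealX c[3], x, y;
//	c[0].Set(1); c[1].Set(2); c[2].Set(3);	// 3x^2 + 2x + 1
//	x.Set(2);
//	Math::PolyX(y, x, 2, c);				// degree 2, 3 coefficients; y == 17

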
#ifndef __USE_VFP_MATH
__NAKED__ EXPORT_C TInt Math::Int(TReal& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the integer part of a number.

The integer part is that before a decimal point.
Truncation is toward zero, so that
int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.

@param aTrg A reference containing the result.
@param aSrc The number whose integer part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// Write the integer part of aSrc to the TReal at aTrg
// Negative numbers are rounded towards zero.
//
	{
	// r0=&aTrg, r1=&aSrc, return value in r0
	asm("stmfd sp!, {lr} ");
	asm("mov r12, r0 ");				// r12=&aTrg
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r0,r1} ");			// input value into r0,r1
#else
	asm("ldr r0, [r1, #4] ");
	asm("ldr r1, [r1, #0] ");
#endif
	asm("bl TReal64Int ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r12, {r0,r1} ");			// store result
#else
	asm("str r0, [r12, #4] ");
	asm("str r1, [r12, #0] ");
#endif
	asm("bic r0, r0, #0x80000000 ");	// remove sign bit
	asm("cmn r0, #0x00100000 ");		// check for NaN or infinity
	asm("movpl r0, #0 ");				// if neither, return KErrNone
	asm("bpl math_int_0 ");
	asm("movs r0, r0, lsl #12 ");		// check for infinity
	asm("cmpeq r1, #0 ");
	asm("mvneq r0, #8 ");				// if infinity return KErrOverflow
	asm("mvnne r0, #5 ");				// else return KErrArgument
	asm("math_int_0: ");
	__POPRET("");

	// Take integer part of TReal64 in r0,r1
	// Infinity and NaNs are unaffected
	// r0-r3 modified
	asm("TReal64Int: ");
	asm("mov r2, r0, lsr #20 ");
	asm("bic r2, r2, #0x800 ");			// r2=exponent
	asm("mov r3, #0x300 ");
	asm("orr r3, r3, #0xFF ");			// r3=0x3FF
	asm("subs r2, r2, r3 ");			// r2=exponent-3FF=number of integer bits-1
	asm("ble TReal64Int1 ");			// branch if <=1 integer bits
	asm("cmp r2, #52 ");
	__JUMP(ge,lr);
	asm("cmp r2, #20 ");
	asm("bgt TReal64Int2 ");			// jump if >21 integer bits (r0 will be unaffected)
	asm("rsb r2, r2, #20 ");			// r2=number of bits to clear at bottom end of r0
	asm("mov r0, r0, lsr r2 ");			// clear them
	asm("mov r0, r0, lsl r2 ");
	asm("mov r1, #0 ");					// clear r1
	__JUMP(,lr);
	asm("TReal64Int2: ");
	asm("rsb r2, r2, #52 ");			// r2=number of bits to clear at bottom end of r1
	asm("mov r1, r1, lsr r2 ");			// clear them
	asm("mov r1, r1, lsl r2 ");
	__JUMP(,lr);
	asm("TReal64Int1: ");				// result is either 0 or 1
	asm("mov r1, #0 ");					// lower mantissa bits of result will be zero
	asm("moveq r0, r0, lsr #20 ");		// if result is 1, clear mantissa but leave exponent
	asm("moveq r0, r0, lsl #20 ");
	asm("andlt r0, r0, #0x80000000 ");	// if result is 0, clear mantissa and exponent
	__JUMP(,lr);
	}

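// Editorial usage sketch for Math::Int (illustrative):
//
//	TReal i;
//	Math::Int(i, 2.7);			// i == 2.0
//	Math::Int(i, -1.999);		// i == -1.0 (truncation is toward zero)

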
__NAKED__ EXPORT_C TInt Math::Int(TInt16& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the integer part of a number.

The integer part is that before a decimal point.
Truncation is toward zero, so that:
int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.

This function is suitable when the result is known to be small enough
for a 16-bit signed integer.

@param aTrg A reference containing the result.
@param aSrc The number whose integer part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// If the integer part of aSrc is in the range -32768 to +32767
// inclusive, write the integer part to the TInt16 at aTrg
// Negative numbers are rounded towards zero.
// If an overflow or underflow occurs, aTrg is set to the max/min value
//
	{
	// r0=&aTrg, r1=&aSrc
	asm("stmfd sp!, {lr} ");
	asm("mov r3, r0 ");					// r3=&aTrg
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r0,r1} ");			// input value into r0,r1
#else
	asm("ldr r0, [r1, #4] ");
	asm("ldr r1, [r1, #0] ");
#endif
	asm("bl TReal64GetTInt ");			// do the conversion
	asm("cmp r0, #0x8000 ");			// limit answer to TInt16 range
	asm("movge r0, #0x7F00 ");
	asm("orrge r0, r0, #0xFF ");
	asm("mvnge r12, #8 ");				// set error code if limiting occurred
	asm("cmn r0, #0x8000 ");
	asm("movlt r0, #0x8000 ");
	asm("mvnlt r12, #9 ");				// set error code if limiting occurred
	asm("mov r1, r0, lsr #8 ");			// top byte of answer into r1
	asm("strb r0, [r3] ");				// store result in aTrg
	asm("strb r1, [r3, #1] ");
	asm("mov r0, r12 ");				// return error code in r0
	__POPRET("");
	}


__NAKED__ EXPORT_C TInt Math::Int(TInt32& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the integer part of a number.

The integer part is that before a decimal point.
Truncation is toward zero, so that
int(2.4)=2, int(2)=2, int(-1)=-1, int(-1.4)=-1, int(-1.999)=-1.

This function is suitable when the result is known to be small enough
for a 32-bit signed integer.

@param aTrg A reference containing the result.
@param aSrc The number whose integer part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
//
// If the integer part of the float is in the range -2147483648 to +2147483647
// inclusive, write the integer part to the TInt32 at aTrg
// Negative numbers are rounded towards zero.
// If an overflow or underflow occurs, aTrg is set to the max/min value
//
	{
	// r0=&aTrg, r1=&aSrc
	asm("stmfd sp!, {lr} ");
	asm("mov r3, r0 ");					// r3=&aTrg
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r0,r1} ");			// input value into r0,r1
#else
	asm("ldr r0, [r1, #4] ");
	asm("ldr r1, [r1, #0] ");
#endif
	asm("bl TReal64GetTInt ");			// do the conversion
	asm("str r0, [r3] ");				// store result in aTrg
	asm("mov r0, r12 ");				// return error code in r0
	__POPRET("");

	//  Convert double in r0,r1 to int in r0
	//	Return error code in r12
	//	Registers r0,r1,r2,r12 modified
	asm("TReal64GetTInt: ");
	asm("mov r2, r0, lsr #20 ");
	asm("bic r2, r2, #0x800 ");			// r2=exponent
	asm("add r12, r2, #1 ");
	asm("cmp r12, #0x800 ");			// check for NaN or infinity
	asm("bne TReal64GetTInt1 ");
	asm("movs r12, r0, lsl #12 ");		// exponent=7FF, check mantissa
	asm("cmpeq r1, #0 ");
	asm("movne r0, #0 ");				// if non-zero, input is a NaN so return 0
	asm("mvnne r12, #5 ");				// and return KErrArgument
	__JUMP(ne,lr);
	asm("TReal64GetTInt1: ");
	asm("mov r12, #0x400 ");
	asm("orr r12, r12, #0x1E ");		// r12=0x41E (exponent of 2^31)
	asm("subs r2, r12, r2 ");			// r2=number of shifts to produce integer
	asm("mov r12, #0 ");				// set return code to KErrNone
	asm("ble TReal64GetTInt2 ");		// if <=0, saturate result
	asm("cmp r2, #31 ");				// check if more than 31 shifts needed
	asm("movhi r0, #0 ");				// if so, underflow result to 0
	__JUMP(hi,lr);
	asm("cmp r0, #0 ");					// check sign bit
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");		// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("mov r0, r0, lsr r2 ");			// r0=absolute integer
	asm("rsbmi r0, r0, #0 ");			// if negative, negate
	__JUMP(,lr);
	asm("TReal64GetTInt2: ");
	asm("blt TReal64GetTInt3 ");		// if exponent>0x41E, definitely an overflow
	asm("cmp r0, #0 ");					// check sign bit
	asm("bpl TReal64GetTInt3 ");		// if positive, definitely an overflow
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");		// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("cmp r0, #0x80000000 ");		// check if value is = -2^31
	__JUMP(eq,lr);
	asm("TReal64GetTInt3: ");
	asm("cmp r0, #0 ");					// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");			// if -ve return 80000000, if +ve return 7FFFFFFF
	asm("mvnpl r12, #8 ");				// if +ve return KErrOverflow
	asm("mvnmi r12, #9 ");				// if -ve return KErrUnderflow
	__JUMP(,lr);
	}
#endif // __USE_VFP_MATH

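// Editorial usage sketch: the integer conversions saturate rather than
// wrap, and report the saturation through the return code (illustrative):
//
//	TInt32 n;
//	TInt r = Math::Int(n, 3.0e9);		// n == 0x7FFFFFFF, r == KErrOverflow
//	r = Math::Int(n, -7.5);				// n == -7, r == KErrNone

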
__NAKED__ EXPORT_C TBool Math::IsZero(const TReal& /*aVal*/)
/**
Determines whether a value is zero.

@param aVal A reference to the value to be checked.

@return True, if aVal is zero; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");			// input value into r1,r2
#else
	asm("ldr r2, [r0, #0] ");
	asm("ldr r1, [r0, #4] ");
#endif
	asm("TReal64IsZero: ");
	asm("mov r0, #0 ");					// default return value is 0
	asm("bics r1, r1, #0x80000000 ");	// remove sign bit
	asm("cmpeq r2, #0 ");				// and check both exponent and mantissa are zero
	asm("moveq r0, #1 ");				// return 1 if zero
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool Math::IsNaN(const TReal& /*aVal*/)
/**
Determines whether a value is not a number.

@param aVal A reference to the value to be checked.

@return True, if aVal is not a number; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");			// input value into r1,r2
#else
	asm("ldr r2, [r0, #0] ");
	asm("ldr r1, [r0, #4] ");
#endif
	asm("TReal64IsNaN: ");
	asm("mov r0, #0 ");					// default return value is 0
	asm("bic r1, r1, #0x80000000 ");	// remove sign bit
	asm("cmn r1, #0x00100000 ");		// check if exponent=7FF
	__JUMP(pl,lr);
	asm("movs r1, r1, lsl #12 ");		// exponent=7FF, check mantissa
	asm("cmpeq r2, #0 ");
	asm("movne r0, #1 ");				// if mantissa nonzero, return 1
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool Math::IsInfinite(const TReal& /*aVal*/)
/**
Determines whether a value is infinite.

@param aVal A reference to the value to be checked.

@return True, if aVal is infinite; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r0, {r1,r2} ");			// input value into r1,r2
#else
	asm("ldr r2, [r0, #0] ");
	asm("ldr r1, [r0, #4] ");
#endif
	asm("TReal64IsInfinite: ");
	asm("mov r0, #0 ");					// default return value is 0
	asm("mov r3, #0x00200000 ");		// r3 == - (0x7FF00000 << 1)
	asm("cmp r2, #0 ");
	asm("cmneq r3, r1, lsl #1 ");		// check exp=7FF && mant=0
	asm("moveq r0, #1 ");				// if so, return 1
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C TBool Math::IsFinite(const TReal& /*aVal*/)
/**
Determines whether a value is finite.

In this context, a value is finite if it is a valid number and
is not infinite.

@param aVal A reference to the value to be checked.

@return True, if aVal is finite; false, otherwise.
*/
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldr r1, [r0, #0] ");			// only need exponent - get it into r1
#else
	asm("ldr r1, [r0, #4] ");			// only need exponent - get it into r1
#endif
	asm("TReal64IsFinite: ");
	asm("mov r0, #0 ");					// default return value is 0
	asm("bic r1, r1, #0x80000000 ");	// remove sign bit
	asm("cmn r1, #0x00100000 ");		// check if exponent=7FF
	asm("movpl r0, #1 ");				// if not, return 1
	__JUMP(,lr);
	}

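// Editorial usage sketch for the classification functions (illustrative):
//
//	TReal x = 0.0;
//	TBool z = Math::IsZero(x);			// ETrue
//	Math::SetNaN(x);
//	TBool n = Math::IsNaN(x);			// ETrue
//	TBool f = Math::IsFinite(x);		// EFalse

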
__NAKED__ EXPORT_C void Math::SetZero(TReal& /*aVal*/, TInt /*aSign*/)
//
// Constructs zeros, assuming default sign is positive
//
	{
	asm("cmp r1, #0 ");					// test aSign
	asm("movne r1, #0x80000000 ");		// if nonzero, set sign bit
	asm("mov r2, #0 ");					// mantissa=0
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C void Math::SetNaN(TReal& /*aVal*/)
//
// Constructs NaN (+ve sign for Java)
//
	{
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("mvn r1, #0x80000000 ");		// r1=7FFFFFFF
	asm("mvn r2, #0 ");					// r2=FFFFFFFF
#else
	asm("mvn r2, #0x80000000 ");		// r2=7FFFFFFF
	asm("mvn r1, #0 ");					// r1=FFFFFFFF
#endif
	asm("stmia r0, {r1,r2} ");
	__JUMP(,lr);
	}


__NAKED__ EXPORT_C void Math::SetInfinite(TReal& /*aVal*/, TInt /*aSign*/)
//
// Constructs infinities
//
	{
	asm("cmp r1, #0 ");					// test aSign
	asm("movne r1, #0x80000000 ");		// if nonzero, set sign bit
	asm("orr r1, r1, #0x70000000 ");	// set exponent to 7FF
	asm("orr r1, r1, #0x0FF00000 ");
	asm("mov r2, #0 ");					// mantissa=0
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	__JUMP(,lr);
	}

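// Editorial usage sketch for the special-value constructors (illustrative):
//
//	TReal x;
//	Math::SetZero(x, 1);				// x == -0.0 (nonzero aSign selects the negative value)
//	Math::SetInfinite(x, 0);			// x == +infinity
//	Math::SetNaN(x);					// x is the NaN built above

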
#ifndef __USE_VFP_MATH
__NAKED__ EXPORT_C TInt Math::Frac(TReal& /*aTrg*/, const TReal& /*aSrc*/)
/**
Calculates the fractional part of a number.

The fractional part is that after a decimal point.
Truncation is toward zero, and the sign of the argument is preserved,
so that Frac(2.4)=0.4, Frac(2)=0, Frac(-1)=0, Frac(-1.4)=-0.4.

@param aTrg A reference containing the result.
@param aSrc The number whose fractional part is required.

@return KErrNone if successful, otherwise another of
        the system-wide error codes.
*/
	{
	// on entry r0=&aTrg, r1=&aSrc
	// on exit r0=return code
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("ldmia r1, {r1,r2} ");		// r1,r2=aSrc
#else
	asm("ldr r2, [r1, #0] ");
	asm("ldr r1, [r1, #4] ");
#endif
	asm("and r3, r1, #0x80000000 ");
	asm("str r3, [sp, #-4]! ");		// save sign
	asm("mov r3, r1, lsr #20 ");
	asm("bic r3, r3, #0x800 ");		// r3=exponent of aSrc
	asm("mov r12, #0x300 ");
	asm("orr r12, r12, #0xFE ");	// r12=0x3FE
	asm("subs r3, r3, r12 ");		// r3=exponent of aSrc-0x3FE=number of integer bits
	asm("ble MathFrac0 ");			// if <=0, return aSrc unaltered
	asm("cmp r3, #53 ");
	asm("bge MathFrac1 ");			// if >=53 integer bits, there is no fractional part
	asm("mov r1, r1, lsl #11 ");	// left-justify mantissa in r1,r2
	asm("orr r1, r1, r2, lsr #21 ");
	asm("mov r2, r2, lsl #11 ");
	asm("cmp r3, #32 ");			// check for >=32 integer bits
	asm("bge MathFrac2 ");
	asm("rsb r12, r3, #32 ");
	asm("mov r1, r1, lsl r3 ");		// shift mantissa left by number of integer bits
	asm("orrs r1, r1, r2, lsr r12 ");
	asm("mov r2, r2, lsl r3 ");
	asm("mov r3, #0x300 ");			// r3 holds exponent = 0x3FE initially
	asm("orr r3, r3, #0xFE ");
	asm("beq MathFrac3 ");			// branch if >=32 shifts to normalise
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12,1);
	asm("mov r1, r1, lsl r12 ");
	asm("rsb r12, r12, #32 ");
	asm("orr r1, r1, r2, lsr r12 ");
	asm("rsb r12, r12, #32 ");
#else
	asm("mov r12, #32 ");			// else r12=32-number of shifts needed
	asm("cmp r1, #0x10000 ");		// calculate shift count
	asm("movcc r1, r1, lsl #16 ");
	asm("subcc r12, r12, #16 ");
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r12, r12, #8 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r12, r12, #4 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r12, r12, #2 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r12, r12, #1 ");
	asm("orr r1, r1, r2, lsr r12 ");	// normalise
	asm("rsb r12, r12, #32 ");			// r12=shift count
#endif
	asm("mov r2, r2, lsl r12 ");
	asm("sub r3, r3, r12 ");			// exponent-=shift count
	asm("b MathFrac4 ");				// branch to assemble and store result

	// come here if >=32 shifts to normalise
	asm("MathFrac3: ");
	asm("sub r3, r3, #32 ");		// decrement exponent by 32
	asm("movs r1, r2 ");			// shift left by 32, set Z if result zero
	asm("mov r2, #0 ");
	asm("bne MathFrac6 ");			// if result nonzero, normalise
	asm("beq MathFrac5 ");			// branch if result zero

	// come here if >=32 integer bits
	asm("MathFrac2: ");
	asm("sub r3, r3, #32 ");
	asm("movs r1, r2, lsl r3 ");	// shift left by number of integer bits, set Z if result zero
	asm("mov r2, #0 ");
	asm("mov r3, #0x300 ");			// r3 holds exponent = 0x3FE initially
	asm("orr r3, r3, #0xFE ");
	asm("beq MathFrac5 ");			// branch if result zero
	asm("MathFrac6: ");
	asm("cmp r1, #0x10000 ");		// else normalise
	asm("movcc r1, r1, lsl #16 ");
	asm("subcc r3, r3, #16 ");
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r3, r3, #8 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r3, r3, #4 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r3, r3, #2 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r3, r3, #1 ");

	// come here to assemble and store result
	asm("MathFrac4: ");
	asm("bic r1, r1, #0x80000000 ");	// remove integer bit
	asm("mov r2, r2, lsr #11 ");		// shift mantissa right by 11
	asm("orr r2, r2, r1, lsl #21 ");
	asm("mov r1, r1, lsr #11 ");
	asm("ldr r12, [sp] ");
	asm("orr r1, r1, r3, lsl #20 ");	// exponent into r1 bits 20-30
	asm("orr r1, r1, r12 ");			// sign bit into r1 bit 31

	// come here to return source unaltered
	asm("MathFrac0: ");
	asm("add sp, sp, #4 ");
	asm("MathFrac_ok: ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");			// store result
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mov r0, #0 ");					// return KErrNone
	__JUMP(,lr);

	// come here if infinity, NaN or >=53 integer bits
	asm("MathFrac1: ");
	asm("cmp r3, #0x400 ");				// check for infinity/NaN
	asm("bhi MathFrac7 ");				// branch if so

	// come here to return zero
	asm("MathFrac5: ");
	asm("ldr r1, [sp], #4 ");			// r1 bit 31=sign, rest zero
	asm("mov r2, #0 ");
	asm("b MathFrac_ok ");

	// come here if infinity/NaN
	asm("MathFrac7: ");
	asm("movs r12, r1, lsl #12 ");		// check for infinity
	asm("cmpeq r2, #0 ");
	asm("bne MathFrac8 ");				// branch if NaN
	asm("ldr r1, [sp], #4 ");			// r1 bit 31=sign, rest zero
	asm("mov r2, #0 ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");			// store zero result
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mvn r0, #8 ");					// return KErrOverflow
	__JUMP(,lr);
	asm("MathFrac8: ");					// NaN
	asm("add sp, sp, #4 ");
#ifdef __DOUBLE_WORDS_SWAPPED__
	asm("stmia r0, {r1,r2} ");			// store NaN unchanged
#else
	asm("str r2, [r0, #0] ");
	asm("str r1, [r0, #4] ");
#endif
	asm("mvn r0, #5 ");					// return KErrArgument
	__JUMP(,lr);
	}
#endif // __USE_VFP_MATH
#endif

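// Editorial usage sketch for Math::Frac (illustrative; the sign word saved
// on entry is OR-ed back into the result, so the fraction keeps the sign of
// the argument):
//
//	TReal f;
//	Math::Frac(f, 2.4);			// f == 0.4 (to double precision)
//	Math::Frac(f, -1.4);		// f == -0.4

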
#ifdef __REALS_MACHINE_CODED__
#ifndef __ARMCC__
extern "C" {

extern "C" void __math_exception(TInt aErrType);
__NAKED__ EXPORT_C TReal32 __addsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Add two floats
//
    {
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXAdd ");				// add a1+a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r8,");
    }

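// Editorial note: __addsf3 and the helpers that follow implement the GCC
// soft-float runtime ABI; the compiler emits calls to them for TReal32 and
// TReal64 operators when no hardware FPU is used, so ordinary C++ such as
// (illustrative):
//
//	TReal32 Sum(TReal32 a, TReal32 b)
//		{
//		return a + b;			// compiles to a call to __addsf3 here
//		}
//
// is routed through the TRealX code in this file.

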
__NAKED__ EXPORT_C TReal64 __adddf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Add two doubles
//
    {
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r2 ");					// save a2
	asm("mov r8, r3 ");
	asm("mov r2, r1 ");					// a1 into r1,r2
	asm("mov r1, r0 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a2 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("bl TRealXAdd ");				// add a1+a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r8,");
    }

__NAKED__ EXPORT_C TReal32 __subsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Subtract two floats
//
    {
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXSubtract ");			// subtract a1-a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r8,");
	}

__NAKED__ EXPORT_C TReal64 __subdf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Subtract two doubles
//
    {
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r0 ");					// save a1
	asm("mov r8, r1 ");
	asm("mov r1, r2 ");					// a2 into r1,r2
	asm("mov r2, r3 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a1 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXSubtract ");			// subtract a1-a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r8,");
    }

__NAKED__ EXPORT_C TInt __cmpsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare two floats
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("mov r0, r0, lsl #28 ");
	asm("msr cpsr_flg, r0 ");			// N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
	asm("mov r0, #0 ");
	asm("mvnvs r0, #0 ");				// if a1<a2 r0=-1
	asm("moveq r0, #1 ");				// if a1>a2 r0=+1
	__POPRET("");

	// Compare two TReal32s in r0, r1.
	// Return 1 if r0<r1, 2 if r0=r1, 4 if r0>r1, 8 if unordered
	// Registers r0,r1,r12 modified
	asm("CompareTReal32: ");
	asm("mov r12, r0, lsr #23 ");
	asm("and r12, r12, #0xFF ");		// r12=r0 exponent
	asm("cmp r12, #0xFF ");				// check if r0 is a NaN
	asm("bne CompareTReal32a ");
	asm("movs r12, r0, lsl #9 ");		// exponent=FF, check mantissa
	asm("movne r0, #8 ");				// if not zero, r0 is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal32a: ");
	asm("mov r12, r1, lsr #23 ");
	asm("and r12, r12, #0xFF ");		// r12=r1 exponent
	asm("cmp r12, #0xFF ");				// check if r1 is a NaN
	asm("bne CompareTReal32b ");
	asm("movs r12, r1, lsl #9 ");		// exponent=FF, check mantissa
	asm("movne r0, #8 ");				// if not zero, r1 is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal32b: ");
	asm("bics r12, r0, #0x80000000 ");	// check if r0=0 (can be +0 or -0)
	asm("moveq r0, #0 ");				// if it is, make it +0
	asm("bics r12, r1, #0x80000000 ");	// check if r1=0 (can be +0 or -0)
	asm("moveq r1, #0 ");				// if it is, make it +0
	asm("teq r0, r1 ");					// test if signs different
	asm("bmi CompareTReal32c ");		// branch if different
	asm("cmp r0, r1 ");					// if same, check exponents + mantissas
	asm("moveq r0, #2 ");				// if equal, return 2
	__JUMP(eq,lr);
	asm("movhi r0, #4 ");				// if r0>r1, r0=4
	asm("movcc r0, #1 ");				// if r0<r1, r0=1
	asm("cmp r1, #0 ");					// check signs
	asm("eormi r0, r0, #5 ");			// if negative, switch 1 and 4
	__JUMP(,lr);
	asm("CompareTReal32c: ");			// come here if signs different
	asm("cmp r0, #0 ");					// check sign of r0
	asm("movpl r0, #4 ");				// if r0 nonnegative, then r0 is greater so return 4
	asm("movmi r0, #1 ");				// if r0 negative, return 1
	__JUMP(,lr);
    }

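// A reference model (illustrative only, not built) of the 1/2/4/8
// encoding produced by CompareTReal32/CompareTReal64; the helper name
// is invented for the sketch:
#if 0
static TInt CompareReal32Ref(TReal32 a, TReal32 b)
	{
	if (a != a || b != b)
		return 8;					// unordered: at least one NaN
	if (a < b)
		return 1;
	if (a == b)
		return 2;					// note +0 and -0 compare equal
	return 4;						// a > b
	}
#endif
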
__NAKED__ EXPORT_C TInt __cmpdf3(TReal64 /*a1*/,TReal64 /*a2*/)
//
// Compare two doubles
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("mov r0, r0, lsl #28 ");
	asm("msr cpsr_flg, r0 ");			// N=unordered, Z=(a1>a2), C=(a1=a2), V=(a1<a2)
	asm("mov r0, #0 ");
	asm("mvnvs r0, #0 ");				// if a1<a2 r0=-1
	asm("moveq r0, #1 ");				// if a1>a2 r0=+1
	__POPRET("");

	// Compare two TReal64s in r0,r1 and r2,r3.
	// Return 1 if r0,r1<r2,r3
	// Return 2 if r0,r1=r2,r3
	// Return 4 if r0,r1>r2,r3
	// Return 8 if unordered
	// Registers r0,r1,r12 modified
	asm("CompareTReal64: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r12, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r12 ");
	asm("mov r12, r2 ");
	asm("mov r2, r3 ");
	asm("mov r3, r12 ");
#endif
	asm("mov r12, r0, lsr #20 ");
	asm("bic r12, r12, #0x800 ");		// r12=first operand exponent
	asm("add r12, r12, #1 ");			// add 1 to get usable compare value
	asm("cmp r12, #0x800 ");			// check if first operand is a NaN
	asm("bne CompareTReal64a ");
	asm("movs r12, r0, lsl #12 ");		// exponent=7FF, check mantissa
	asm("cmpeq r1, #0 ");
	asm("movne r0, #8 ");				// if not zero, 1st op is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal64a: ");
	asm("mov r12, r2, lsr #20 ");
	asm("bic r12, r12, #0x800 ");		// r12=second operand exponent
	asm("add r12, r12, #1 ");			// add 1 to get usable compare value
	asm("cmp r12, #0x800 ");			// check if second operand is a NaN
	asm("bne CompareTReal64b ");
	asm("movs r12, r2, lsl #12 ");		// exponent=7FF, check mantissa
	asm("cmpeq r3, #0 ");
	asm("movne r0, #8 ");				// if not zero, 2nd op is a NaN so result is unordered
	__JUMP(ne,lr);
	asm("CompareTReal64b: ");
	asm("bics r12, r0, #0x80000000 ");	// check if first operand is zero (can be +0 or -0)
	asm("cmpeq r1, #0 ");
	asm("moveq r0, #0 ");				// if it is, make it +0
	asm("bics r12, r2, #0x80000000 ");	// check if second operand is zero (can be +0 or -0)
	asm("cmpeq r3, #0 ");
	asm("moveq r2, #0 ");				// if it is, make it +0
	asm("teq r0, r2 ");					// test if signs different
	asm("bmi CompareTReal64c ");		// branch if different
	asm("cmp r0, r2 ");					// if same, check exponents + mantissas
	asm("cmpeq r1, r3 ");
	asm("moveq r0, #2 ");				// if equal, return 2
	__JUMP(eq,lr);
	asm("movhi r0, #4 ");				// if 1st operand > 2nd operand, r0=4
	asm("movcc r0, #1 ");				// if 1st operand < 2nd operand, r0=1
	asm("cmp r2, #0 ");					// check signs
	asm("eormi r0, r0, #5 ");			// if negative, switch 1 and 4
	__JUMP(,lr);
	asm("CompareTReal64c: ");			// come here if signs different
	asm("cmp r0, #0 ");					// check sign of r0
	asm("movpl r0, #4 ");				// if first operand nonnegative, return 4
	asm("movmi r0, #1 ");				// if first operand negative, return 1
	__JUMP(,lr);
    }

__NAKED__ EXPORT_C TInt __eqsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if two floats are equal
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #2 ");
	asm("movne r0, #0 ");				// if ordered and equal return 0
	asm("moveq r0, #1 ");				// else return 1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __eqdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if two doubles are equal
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #2 ");
	asm("movne r0, #0 ");				// if ordered and equal return 0
	asm("moveq r0, #1 ");				// else return 1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __nesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if two floats are not equal
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #5 ");					// test if ordered and unequal
	asm("moveq r0, #0 ");				// if equal or unordered return 0
	asm("movne r0, #1 ");				// if ordered and unequal return 1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __nedf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if two doubles are not equal
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #5 ");					// test if ordered and unequal
	asm("moveq r0, #0 ");				// if equal or unordered return 0
	asm("movne r0, #1 ");				// if ordered and unequal return 1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __gtsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is greater than another
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #4 ");					// test if ordered and a1>a2
	asm("movne r0, #1 ");				// if ordered and a1>a2 return +1
	asm("mvneq r0, #0 ");				// else return -1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __gtdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is greater than another
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #4 ");					// test if ordered and a1>a2
	asm("movne r0, #1 ");				// if ordered and a1>a2 return +1
	asm("mvneq r0, #0 ");				// else return -1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __gesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is greater than or equal to another
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #6 ");					// test if ordered and a1>=a2
	asm("movne r0, #1 ");				// if ordered and a1>=a2 return +1
	asm("mvneq r0, #0 ");				// else return -1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __gedf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is greater than or equal to another
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #6 ");					// test if ordered and a1>=a2
	asm("movne r0, #1 ");				// if ordered and a1>=a2 return +1
	asm("mvneq r0, #0 ");				// else return -1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __ltsf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is less than another
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #1 ");					// test if ordered and a1<a2
	asm("mvnne r0, #0 ");				// if ordered and a1<a2 return -1
	asm("moveq r0, #1 ");				// else return +1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __ltdf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is less than another
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #1 ");					// test if ordered and a1<a2
	asm("mvnne r0, #0 ");				// if ordered and a1<a2 return -1
	asm("moveq r0, #1 ");				// else return +1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __lesf2(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Compare if one float is less than or equal to another
//
    {
	// a1 in r0, a2 in r1 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal32 ");			// compare the two numbers
	asm("tst r0, #3 ");					// test if ordered and a1<=a2
	asm("mvnne r0, #0 ");				// if ordered and a1<=a2 return -1
	asm("moveq r0, #1 ");				// else return +1
	__POPRET("");
    }

__NAKED__ EXPORT_C TInt __ledf2(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Compare if one double is less than or equal to another
//
    {
	// a1 in r0,r1, a2 in r2,r3 on entry
	asm("stmfd sp!, {lr} ");
	asm("bl CompareTReal64 ");			// compare the two numbers
	asm("tst r0, #3 ");					// test if ordered and a1<=a2
	asm("mvnne r0, #0 ");				// if ordered and a1<=a2 return -1
	asm("moveq r0, #1 ");				// else return +1
	__POPRET("");
    }

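// Return conventions of the predicate helpers above, as implemented:
//   __eqsf2/__eqdf2 :  0 if ordered and equal, 1 otherwise
//   __nesf2/__nedf2 :  1 if ordered and unequal, 0 otherwise
//   __gtsf2/__gtdf2 : +1 if ordered and a1>a2, -1 otherwise
//   __gesf2/__gedf2 : +1 if ordered and a1>=a2, -1 otherwise
//   __ltsf2/__ltdf2 : -1 if ordered and a1<a2, +1 otherwise
//   __lesf2/__ledf2 : -1 if ordered and a1<=a2, +1 otherwise
// so a NaN operand always makes the relation test false. A sketch of
// how a compiler might lower 'a < b' via these helpers (illustrative
// only, not built):
#if 0
static inline TBool LessThanRef(TReal32 a, TReal32 b)
	{
	return __ltsf2(a, b) < 0;		// -1 only when ordered and a < b
	}
#endif
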
__NAKED__ EXPORT_C TReal32 __mulsf3(TReal32 /*a1*/,TReal32 /*a2*/)
//
// Multiply two floats
//
    {
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r7,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXMultiply ");			// multiply a1*a2, result in r1,r2,r3
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r7,");
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r7,");
    }

__NAKED__ EXPORT_C TReal64 __muldf3(TReal64 /*a1*/, TReal64 /*a2*/)
//
// Multiply two doubles
//
    {
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("mov r7, r2 ");					// save a2
	asm("mov r8, r3 ");
	asm("mov r2, r1 ");					// a1 into r1,r2
	asm("mov r1, r0 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a2 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("bl TRealXMultiply ");			// multiply a1*a2, result in r1,r2,r3
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r8,");
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r8,");
    }

__NAKED__ EXPORT_C TReal32 __divsf3(TReal32 /*a1*/, TReal32 /*a2*/)
//
// Divide two floats
//
    {
	// a1 is in r0, a2 in r1 on entry; return with answer in r0
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("bl ConvertTReal32ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r0 ");					// a1 into r1
	asm("bl ConvertTReal32ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXDivide ");			// divide a1/a2, result in r1,r2,r3 error code in r12
	asm("mov r9, r12 ");				// save error code in case it's division by zero
	asm("bl TRealXGetTReal32 ");		// convert result to TReal32 in r0, error code in r12
	asm("cmn r9, #41 ");				// check for KErrDivideByZero
	asm("moveq r12, r9 ");
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r9,");
	asm("stmfd sp!, {r0} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r4-r9,");
    }

__NAKED__ EXPORT_C TReal64 __divdf3(TReal64 /*a1*/, TReal64 /*a2*/)
	//
	// Divide two doubles
	//
	{
	// a1 is in r0,r1 a2 in r2,r3 on entry; return with answer in r0,r1
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r7, r0 ");					// save a1
	asm("mov r8, r1 ");
	asm("mov r1, r2 ");					// a2 into r1,r2
	asm("mov r2, r3 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a2 to TRealX in r1,r2,r3
	asm("mov r4, r1 ");					// move into r4,r5,r6
	asm("mov r5, r2 ");
	asm("mov r6, r3 ");
	asm("mov r1, r7 ");					// a1 into r1,r2
	asm("mov r2, r8 ");
	asm("bl ConvertTReal64ToTRealX ");	// convert a1 to TRealX in r1,r2,r3
	asm("bl TRealXDivide ");			// divide a1/a2, result in r1,r2,r3, error code in r12
	asm("mov r9, r12 ");				// save error code in case it's division by zero
	asm("bl TRealXGetTReal64 ");		// convert result to TReal64 in r0,r1 error code in r12
	asm("cmn r9, #41 ");				// check for KErrDivideByZero
	asm("moveq r12, r9 ");
	asm("cmp r12, #0 ");				// check error code
	__CPOPRET(eq,"r4-r9,");
	asm("stmfd sp!, {r0,r1} ");			// save result
	asm("mov r0, r12 ");				// error code into r0
	asm("bl __math_exception ");		// raise exception
	__POPRET("r0,r1,r4-r9,");
	}

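// In the two divide helpers above, the TRealXDivide error code is kept
// in r9 because a division by zero must be reported as
// KErrDivideByZero (-41) even though converting the infinite result
// would otherwise report KErrOverflow. A sketch of the priority rule
// (illustrative only, not built):
#if 0
static TInt MergeDivideError(TInt aDivideErr, TInt aConvertErr)
	{
	return (aDivideErr == KErrDivideByZero) ? aDivideErr : aConvertErr;
	}
#endif
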
__NAKED__ EXPORT_C TReal32 __negsf2(TReal32 /*a1*/)
//
// Negate a float
//
    {
	// a1 in r0 on entry, return value in r0
	asm("eor r0, r0, #0x80000000 ");	// change sign bit
	__JUMP(,lr);
    }

__NAKED__ EXPORT_C TReal64 __negdf2(TReal64 /*a1*/)
//
// Negate a double
//
    {
	// a1 in r0,r1 on entry, return value in r0,r1
	asm("eor r0, r0, #0x80000000 ");	// change sign bit
	__JUMP(,lr);
    }

__NAKED__ EXPORT_C TReal32 __floatsisf(TInt /*a1*/)
//
// Convert int to float
//
    {
	// a1 in r0 on entry, return value in r0
	asm("cmp r0, #0 ");					// test for zero or negative
	__JUMP(eq,lr);
	asm("and ip, r0, #0x80000000 ");	// ip=bit 31 of r0 (sign bit)
	asm("rsbmi r0, r0, #0 ");			// if negative, negate it
	asm("mov r2, #0x9E ");				// r2=0x9E=exponent of 2^31
	asm("cmp r0, #0x00010000 ");		// normalise integer, adjusting exponent
	asm("movcc r0, r0, lsl #16 ");
	asm("subcc r2, r2, #16 ");
	asm("cmp r0, #0x01000000 ");
	asm("movcc r0, r0, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r0, #0x10000000 ");
	asm("movcc r0, r0, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r0, #0x40000000 ");
	asm("movcc r0, r0, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r0, #0x80000000 ");
	asm("movcc r0, r0, lsl #1 ");
	asm("subcc r2, r2, #1 ");
	asm("and r1, r0, #0xFF ");			// r1=bottom 8 bits=rounding bits
	asm("cmp r1, #0x80 ");				// check if we need to round up (carry=1 if we do)
	asm("moveqs r1, r0, lsr #9 ");		// if bottom 8 bits=0x80, set carry=LSB of mantissa
	asm("addcss r0, r0, #0x100 ");		// round up if necessary
	asm("addcs r2, r2, #1 ");			// if carry, increment exponent
	asm("bic r0, r0, #0x80000000 ");	// remove top bit (integer bit of mantissa implicit)
	asm("mov r0, r0, lsr #8 ");			// mantissa into r0 bits 0-22
	asm("orr r0, r0, r2, lsl #23 ");	// exponent into r0 bits 23-30
	asm("orr r0, r0, ip ");				// sign bit into r0 bit 31
	__JUMP(,lr);
    }

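// A C model (illustrative only, not built) of the normalise-and-round
// sequence used by __floatsisf: shift the magnitude up until bit 31 is
// set, tracking the exponent down from 0x9E (the biased exponent of
// 2^31), then round the bottom 8 bits to nearest, ties to even:
#if 0
static TReal32 FloatSiSfRef(TInt a)
	{
	union { TUint i; TReal32 f; } u;
	if (a == 0)
		return 0.0f;
	TUint sign = (TUint)a & 0x80000000u;
	TUint m = (a < 0) ? 0u - (TUint)a : (TUint)a;
	TUint exp = 0x9E;					// biased exponent of 2^31
	while (!(m & 0x80000000u))
		{ m <<= 1; --exp; }				// normalise mantissa
	TUint round = m & 0xFFu;			// bits to be rounded away
	m >>= 8;							// 24-bit mantissa, integer bit at bit 23
	if (round > 0x80u || (round == 0x80u && (m & 1)))
		{
		if (++m == 0x01000000u)			// carry out of the mantissa:
			{ m >>= 1; ++exp; }			// renormalise and bump exponent
		}
	u.i = sign | (exp << 23) | (m & 0x007FFFFFu);
	return u.f;
	}
#endif
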
__NAKED__ EXPORT_C TReal64 __floatsidf(TInt /*a1*/)
//
// Convert int to double
//
    {
	// a1 in r0 on entry, return value in r0,r1
	asm("cmp r0, #0 ");					// test for zero or negative
	asm("moveq r1, #0 ");				// if zero, return 0
	__JUMP(eq,lr);
	asm("and ip, r0, #0x80000000 ");	// ip=bit 31 of r0 (sign bit)
	asm("rsbmi r0, r0, #0 ");			// if negative, negate it
	asm("mov r2, #0x400 ");				// r2=0x400
	asm("orr r2, r2, #0x1E ");			// r2=0x41E=exponent of 2^31
	asm("cmp r0, #0x00010000 ");		// normalise integer, adjusting exponent
	asm("movcc r0, r0, lsl #16 ");
	asm("subcc r2, r2, #16 ");
	asm("cmp r0, #0x01000000 ");
	asm("movcc r0, r0, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r0, #0x10000000 ");
	asm("movcc r0, r0, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r0, #0x40000000 ");
	asm("movcc r0, r0, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r0, #0x80000000 ");
	asm("movcc r0, r0, lsl #1 ");
	asm("subcc r2, r2, #1 ");
	asm("bic r0, r0, #0x80000000 ");	// remove top bit (integer bit of mantissa implicit)
	asm("mov r1, r0, lsl #21 ");		// low 11 bits of mantissa into r1
	asm("mov r0, r0, lsr #11 ");		// high 20 bits of mantissa into r0 bits 0-19
	asm("orr r0, r0, r2, lsl #20 ");	// exponent into r0 bits 20-30
	asm("orr r0, r0, ip ");				// sign bit into r0 bit 31
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov ip, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, ip ");
#endif
	__JUMP(,lr);
    }

__NAKED__ EXPORT_C TInt __fixsfsi(TReal32 /*a1*/)
//
// Convert float to int
//
    {
	// a1 in r0 on entry, return value in r0
	asm("mov r1, r0, lsr #23 ");
	asm("and r1, r1, #0xFF ");			// r1=exponent of a1
	asm("cmp r1, #0xFF ");				// check for NaN
	asm("bne fixsfsi1 ");
	asm("movs r2, r0, lsl #9 ");		// exponent=FF, check mantissa
	asm("movne r0, #0 ");				// if non-zero, a1 is a NaN so return 0
	__JUMP(ne,lr);
	asm("fixsfsi1: ");
	asm("rsbs r1, r1, #0x9E ");			// r1=number of shifts to produce integer
	asm("ble fixsfsi2 ");				// if <=0, saturate result
	asm("cmp r0, #0 ");					// check sign bit
	asm("orr r0, r0, #0x00800000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #8 ");			// shift mantissa up so MSB is in MSB of r0
	asm("mov r0, r0, lsr r1 ");			// r0=absolute integer
	asm("rsbmi r0, r0, #0 ");			// if negative, negate
	__JUMP(,lr);
	asm("fixsfsi2: ");
	asm("cmp r0, #0 ");					// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");			// if -ve return 80000000, if +ve return 7FFFFFFF
	__JUMP(,lr);
    }

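// A C model (illustrative only, not built) of __fixsfsi: truncate
// towards zero, return 0 for a NaN, and saturate to 0x7FFFFFFF or
// 0x80000000 when the value is out of range. The explicit shift>31
// test mirrors what the ARM register-specified LSR does implicitly:
#if 0
static TInt FixSfSiRef(TReal32 a)
	{
	union { TReal32 f; TUint i; } u;
	u.f = a;
	TUint exp = (u.i >> 23) & 0xFFu;
	if (exp == 0xFFu && (u.i << 9) != 0)
		return 0;						// NaN -> 0
	TInt shift = 0x9E - (TInt)exp;		// shifts down from bit 31
	if (shift <= 0)						// |a| >= 2^31: saturate
		return (u.i & 0x80000000u) ? (TInt)0x80000000u : 0x7FFFFFFF;
	if (shift > 31)
		return 0;						// |a| < 1 truncates to 0
	TUint m = (u.i << 8) | 0x80000000u;	// mantissa, integer bit at bit 31
	TUint r = m >> shift;
	return (u.i & 0x80000000u) ? -(TInt)r : (TInt)r;
	}
#endif
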
__NAKED__ EXPORT_C TInt __fixdfsi(TReal64 /*a1*/)
//
// Convert double to int
//
    {
	// a1 in r0,r1 on entry, return value in r0
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	asm("mov r2, r0, lsr #20 ");
	asm("bic r2, r2, #0x800 ");			// r2=exponent of a1
	asm("add r3, r2, #1 ");
	asm("cmp r3, #0x800 ");				// check for NaN
	asm("bne fixdfsi1 ");
	asm("movs r3, r0, lsl #12 ");		// exponent=7FF, check mantissa
	asm("cmpeq r1, #0 ");
	asm("movne r0, #0 ");				// if non-zero, a1 is a NaN so return 0
	__JUMP(ne,lr);
	asm("fixdfsi1: ");
	asm("mov r3, #0x400 ");
	asm("orr r3, r3, #0x1E ");			// r3=0x41E (exponent of 2^31)
	asm("subs r2, r3, r2 ");			// r2=number of shifts to produce integer
	asm("ble fixdfsi2 ");				// if <=0, saturate result
	asm("cmp r2, #31 ");				// check if more than 31 shifts needed
	asm("movhi r0, #0 ");				// if so, underflow result to 0
	__JUMP(hi,lr);
	asm("cmp r0, #0 ");					// check sign bit
	asm("orr r0, r0, #0x00100000 ");	// set implicit integer bit
	asm("mov r0, r0, lsl #11 ");		// shift mantissa up so MSB is in MSB of r0
	asm("orr r0, r0, r1, lsr #21 ");	// put in bits from r1
	asm("mov r0, r0, lsr r2 ");			// r0=absolute integer
	asm("rsbmi r0, r0, #0 ");			// if negative, negate
	__JUMP(,lr);
	asm("fixdfsi2: ");
	asm("cmp r0, #0 ");					// check sign
	asm("mov r0, #0x80000000 ");
	asm("subpl r0, r0, #1 ");			// if -ve return 80000000, if +ve return 7FFFFFFF
	__JUMP(,lr);
    }

__NAKED__ EXPORT_C TReal64 __extendsfdf2(TReal32 /*a1*/)
//
// Convert a float to a double
//
    {
	// a1 in r0, return in r0,r1
	asm("mov r3, r0, lsr #3 ");
	asm("ands r3, r3, #0x0FF00000 ");	// r3 bits 20-27 hold exponent, Z=1 if zero/denormal
	asm("mov r1, r0, lsl #9 ");			// r1 = TReal32 mantissa << 9
	asm("and r0, r0, #0x80000000 ");	// leave only sign bit in r0
	asm("beq extendsfdf2a ");			// branch if zero/denormal
	asm("cmp r3, #0x0FF00000 ");		// check for infinity or NaN
	asm("orrcs r3, r3, #0x70000000 ");	// if infinity or NaN, exponent = 7FF
	asm("addcc r3, r3, #0x38000000 ");	// else exponent = TReal32 exponent + 380
	asm("orr r0, r0, r1, lsr #12 ");	// top 20 mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsl #20 ");		// remaining mantissa bits in r1 bits 29-31
	asm("orr r0, r0, r3 ");				// exponent into r0 bits 20-30
	asm("b 0f ");
	asm("extendsfdf2a: ");				// come here if zero or denormal
	asm("cmp r1, #0 ");					// check for zero
	asm("beq 0f ");
	asm("mov r3, #0x38000000 ");		// else exponent = 380 (highest denormal exponent)
	asm("cmp r1, #0x10000 ");			// normalise mantissa, decrementing exponent as needed
	asm("movcc r1, r1, lsl #16 ");
	asm("subcc r3, r3, #0x01000000 ");
	asm("cmp r1, #0x1000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r3, r3, #0x00800000 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r3, r3, #0x00400000 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r3, r3, #0x00200000 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");
	asm("subcc r3, r3, #0x00100000 ");
	asm("add r1, r1, r1 ");				// shift mantissa left one more to remove integer bit
	asm("orr r0, r0, r1, lsr #12 ");	// top 20 mantissa bits into r0 bits 0-19
	asm("mov r1, r1, lsl #20 ");		// remaining mantissa bits in r1 bits 29-31
	asm("orr r0, r0, r3 ");				// exponent into r0 bits 20-30
	asm("0: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r3, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r3 ");
#endif
	__JUMP(,lr);
    }

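// The exponent rebias used above: a normalised TReal32 exponent e
// (bias 0x7F) becomes the TReal64 exponent e + 0x380, because the
// TReal64 bias is 0x3FF = 0x7F + 0x380. A one-line sketch
// (illustrative only, not built):
#if 0
static TUint ExtendExponentRef(TUint aReal32Exp)
	{
	return aReal32Exp + 0x380;			// 0x3FF - 0x7F = 0x380
	}
#endif
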
__NAKED__ EXPORT_C TReal32 __truncdfsf2(TReal64 /*a1*/)
//
// Convert a double to a float
// Raises an exception if conversion results in an error
//
    {
	asm("stmfd sp!, {lr} ");
	asm("bl TReal64GetTReal32 ");			// do the conversion
	asm("cmp r12, #0 ");					// check error code
	__CPOPRET(eq,"");
	asm("stmfd sp!, {r0} ");				// else save result
	asm("mov r0, r12 ");					// error code into r0
	asm("bl __math_exception ");			// raise exception
	__POPRET("r0,");

	// Convert TReal64 in r0,r1 to TReal32 in r0
	// Return error code in r12
	// r0-r3, r12 modified
	// NB This function is purely internal to EUSER and therefore IS ONLY EVER CALLED IN ARM MODE.
	asm("TReal64GetTReal32: ");
#ifndef __DOUBLE_WORDS_SWAPPED__
	asm("mov r2, r0 ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	asm("mov r12, r0, lsr #20 ");
	asm("bic r12, r12, #0x800 ");			// r12=a1 exponent
	asm("sub r12, r12, #0x380 ");			// r12=exp in - 380 = result exponent if in range
	asm("cmp r12, #0xFF ");					// check if input exponent too big for TReal32
	asm("bge TReal64GetTReal32a ");			// branch if it is
	asm("mov r2, r0, lsl #11 ");			// left justify mantissa in r2:r1
	asm("orr r2, r2, r1, lsr #21 ");
	asm("mov r1, r1, lsl #11 ");
	asm("orr r2, r2, #0x80000000 ");		// set implied integer bit in mantissa
	asm("cmp r12, #0 ");
	asm("bgt TReal64GetTReal32b ");			// branch if normalised result
	asm("cmn r12, #23 ");					// check for total underflow or zero
	asm("bge TReal64GetTReal32e ");			// skip if not
	asm("bics r2, r0, #0x80000000 ");		// check if input value zero
	asm("cmpeq r1, #0 ");
	asm("moveq r12, #0 ");					// if zero return KErrNone
	asm("mvnne r12, #9 ");					// else return KErrUnderflow
	asm("and r0, r0, #0x80000000 ");		// return zero of appropriate sign
	asm("mov r1, #0 ");
	__JUMP(,lr);
	asm("TReal64GetTReal32e: ");			// result will be a denormal
	asm("add r12, r12, #31 ");				// r12=32-mantissa shift required = 32-(1-r12)
	asm("movs r3, r1, lsl r12 ");			// r3=lost bits when r2:r1 is shifted
	asm("orrne lr, lr, #1 ");				// if these are not zero, set rounded down flag
	asm("rsb r3, r12, #32 ");
	asm("mov r1, r1, lsr r3 ");
	asm("orr r1, r1, r2, lsl r12 ");
	asm("mov r2, r2, lsr r3 ");				// r2 top 24 bits now give unrounded result mantissa
	asm("mov r12, #0 ");					// result exponent will be zero
	asm("TReal64GetTReal32b: ");
	asm("movs r3, r2, lsl #24 ");			// top 8 truncated bits into top byte of r3
	asm("bpl TReal64GetTReal32c ");			// if top bit clear, truncate
	asm("cmp r3, #0x80000000 ");
	asm("cmpeq r1, #0 ");					// compare rounding bits to 1000...
	asm("bhi TReal64GetTReal32d ");			// if >, round up
	asm("tst lr, #1 ");						// check rounded-down flag
	asm("bne TReal64GetTReal32d ");			// if rounded down, round up
	asm("tst r2, #0x100 ");					// else round to even - test LSB of result mantissa
	asm("beq TReal64GetTReal32c ");			// if zero, truncate, else round up
	asm("TReal64GetTReal32d: ");			// come here to round up
	asm("adds r2, r2, #0x100 ");			// increment the mantissa
	asm("movcs r2, #0x80000000 ");			// if carry, mantissa=800000
	asm("addcs r12, r12, #1 ");				// and increment exponent
	asm("cmpmi r12, #1 ");					// if mantissa normalised, check exponent>0
	asm("movmi r12, #1 ");					// if normalised and exponent=0, set exponent to 1
	asm("TReal64GetTReal32c: ");			// come here to truncate
	asm("and r0, r0, #0x80000000 ");		// leave only sign bit in r0
	asm("orr r0, r0, r12, lsl #23 ");		// exponent into r0 bits 23-30
	asm("bic r2, r2, #0x80000000 ");		// remove integer bit from mantissa
	asm("orr r0, r0, r2, lsr #8 ");			// non-integer mantissa bits into r0 bits 0-22
	asm("cmp r12, #0xFF ");					// check for overflow
	asm("mvneq r12, #8 ");					// if overflow, return KErrOverflow
	asm("biceq pc, lr, #3 ");
	asm("bics r1, r0, #0x80000000 ");		// check for underflow
	asm("mvneq r12, #9 ");					// if underflow return KErrUnderflow
	asm("movne r12, #0 ");					// else return KErrNone
	asm("bic pc, lr, #3 ");
	asm("TReal64GetTReal32a: ");			// come here if overflow, infinity or NaN
	asm("add r3, r12, #1 ");
	asm("cmp r3, #0x480 ");					// check for infinity or NaN
	asm("movne r1, #0 ");					// if not, set mantissa to 0 for infinity result
	asm("movne r0, r0, lsr #20 ");
	asm("movne r0, r0, lsl #20 ");
	asm("mov r1, r1, lsr #29 ");			// assemble 23 bit mantissa in r1
	asm("orr r1, r1, r0, lsl #3 ");
	asm("bic r1, r1, #0xFF000000 ");
	asm("and r0, r0, #0x80000000 ");		// leave only sign in r0
	asm("orr r0, r0, #0x7F000000 ");		// r0 bits 23-30 = FF = exponent
	asm("orr r0, r0, #0x00800000 ");
	asm("orr r0, r0, r1 ");					// r0 bits 0-22 = result mantissa
	asm("movs r12, r0, lsl #9 ");			// check if result is infinity or NaN
	asm("mvneq r12, #8 ");					// if infinity return KErrOverflow
	asm("mvnne r12, #5 ");					// else return KErrArgument
	asm("bic pc, lr, #3 ");
    }

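// The rounding applied in TReal64GetTReal32 above is round to nearest,
// ties to even, with a sticky flag for bits already lost to a denormal
// shift. A C model (illustrative only, not built; aMant holds the
// 24-bit result mantissa in bits 8-31 with guard bits in the low byte):
#if 0
static TUint RoundMantissaRef(TUint aMant, TBool aStickyBitsLost)
	{
	TUint guard = aMant & 0xFFu;
	if (guard > 0x80u ||
		(guard == 0x80u && (aStickyBitsLost || (aMant & 0x100u))))
		aMant += 0x100u;				// round up; may carry into exponent
	return aMant >> 8;					// 24-bit mantissa, integer bit at 23
	}
#endif
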
}	// end of extern "C" declaration
#endif
#endif