// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\carray.cia
// Machine coded arrays for ARM
// 
//
#include <e32cia.h>
sl@0
    20
#include "../common.h"
sl@0
    21
sl@0
    22
#ifdef __ARRAY_MACHINE_CODED__
sl@0
    23
extern "C" void PanicBadArrayFindMode();
sl@0
    24
sl@0
    25
// Return a reference to the pointer at position anIndex.
// On entry r0=this, r1=anIndex; panics if anIndex is outside [0, iCount).
EXPORT_C __NAKED__ TAny*& RPointerArrayBase::At(TInt /*anIndex*/) const
	{
	asm("ldmia r0, {r2,r3} ");			// r2=iCount, r3=iEntries
	asm("cmp r1, #0 ");					// check anIndex>=0
	asm("cmpge r2, r1 ");				// if so, check iCount>anIndex
	asm("addgt r0, r3, r1, lsl #2 ");	// address of entry = iEntries+4*anIndex
#ifdef __CPU_ARMV6
	asm("ble 1f ");						// avoid conditional return on ARMv6
 	__JUMP(,lr);
#else
 	__JUMP(gt,lr);
#endif
	asm("1: ");
	asm("b  " CSM_Z18PanicBadArrayIndexv);	// index out of range: panic
	}
sl@0
    40
sl@0
    41
// Append anEntry to the array, growing the storage first if it is full.
// Returns KErrNone on success, or the error code from Grow() on failure.
EXPORT_C __NAKED__ TInt RPointerArrayBase::Append(const TAny* /*anEntry*/)
	{
	asm("ldmia r0, {r2,r3,r12} ");		// r2=iCount, r3=iEntries, r12=iAllocated
	asm("cmp r2, r12 ");				// array full?
	asm("beq ptr_append_1 ");			// if so, grow it first
	asm("ptr_append_0: ");
	asm("str r1, [r3, r2, lsl #2] ");	// iEntries[iCount] = anEntry
	asm("add r2, r2, #1 ");
	asm("str r2, [r0] ");				// ++iCount
	asm("mov r0, #0 ");					// return KErrNone
	__JUMP(,lr);
	asm("ptr_append_1: ");
	asm("stmfd sp!, {r0,r1,r2,lr} ");	// save this, anEntry, count, return address
	asm("bl  " CSM_ZN17RPointerArrayBase4GrowEv);
	asm("cmp r0, #0 ");
	asm("bne ptr_append_2 ");			// branch if Grow() failed
	asm("ldmfd sp!, {r0,r1,r2,lr} ");
	asm("ldmia r0, {r2, r3} ");			// reload iCount, iEntries (moved by Grow)
	asm("b ptr_append_0 ");
	asm("ptr_append_2: ");				// error enlarging array
	asm("add sp, sp, #12 ");			// discard saved r0-r2; error code is in r0
	__POPRET("");
	}
sl@0
    64
sl@0
    65
// Linear search for the pointer anEntry (compared by value).
// Returns the index of the first match, or KErrNotFound (-1).
EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/) const
	{
	asm("ldmia r0, {r2,r3} ");			// r2=iCount, r3=iEntries
	asm("mov r0, #0 ");					// r0=0 (will be index+1)
	asm("subs r2, r2, #1 ");			// r2=iCount-1
	asm("blt 0f ");						// empty array: fall out with r0=0 -> returns -1
	asm("1: ");
	asm("ldr r12, [r3], #4 ");			// r12=iEntries[r0]
	asm("add r0, r0, #1 ");				// r0 = index+1
	asm("cmp r2, r0 ");					// C=1 iff iCount-1>=index+1 iff index<=iCount-2 iff this isn't last entry
	asm("teq r12, r1 ");				// check for equality, doesn't affect C
	asm("bhi 1b ");						// loop if C=1 & Z=0, i.e. if no match and this isn't last entry
	asm("0: ");
	asm("movne r0, #0 ");				// if no match set r0=0
	asm("sub r0, r0, #1 ");				// r0 was index+1, return index (or -1 for no match)
	__JUMP(,lr);
	}
sl@0
    82
sl@0
    83
// Linear search using the caller-supplied identity relation anIdentity(anEntry, entry).
// Returns the index of the first entry for which the relation is true, or KErrNotFound.
EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("ldmia r0, {r4,r5} ");			// r4=iCount, r5=iEntries
	asm("mvn r6, #0 ");					// r6 = -1 (current index, pre-incremented in loop)
	asm("mov r7, r1 ");					// r7 = anEntry
	asm("mov r8, r2 ");					// r8 = anIdentity
	asm("subs r4, r4, #1 ");			// r4=iCount-1
	asm("bmi ptr_find2_return ");		// if count=0, return -1
	asm("ptr_find2_loop: ");
	asm("ldr r1, [r5], #4 ");			// r1=pointer to entry r6
	asm("add r6, r6, #1 ");
	asm("mov r0, r7 ");					// r0=anEntry
	__JUMPL(8);							// call anIdentity(anEntry, entry) via r8
	asm("cmp r0, #0 ");
	asm("bne ptr_find2_return ");		// if equal, return r6
	asm("cmp r6, r4 ");
	asm("blt ptr_find2_loop ");
	asm("mvn r6, #0 ");					// no match: r6 = KErrNotFound
	asm("ptr_find2_return: ");			// return r6
	asm("mov r0, r6 ");
	__POPRET("r4-r8,");
	}
sl@0
   107
sl@0
   108
// Two-parameter overload: equivalent to the three-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: this function deliberately has no
// return - it falls straight through into the overload that follows it in
// this file, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   113
sl@0
   114
// Binary search an array of signed integers for anEntry, using find mode aMode
// (any/first/last).  Writes the match index (or insertion point) to anIndex and
// returns KErrNone on a match, KErrNotFound otherwise.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("mov r6, r2 ");					// r6=&anIndex
	asm("ldmia r0, {r2,r4} ");			// r2=count, r4=iEntries
	asm("bl BinarySearchSigned ");
	asm("str r2, [r6] ");				// store index
	__POPRET("r4-r6,");

// Binary search list of signed integers
// Match value in r1
// List address in r4
// Count in r2
// Match mode in r3
// Return with: r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear if not
//				r2=index of match or next higher
// r5 modified
	asm("BinarySearchSigned: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// sanity-check aMode in debug builds
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("mov r3, r3, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");				// set NOT FOUND flag
	asm("cmp r2, #0 ");					// r2 will be right index
	asm("beq 0f ");						// empty list: report not found at index 0
	asm("mov r5, #0 ");					// r5 = left index
	asm("1: ");
	asm("add r12, r2, r5 ");
	asm("mov r12, r12, lsr #1 ");		// r12 = mid index
	asm("ldr r0, [r4, r12, lsl #2] ");	// r0 = entry[mid]
	asm("subs r0, r0, r1 ");			// r0 = entry[mid] - match
	asm("beq 2f ");						// if match branch out
	asm("3: ");
	asm("addlt r5, r12, #1 ");			// else if entry<match left=mid+1
	asm("movgt r2, r12 ");				// else if entry>match right=mid
	asm("subs r0, r2, r5 ");			// right > left ?
	asm("bgt 1b ");						// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");					// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");
	asm("bics r3, r3, #1 ");			// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("bne 3b ");						// if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT
	asm("mov r2, r12 ");				// if so, r2 = mid
	__JUMP(,lr);						// and return with r0 = 0
	}
sl@0
   164
sl@0
   165
// Two-parameter overload: equivalent to the three-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   170
sl@0
   171
// Binary search an array of unsigned integers for anEntry, using find mode
// aMode (any/first/last).  Writes the match index (or insertion point) to
// anIndex and returns KErrNone on a match, KErrNotFound otherwise.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("mov r6, r2 ");					// r6=&anIndex
	asm("ldmia r0, {r2,r4} ");			// r2=count, r4=iEntries
	asm("bl BinarySearchUnsigned ");
	asm("str r2, [r6] ");				// store index
	__POPRET("r4-r6,");

// Binary search list of unsigned integers
// Match value in r1
// List address in r4
// Count in r2
// Match mode in r3
// Return with: r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear if not
//				r2=index of match or next higher
// r5 modified
	asm("BinarySearchUnsigned: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// sanity-check aMode in debug builds
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("mov r3, r3, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");				// set NOT FOUND flag
	asm("cmp r2, #0 ");					// r2 will be right index
	asm("beq 0f ");						// empty list: report not found at index 0
	asm("mov r5, #0 ");					// r5 = left index
	asm("1: ");
	asm("add r12, r2, r5 ");
	asm("mov r12, r12, lsr #1 ");		// r12 = mid index
	asm("ldr r0, [r4, r12, lsl #2] ");	// r0 = entry[mid]
	asm("subs r0, r1, r0 ");			// r0 = match - entry[mid] (unsigned compare via C flag)
	asm("beq 2f ");						// if match branch out
	asm("3: ");
	asm("addhi r5, r12, #1 ");			// else if entry<match left=mid+1	HI = C &~ Z
	asm("movlo r2, r12 ");				// else if entry>match right=mid	LO = ~C
	asm("subs r0, r2, r5 ");			// right > left ?
	asm("bgt 1b ");						// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");					// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");							// N=0 Z=1 C=1 V=0 r0=0 here
	asm("bics r3, r3, #1 ");			// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("cmpne r3, #0x60000000 ");		// HI if LAST, LO if FIRST
	asm("bne 3b ");						// if not ANY, branch back
	asm("mov r2, r12 ");				// if ANY, r2 = mid
	__JUMP(,lr);						// and return with r0 = 0
	}
sl@0
   222
sl@0
   223
// Binary search using a caller-supplied ordering function anOrder(match, entry),
// with find mode aMode (any/first/last).  Writes the match index (or insertion
// point) to anIndex; returns KErrNone on a match, KErrNotFound otherwise.
EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r2,r4-r11,lr} ");	// store &anIndex, r4-r11, lr
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6} ");			// r5=count, r6=iEntries
	asm("ldr r11, [sp, #40] ");			// r11 = aMode (5th argument, passed on the stack; 40 = 10 pushed words)
	asm("mov r7, r3 ");					// r7=anOrder
	asm("mov r4, r1 ");					// r1=anEntry
	asm("bl BinarySearchPointers ");	// r0=KErrNone if match, KErrNotFound if not
	asm("ldmfd sp!, {r2,r4} ");			// r2=&anIndex, restore r4
	// Dont need to FRAME RESTORE here since there's no barrier here
	asm("str r5, [r2] ");				// store index
	__POPRET("r5-r11,");				// restore r5-r11 and return

// Binary search list of pointers
// Pointer to match value in r4
// List address in r6
// Count in r5
// Pointer to ordering function in r7
// r11 = find mode
// Return with: r0=0 if match found, r0=-1 otherwise
//				Z flag set if match found, clear otherwise
//				r5=index of match or next higher
// r9,r10,r11 modified
	asm("BinarySearchPointers: ");
#ifdef _DEBUG
	asm("cmp r11, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// sanity-check aMode in debug builds
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("movs r11, r11, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("eorne r11, r11, #0xC0000000 ");	// match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last)
	asm("orr r11, r11, #1 ");			// set NOT FOUND flag
	asm("mov r9, lr ");					// save return address (the indirect call below clobbers lr)
	asm("cmp r5, #0 ");					// r5 will be right index
	asm("beq 0f ");						// empty list: report not found at index 0
	asm("mov r10, #0 ");				// r10 = left index
	asm("1: ");
	asm("add r8, r5, r10 ");
	asm("mov r8, r8, lsr #1 ");			// r8 = mid index

/** the latency of the indirect call should mask the latency of the ldr
	arm1136 requires base register to be valid one cycle early
*/
	asm("mov r0, r4 ");					// r0 points to match value
	asm("ldr r1, [r6, r8, lsl #2] ");	// r1 points to entry[mid]
	__JUMPL(7);							// call ordering function (match, entry) via r7
	asm("cmp r0, #0 ");
	asm("biceq r11, r11, #1 ");			// if match clear NOT FOUND flag
	asm("addeqs r0, r0, r11 ");			// and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY)
	asm("beq 2f ");						// branch out if match and ANY
	asm("addgt r10, r8, #1 ");			// else if match > entry, left = mid + 1
	asm("movlt r5, r8 ");				// else if match < entry, right = mid
	asm("subs r0, r5, r10 ");			// loop if right > left
	asm("bgt 1b ");						// finish loop with r0 = 0
	asm("0: ");
	asm("tst r11, #1 ");				// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,r9);
	asm("2: ");
	asm("mov r5, r8 ");					// if ANY, r8 = mid
	__JUMP(,r9);						// and return with r0 = 0, Z=1
	}
sl@0
   286
sl@0
   287
// One-parameter overload: equivalent to the two-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/) const
	{
	asm("mov r2, #0 ");					// r2 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   292
sl@0
   293
// Binary search a sorted array of signed integers for anEntry with find mode
// aMode; returns the index of the match, or KErrNotFound.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs correct alignment
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
#else
	asm("stmfd sp!, {r4,r5,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4} ");			// r2=count, r4=iEntries
	asm("bl BinarySearchSigned ");		// Z set on match, r2 = index
	asm("moveq r0, r2 ");				// if match r0=match index; if no match, r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r6,");
#else
	__POPRET("r4,r5,");
#endif
	}
sl@0
   312
sl@0
   313
// One-parameter overload: equivalent to the two-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/) const
	{
	asm("mov r2, #0 ");					// r2 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   318
sl@0
   319
// Binary search a sorted array of unsigned integers for anEntry with find mode
// aMode; returns the index of the match, or KErrNotFound.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs correct alignment
	asm("stmfd sp!, {r4-r6,lr} ");
	__EH_FRAME_PUSH2(r4-r6,lr)
#else
	asm("stmfd sp!, {r4,r5,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4} ");			// r2=count, r4=iEntries
	asm("bl BinarySearchUnsigned ");	// Z set on match, r2 = index
	asm("moveq r0, r2 ");				// if match r0=match index; if no match, r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r6,");
#else
	__POPRET("r4,r5,");
#endif
	}
sl@0
   338
sl@0
   339
// Two-parameter overload: equivalent to the three-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   344
sl@0
   345
// Binary search a sorted array using ordering function anOrder with find mode
// aMode; returns the index of the match, or KErrNotFound.
EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{

	asm("stmfd sp!, {r3-r11,lr} ");		// r3 pushed only to keep sp 8-byte aligned
	// NOTE(review): the stm pushes r3-r11,lr but the EH frame only records
	// r4-r6,lr - looks inconsistent with the r4-r11 pattern used elsewhere
	// in this file; confirm against the unwinder requirements.
	__EH_FRAME_PUSH2(r4-r6,lr)
	asm("ldmia r0, {r5,r6} ");			// r5=count, r6=iEntries
	asm("mov r11, r3 ");				// r11 = aMode
	asm("mov r7, r2 ");					// r7=anOrder
	asm("mov r4, r1 ");					// r1=anEntry
	asm("bl BinarySearchPointers ");	// Z set on match, r5 = index
	asm("moveq r0, r5 ");				// if match, r0=match index
	__POPRET("r3-r11,");
	}
sl@0
   358
sl@0
   359
#ifndef __KERNEL_MODE__
sl@0
   360
// In-place heap sort of the array, treating the entries as signed integers.
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortSigned()
	{
#ifdef __EABI__
	asm("stmfd sp!, {r4-r10,lr} ");		// r10 pushed only to keep sp 8-byte aligned
	__EH_FRAME_PUSH2(r4-r10,lr)
#else
	asm("stmfd sp!, {r4-r9,lr} ");
#endif
	asm("ldmia r0, {r4,r5} ");			// r4=iCount, r5=iEntries
	asm("bl HeapSortSigned ");			// local routine below
#ifdef __EABI__
	__POPRET("r4-r10,");
#else
	__POPRET("r4-r9,");
#endif
	// Heap sort list of signed integers
	// List address in r5, count in r4
	// r4=ss, r6=sh, r7=si
	// r8,r9 modified
	asm("HeapSortSigned: ");
	asm("cmp r4, #1 ");					// 0 or 1 entries: already sorted
	__JUMP(le,lr);
	asm("mov r6, r4, lsr #1 ");			// heap-building phase starts at count/2
	asm("hss_loop_start1: ");
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = value being sifted down
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hss_loop_start2 ");
	asm("hss_loop_inner: ");			// move larger child up into the hole
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hss_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hss_loop_inner_end ");		// no children left
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmneda r0, {r1,r2} ");		// both children exist: r1=left, r2=right
	asm("ldreq r1, [r0, #-4] ");		// only a left child (r8==r4)
	asm("subeq r8, r8, #1 ");
	asm("beq hss_loop_inner2 ");
	asm("cmp r1, r2 ");					// pick the larger child (signed)
	asm("subgt r8, r8, #1 ");
	asm("movle r1, r2 ");
	asm("hss_loop_inner2: ");
	asm("cmp r1, r7 ");					// continue sifting while child > sift value
	asm("bgt hss_loop_inner ");
	asm("hss_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into its final hole
	asm("cmp r6, #0 ");
	asm("bne hss_loop_start1 ");		// keep building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to be re-sifted)
	asm("ldr r0, [r5, #0] ");			// move heap max to its sorted position
	asm("str r0, [r5, r4, lsl #2] ");
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6 is 0 here: sift from the root
	asm("mov r9, r6 ");
	asm("bgt hss_loop_start2 ");
	asm("str r7, [r5, #0] ");			// one element left: done
	__JUMP(,lr);
	}
sl@0
   424
sl@0
   425
// In-place heap sort of the array, treating the entries as unsigned integers.
// Delegates to the local HeapSortUnsigned routine defined later in this file.
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortUnsigned()
	{
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("ldmia r0, {r4,r5} ");			// r4=iCount, r5=iEntries
	asm("bl HeapSortUnsigned ");
	__POPRET("r4-r9,");
	}
sl@0
   432
#endif  // !__KERNEL_MODE__
sl@0
   433
sl@0
   434
// In-place heap sort of aCount unsigned integers starting at aEntries.
// Also defines the local "HeapSortUnsigned" routine used by the member wrapper.
__NAKED__ void HeapSortUnsigned(TUint* aEntries,TInt aCount)
	{
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r4,r1");			// r4=iCount
	asm("mov r5,r0");			// r5=iEntries
	asm("bl HeapSortUnsigned ");
	__POPRET("r4-r9,");

	// Heap sort list of unsigned integers
	// List address in r5, count in r4
	// r4=ss, r6=sh, r7=si
	// r8,r9 modified
	asm("HeapSortUnsigned: ");
	asm("cmp r4, #1 ");					// 0 or 1 entries: already sorted
	__JUMP(le,lr);
	asm("mov r6, r4, lsr #1 ");			// heap-building phase starts at count/2
	asm("hsu_loop_start1: ");
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = value being sifted down
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hsu_loop_start2 ");
	asm("hsu_loop_inner: ");			// move larger child up into the hole
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hsu_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hsu_loop_inner_end ");		// no children left
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmneda r0, {r1,r2} ");		// both children exist: r1=left, r2=right
	asm("ldreq r1, [r0, #-4] ");		// only a left child (r8==r4)
	asm("subeq r8, r8, #1 ");
	asm("beq hsu_loop_inner2 ");
	asm("cmp r1, r2 ");					// pick the larger child (unsigned)
	asm("subhi r8, r8, #1 ");
	asm("movls r1, r2 ");
	asm("hsu_loop_inner2: ");
	asm("cmp r1, r7 ");					// continue sifting while child > sift value
	asm("bhi hsu_loop_inner ");
	asm("hsu_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into its final hole
	asm("cmp r6, #0 ");
	asm("bne hsu_loop_start1 ");		// keep building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to be re-sifted)
	asm("ldr r0, [r5, #0] ");			// move heap max to its sorted position
	asm("str r0, [r5, r4, lsl #2] ");
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6 is 0 here: sift from the root
	asm("mov r9, r6 ");
	asm("bgt hsu_loop_start2 ");
	asm("str r7, [r5, #0] ");			// one element left: done
	__JUMP(,lr);
	}
sl@0
   491
sl@0
   492
#ifndef __KERNEL_MODE__
sl@0
   493
// In-place heap sort of the array using the caller-supplied ordering
// function anOrder(left, right).
EXPORT_C __NAKED__ void RPointerArrayBase::HeapSort(TGeneralLinearOrder /*anOrder*/)
	{
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r4,r5} ");			// r4=iCount, r5=iEntries
	asm("mov r10, r1 ");				// r10=anOrder
	asm("bl HeapSortPointers ");
	__POPRET("r3-r11,");

	// Heap sort list of pointers
	// List address in r5, count in r4, r10 points to ordering function
	// r4=ss, r6=sh, r7=si
	// r8,r9,r11 modified
	asm("HeapSortPointers: ");
	asm("cmp r4, #1 ");					// 0 or 1 entries: already sorted
	__JUMP(le,lr);
	asm("mov r11, lr ");				// save return address (indirect calls clobber lr)
	asm("mov r6, r4, lsr #1 ");			// heap-building phase starts at count/2
	asm("hsp_loop_start1: ");
	asm("sub r6, r6, #1 ");
	asm("ldr r7, [r5, r6, lsl #2] ");	// r7 = value being sifted down
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hsp_loop_start2 ");
	asm("hsp_loop_inner: ");			// move larger child up into the hole
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("str r0, [r5, r9, lsl #2] ");
	asm("mov r9, r8 ");
	asm("hsp_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = index of right child (2*(r8+1))
	asm("cmp r8, r4 ");
	asm("bgt hsp_loop_inner_end ");		// no children left
	asm("subeq r8, r8, #1 ");			// only a left child (r8==r4)
	asm("beq hsp_loop_inner2 ");
	asm("add r0, r5, r8, lsl #2 ");
	asm("ldmda r0, {r0,r1} ");			// r0=left child, r1=right child
	__JUMPL(10);						// call anOrder(left, right) via r10
	asm("cmp r0, #0 ");
	asm("subgt r8, r8, #1 ");			// pick the larger child
	asm("hsp_loop_inner2: ");
	asm("ldr r0, [r5, r8, lsl #2] ");
	asm("mov r1, r7 ");
	__JUMPL(10);						// call anOrder(child, sift value) via r10
	asm("cmp r0, #0 ");
	asm("bgt hsp_loop_inner ");			// continue sifting while child > sift value
	asm("hsp_loop_inner_end: ");
	asm("str r7, [r5, r9, lsl #2] ");	// drop sift value into its final hole
	asm("cmp r6, #0 ");
	asm("bne hsp_loop_start1 ");		// keep building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("ldr r7, [r5, r4, lsl #2] ");	// r7 = last element (to be re-sifted)
	asm("ldr r0, [r5, #0] ");			// move heap max to its sorted position
	asm("str r0, [r5, r4, lsl #2] ");
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");					// r6 is 0 here: sift from the root
	asm("mov r9, r6 ");
	asm("bgt hsp_loop_start2 ");
	asm("str r7, [r5, #0] ");			// one element left: done
	__JUMP(,r11);
	}
sl@0
   557
#endif	// __KERNEL_MODE__
sl@0
   558
sl@0
   559
// Return the address of the entry at position anIndex.
// On entry r0=this, r1=anIndex; panics if anIndex is outside [0, iCount).
EXPORT_C __NAKED__ TAny* RArrayBase::At(TInt /*anIndex*/) const
	{
	asm("ldmia r0, {r2,r3,r12} ");		// r2=iCount, r3=iEntries, r12=iEntrySize
	asm("cmp r1, #0 ");					// check anIndex>=0
	asm("cmpge r2, r1 ");				// if so, check iCount>anIndex
	asm("mlagt r0, r1, r12, r3 ");		// if ok, r0=anIndex*iEntrySize+iEntries
#ifdef __CPU_ARMV6
	asm("ble 1f ");						// avoid conditional return on ARMv6
	__JUMP(,lr);
#else
	__JUMP(gt,lr);
#endif
	asm("1: ");
	asm("b  " CSM_Z18PanicBadArrayIndexv);	// index out of range: panic
	}
sl@0
   574
sl@0
   575
// Append a copy of the entry at anEntry, growing the storage first if full.
// Returns KErrNone on success, or the error code from Grow() on failure.
EXPORT_C __NAKED__ TInt RArrayBase::Append(const TAny* /*anEntry*/)
	{
	asm("stmfd sp!, {lr} ");
	asm("ldmia r0, {r3,r12} ");			// r3=iCount, r12=iEntries
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iAllocated));
	asm("cmp r3, r2 ");					// array full?
	asm("beq simple_append_1 ");		// if so, grow it first
	asm("simple_append_0: ");
	asm("add r2, r3, #1 ");
	asm("str r2, [r0] ");				// iCount++
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iEntrySize));
	asm("mla r0, r2, r3, r12 ");		// r0=iEntries+iEntrySize*iCount
	asm("bl wordmove ");				// r1=anEntry, r2=iEntrySize, do copy
	asm("mov r0, #0 ");					// return KErrNone;
	__POPRET("");

	asm("simple_append_1: ");
	asm("stmfd sp!, {r0,r1,r2} ");		// save this, anEntry, allocated count
	asm("bl  " CSM_ZN10RArrayBase4GrowEv);
	asm("cmp r0, #0 ");
	asm("bne simple_append_2 ");		// branch if Grow() failed
	asm("ldmfd sp!, {r0,r1,r2} ");
	asm("ldmia r0, {r3, r12} ");		// reload iCount, iEntries (moved by Grow)
	asm("b simple_append_0 ");
	asm("simple_append_2: ");			// error enlarging array
	asm("add sp, sp, #12 ");			// discard saved r0-r2; error code is in r0
	__POPRET("");
	}
sl@0
   603
sl@0
   604
// Linear search for an entry whose key (a word at iKeyOffset within each
// entry) equals the key of anEntry.  Returns the index or KErrNotFound.
EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/) const
	{
	asm("ldmia r0, {r0,r2,r3,r12} ");	// r0=count, r2=iEntries, r3=iEntrySize, r12=iKeyOffset
	asm("stmfd sp!, {r4,lr} ");			// save r4,lr
	asm("subs r0, r0, #1 ");			// r0 = count-1
	asm("blt 0f ");						// skip if count was zero
	asm("ldr r1, [r1, r12] ");			// r1=key of match entry
	asm("sub r4, r0, #1 ");				// r4 = iCount-2
	asm("1: ");
	asm("ldr lr, [r2, r12] ");			// lr=key of current entry
	asm("add r2, r2, r3 ");				// step r2 to next entry
	asm("subs r0, r0, #1 ");			// C=1 iff this isn't last entry
	asm("teq lr, r1 ");					// check for match - C unaffected
	asm("bhi 1b ");						// loop if C=1 & Z=0, i.e. if no match and this isn't last entry
	asm("0: ");
	asm("mvnne r0, #0 ");				// if no match, return -1
	asm("subeq r0, r4, r0 ");			// if match, index = (iCount-2)-r0
	__POPRET("r4,");
	}
sl@0
   623
sl@0
   624
// Linear search using the caller-supplied identity relation anIdentity(anEntry, entry).
// Returns the index of the first entry for which the relation is true, or KErrNotFound.
EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const
	{
	asm("stmfd sp!, {r4-r10,lr} ");		// save r4-r10,lr
	__EH_FRAME_PUSH2(r4-r10,lr)
	asm("ldmia r0, {r4,r5,r6} ");		// r4=count, r5=iEntries, r6=iEntrySize
	asm("mov r8, r1 ");					// r8=anEntry
	asm("mov r9, r2 ");					// r9=anIdentity
	asm("sub r7, r4, #1 ");				// r7=iCount-1
	asm("b simple_find2_start ");
	asm("simple_find2_loop: ");
	asm("mov r1, r5 ");					// r1->current entry
	asm("mov r0, r8 ");					// r0=anEntry
	__JUMPL(9);							// call anIdentity(anEntry, entry) via r9
	asm("cmp r0, #0 ");
	asm("bne simple_find2_return ");	// relation true: return
	asm("add r5, r5, r6 ");				// else step to next entry
	asm("simple_find2_start: ");
	asm("subs r4, r4, #1 ");			// r4 counts down from iCount-1 to -1
	asm("bpl simple_find2_loop ");
	asm("add r4, r7, #1 ");				// no match, arrange to return -1
	asm("simple_find2_return: ");
	asm("sub r0, r7, r4 ");				// index = (iCount-1) - r4
	__POPRET("r4-r10,");
	}
sl@0
   648
sl@0
   649
// Two-parameter overload: equivalent to the three-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   654
sl@0
   655
// Binary search an array sorted by signed integer key for the key of anEntry,
// using find mode aMode.  Writes the match index (or insertion point) to
// anIndex and returns KErrNone on a match, KErrNotFound otherwise.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("mov r8, r2 ");					// r8=&anIndex
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// if 4 byte entries, call simpler routine
	asm("bl BinarySearchSignedKey ");	// if not, call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchSigned ");		// use simpler routine for 4 byte entries
	asm("2: ");
	asm("str r2, [r8] ");				// store index
	__POPRET("r4-r8,");

// Binary search list of signed integers
// Match key in r1
// List address in r4
// Count in r2
// Match mode in r3
// EntrySize in r5, KeyOffset in r6
// Return with: r0=0 if match found, r0 nonzero otherwise
//				r2=index of match or next higher
// r7 modified
	asm("BinarySearchSignedKey: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// sanity-check aMode in debug builds
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("mov r3, r3, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");				// set NOT FOUND flag
	asm("cmp r2, #0 ");					// r2 will be right index
	asm("beq 0f ");						// empty list: report not found at index 0
	asm("mov r7, #0 ");					// r7 will be left index
	asm("1: ");
	asm("add r12, r2, r7 ");
	asm("mov r12, r12, lsr #1 ");		// r12 = mid index
	asm("mla r0, r12, r5, r6 ");		// r0 = key offset + entry size * mid index
	asm("ldr r0, [r4, r0] ");			// r0 = key[mid]
	asm("subs r0, r0, r1 ");			// r0 = entry[mid] - match
	asm("beq 2f ");						// if match branch out
	asm("3: ");
	asm("addlt r7, r12, #1 ");			// else if entry<match left=mid+1
	asm("movgt r2, r12 ");				// else if entry>match right=mid
	asm("subs r0, r2, r7 ");			// right > left ?
	asm("bgt 1b ");						// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");					// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");
	asm("bics r3, r3, #1 ");			// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("bne 3b ");						// if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT
	asm("mov r2, r12 ");				// if so, r2 = mid
	__JUMP(,lr);						// and return with r0 = 0
	}
sl@0
   713
sl@0
   714
// Two-parameter overload: equivalent to the three-parameter version with
// aMode = EArrayFindMode_Any (0).  NOTE: no return here - execution falls
// through into the overload that follows, so the two must remain adjacent.
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = EArrayFindMode_Any
	// fall through
	}
sl@0
   719
sl@0
   720
/**
Binary search a sorted array for an entry whose unsigned key matches anEntry's key.

On entry (ARM register args): r0=this, r1=anEntry, r2=&anIndex, r3=aMode.
Loads the array header (count, entry pointer, entry size, key offset), extracts
the match key from anEntry, then dispatches: 4-byte entries use the simple
BinarySearchUnsigned helper (defined earlier in this file, outside this block —
TODO confirm label target), larger entries use BinarySearchUnsignedKey below.
The resulting index (match, or insertion point) is stored through anIndex.

@return 0 (r0) if a match was found, KErrNotFound otherwise.
*/
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r4-r8,lr} ");
	__EH_FRAME_PUSH2(r4-r8,lr)
	asm("mov r8, r2 ");					// r8=&anIndex (r2 is about to be reloaded)
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// if 4 byte entries, call simpler routine
	asm("bl BinarySearchUnsignedKey ");	// if not, call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchUnsigned ");	// use simpler routine for 4 byte entries
	asm("2: ");
	asm("str r2, [r8] ");				// *anIndex = result index
	__POPRET("r4-r8,");

// Binary search list of entries by unsigned key (general entry size)
// Match key in r1
// List address in r4
// Count in r2
// Match mode in r3
// EntrySize in r5, KeyOffset in r6
// Return with: r0=0 if match found, r0 nonzero otherwise
//				r2=index of match or next higher
// r7 modified
	asm("BinarySearchUnsignedKey: ");
#ifdef _DEBUG
	asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// debug builds validate aMode
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("mov r3, r3, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("orr r3, r3, #1 ");				// set NOT FOUND flag (bit 0 of r3)
	asm("cmp r2, #0 ");					// r2 will be right index; empty array -> not found
	asm("beq 0f ");
	asm("mov r7, #0 ");					// r7 will be left index
	asm("1: ");
	asm("add r12, r2, r7 ");
	asm("mov r12, r12, lsr #1 ");		// r12 = mid index = (left+right)/2
	asm("mla r0, r12, r5, r6 ");		// r0 = key offset + entry size * mid index
	asm("ldr r0, [r4, r0] ");			// r0 = key[mid]
	asm("subs r0, r1, r0 ");			// r0 = match - entry[mid]; flags used unsigned below
	asm("beq 2f ");						// if match branch out
	asm("3: ");
	asm("addhi r7, r12, #1 ");			// else if entry<match left=mid+1	HI = C &~ Z
	asm("movlo r2, r12 ");				// else if entry>match right=mid	LO = ~C
	asm("subs r0, r2, r7 ");			// right > left ?
	asm("bgt 1b ");						// r0 = 0 when loop terminates
	asm("0: ");
	asm("tst r3, #1 ");					// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__JUMP(,lr);
	asm("2: ");							// N=0 Z=1 C=1 V=0 r0=0 here
	asm("bics r3, r3, #1 ");			// clear NOT FOUND flag, test for find mode ANY (Z set if so)
	asm("cmpne r3, #0x60000000 ");		// HI if LAST, LO if FIRST
	asm("bne 3b ");						// if not ANY, keep narrowing toward first/last match
	asm("mov r2, r12 ");				// if ANY, r2 = mid
	__JUMP(,lr);						// and return with r0 = 0
	}
sl@0
   779
sl@0
   780
/**
Binary search a sorted array using a caller-supplied ordering function.

On entry: r0=this, r1=anEntry, r2=&anIndex, r3=anOrder; aMode is the fifth
argument and is fetched from the stack ([sp,#40] after the 10-register push).
Marshals the array header and arguments into the register contract of
BinarySearchEntries (below), calls it, and stores the resulting index
through anIndex.

@return 0 (r0) if a match was found, KErrNotFound otherwise.
*/
EXPORT_C __NAKED__ TInt RArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save, pushed only to keep sp 8-byte aligned
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6,r11} ");		// r5=count, r6=iEntries, r11=entry size
	asm("ldr r9, [sp, #40] ");			// r9 = aMode (5th arg, above the 10 pushed regs)
	asm("mov r4, r1 ");					// r4=anEntry
	asm("mov r7, r3 ");					// r7=anOrder
	asm("bl BinarySearchEntries ");
	asm("str r5, [r2] ");				// store index (r2 preserved by BinarySearchEntries)
	__POPRET("r3-r11,");

	// Binary search list of general entries
	// Pointer to match value in r4
	// List address in r6
	// Count in r5
	// Match mode in r9
	// Pointer to ordering function in r7
	// Entry size in r11
	// Return with: r0=0 if match found, r0 nonzero otherwise
	//				r5=index of match or next higher
	// r9,r10 modified
	// r2 preserved
	asm("BinarySearchEntries: ");
#ifdef _DEBUG
	asm("cmp r9, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) );	// debug builds validate aMode
	asm("bhs PanicBadArrayFindMode ");
#endif
	asm("stmfd sp!, {r2,lr} ");
	asm("movs r9, r9, lsl #30 ");		// match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last)
	asm("eorne r9, r9, #0xC0000000 ");	// match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last)
	asm("orr r9, r9, #1 ");				// set NOT FOUND flag (bit 0 of r9)
	asm("cmp r5, #0 ");					// r5 will be right index; empty array -> not found
	asm("beq 0f ");
	asm("mov r10, #0 ");				// r10 will be left index
	asm("1: ");
	asm("add r8, r5, r10 ");
	asm("mov r8, r8, lsr #1 ");			// r8 = mid index = (left+right)/2
	asm("mla r1, r8, r11, r6 ");		// r1 = r8*entry size + list address = &entry[mid]
	asm("mov r0, r4 ");					// r0 points to match value
	__JUMPL(7);							// call ordering function (match, entry)
	asm("cmp r0, #0 ");
	asm("biceq r9, r9, #1 ");			// if match clear NOT FOUND flag
	asm("addeqs r0, r0, r9 ");			// and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY)
	asm("beq 2f ");						// branch out if match and ANY
	asm("addgt r10, r8, #1 ");			// else if match > entry, left = mid + 1
	asm("movlt r5, r8 ");				// else if match < entry, right = mid
	asm("subs r0, r5, r10 ");			// loop if right > left
	asm("bgt 1b ");						// finish loop with r0 = 0
	asm("0: ");
	asm("tst r9, #1 ");					// test not found flag
	asm("mvnnes r0, #0 ");				// if set r0=-1 = KErrNotFound
	__POPRET("r2,");
	asm("2: ");
	asm("mov r5, r8 ");					// if ANY, r5 = mid
	__POPRET("r2,");					// and return with r0 = 0, Z=1
	}
sl@0
   840
sl@0
   841
/**
Find an entry by signed key using binary search, with find mode 0 ("any").

Loads r2 (the aMode argument of the two-argument overload) with 0 and falls
through into that overload, which immediately follows in this file; the two
functions must therefore stay adjacent and in this order.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/) const
	{
	asm("mov r2, #0 ");					// r2 = aMode = 0 ("any" match mode)
	// fall through into FindIsqSigned(anEntry, aMode) below
	}
sl@0
   846
sl@0
   847
/**
Find an entry by signed key using binary search with the given find mode.

On entry: r0=this, r1=anEntry, r2=aMode. Dispatches on entry size:
4-byte entries use BinarySearchSigned, others use BinarySearchSignedKey
(both defined earlier in the file, outside this block — TODO confirm).
Both helpers return with Z set on a match and the index in r2.

@return index of the matching entry (r0) on success, KErrNotFound otherwise.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs to be aligned correctly
	asm("stmfd sp!, {r4-r8,lr} ");		// 6 regs keep sp 8-byte aligned
	__EH_FRAME_PUSH2(r4-r8,lr)
#else
	asm("stmfd sp!, {r4-r7,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// use simpler routine for 4 byte entries
	asm("bl BinarySearchSignedKey ");	// else call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchSigned ");
	asm("2: ");
	asm("moveq r0, r2 ");				// if match r0=index else r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r8,");
#else
	__POPRET("r4-r7,");
#endif
	}
sl@0
   873
sl@0
   874
/**
Find an entry by unsigned key using binary search, with find mode 0 ("any").

Loads r2 (the aMode argument of the two-argument overload) with 0 and falls
through into that overload, which immediately follows in this file; the two
functions must therefore stay adjacent and in this order.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/) const
	{
	asm("mov r2, #0 ");					// r2 = aMode = 0 ("any" match mode)
	// fall through into FindIsqUnsigned(anEntry, aMode) below
	}
sl@0
   879
sl@0
   880
/**
Find an entry by unsigned key using binary search with the given find mode.

On entry: r0=this, r1=anEntry, r2=aMode. Dispatches on entry size:
4-byte entries use the simple BinarySearchUnsigned helper (defined earlier,
outside this block — TODO confirm), others use BinarySearchUnsignedKey.
Both helpers return with Z set on a match and the index in r2.

@return index of the matching entry (r0) on success, KErrNotFound otherwise.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/, TInt /*aMode*/) const
	{
#ifdef __EABI__
	// sp needs to be aligned correctly
	asm("stmfd sp!, {r4-r8,lr} ");		// 6 regs keep sp 8-byte aligned
	__EH_FRAME_PUSH2(r4-r8,lr)
#else
	asm("stmfd sp!, {r4-r7,lr} ");
#endif
	asm("mov r3, r2 ");					// r3 = match mode
	asm("ldmia r0, {r2,r4,r5,r6} ");	// r2=count, r4=iEntries, r5=entry size, r6=key offset
	asm("cmp r5, #4 ");					// check for 4 byte entries
	asm("ldr r1, [r1, r6] ");			// r1=match key
	asm("beq 1f ");						// use simpler routine for 4 byte entries
	asm("bl BinarySearchUnsignedKey ");	// else call general routine
	asm("b 2f ");
	asm("1: ");
	asm("bl BinarySearchUnsigned ");
	asm("2: ");
	asm("moveq r0, r2 ");				// if match r0=index else r0=KErrNotFound
#ifdef __EABI__
	__POPRET("r4-r8,");
#else
	__POPRET("r4-r7,");
#endif
	}
sl@0
   906
sl@0
   907
/**
Find an entry using a caller-supplied ordering function, with find mode 0 ("any").

Loads r3 (the aMode argument of the three-argument overload) with 0 and falls
through into that overload, which immediately follows in this file; the two
functions must therefore stay adjacent and in this order.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const
	{
	asm("mov r3, #0 ");					// r3 = aMode = 0 ("any" match mode)
	// fall through into FindIsq(anEntry, anOrder, aMode) below
	}
sl@0
   912
sl@0
   913
/**
Find an entry using a caller-supplied ordering function and find mode.

On entry: r0=this, r1=anEntry, r2=anOrder, r3=aMode. Marshals the array
header and arguments into the register contract of BinarySearchEntries
(defined in BinarySearch above) and calls it; BinarySearchEntries returns
with Z set on a match and the index in r5.

@return index of the matching entry (r0) on success, KErrNotFound otherwise.
*/
EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const
	{
	asm("stmfd sp!, {r3-r11,lr} ");
	// r3 is caller save, pushed only to keep sp 8-byte aligned
	__EH_FRAME_ADDRESS(sp,4)
	// we can push the callee save regs
	__EH_FRAME_PUSH2(r4-r11,lr)
	asm("ldmia r0, {r5,r6,r11} ");		// r5=count, r6=iEntries, r11=entry size
	asm("mov r4, r1 ");					// r4=anEntry
	asm("mov r7, r2 ");					// r7=anOrder
	asm("mov r9, r3 ");					// r9 = aMode
	asm("bl BinarySearchEntries ");
	asm("moveq r0, r5 ");				// if match r0=index else r0=KErrNotFound (-1 from search)
	__POPRET("r3-r11,");
	}
sl@0
   928
sl@0
   929
#ifndef __KERNEL_MODE__
sl@0
   930
/**
Heap sort the array in place by signed key (ascending).

Loads the array header and dispatches on entry size: 4-byte entries use the
HeapSortSigned label routine (defined earlier in the file, outside this
block — TODO confirm target), larger entries use HeapSortSignedKey below,
which moves whole entries with wordmove and uses one entry-sized scratch
slot on the stack.
*/
EXPORT_C __NAKED__ void RArrayBase::HeapSortSigned()
	{
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");		// 10 regs keep sp 8-byte aligned
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("cmp r10, #4 ");
	asm("bleq HeapSortSigned ");		// 4-byte entries: simple routine
	asm("cmp r10, #4 ");				// recompare: flags clobbered by the call
	asm("blne HeapSortSignedKey ");		// general entries: routine below
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries by signed key
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// r4=ss, r6=sh (sort count / heap-build index)
	// r8,r9 modified
	asm("HeapSortSignedKey: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("mov r7, lr ");					// save lr in r7 (wordmove calls clobber lr)
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack (one entry)
	asm("mov r6, r4, lsr #1 ");
	asm("hssk_loop_start1: ");			// heap-building phase
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hssk_loop_start2 ");
	asm("hssk_loop_inner: ");			// sift-down: pull larger child up
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hssk_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*(r9+1) = right child index + 1
	asm("cmp r8, r4 ");
	asm("bgt hssk_loop_inner_end ");	// no children left
	asm("mla r0, r8, r10, r5 ");
	asm("ldrne r2, [r0, r11]! ");		// r2=key[r8] (only if right child exists)
	asm("addeq r0, r0, r11 ");
	asm("ldr r1, [r0, -r10] ");			// r1=key[r8-1] (left child)
	asm("subeq r8, r8, #1 ");			// only one child: use it
	asm("beq hssk_loop_inner2 ");
	asm("cmp r1, r2 ");					// signed compare of the two children
	asm("subgt r8, r8, #1 ");			// left child larger: pick it
	asm("movle r1, r2 ");				// else larger key is the right child's
	asm("hssk_loop_inner2: ");
	asm("ldr r2, [sp, r11] ");			// r2=key[sp] (entry being sifted)
	asm("cmp r1, r2 ");
	asm("bgt hssk_loop_inner ");		// child larger: keep sifting down
	asm("hssk_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hssk_loop_start1 ");		// continue building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=entry[r4] (root -> sorted region)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hssk_loop_start2 ");		// sift saved entry back into shrunken heap
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp] (last remaining entry)
	asm("add sp, sp, r10 ");			// free temporary stack space
	__JUMP(,r7);						// return via saved lr
	}
sl@0
  1021
sl@0
  1022
/**
Heap sort the array in place by unsigned key (ascending).

Identical in structure to HeapSortSigned above, but the key comparisons use
unsigned condition codes (HI/LS instead of GT/LE). 4-byte entries use the
HeapSortUnsigned label routine (defined earlier in the file, outside this
block — TODO confirm target); larger entries use HeapSortUnsignedKey below.
*/
EXPORT_C __NAKED__ void RArrayBase::HeapSortUnsigned()
	{
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");		// 10 regs keep sp 8-byte aligned
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("cmp r10, #4 ");
	asm("bleq HeapSortUnsigned ");		// 4-byte entries: simple routine
	asm("cmp r10, #4 ");				// recompare: flags clobbered by the call
	asm("blne HeapSortUnsignedKey ");	// general entries: routine below
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries by unsigned key
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// r4=ss, r6=sh (sort count / heap-build index)
	// r8,r9 modified
	asm("HeapSortUnsignedKey: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("mov r7, lr ");					// save lr in r7 (wordmove calls clobber lr)
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack (one entry)
	asm("mov r6, r4, lsr #1 ");
	asm("hsuk_loop_start1: ");			// heap-building phase
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hsuk_loop_start2 ");
	asm("hsuk_loop_inner: ");			// sift-down: pull larger child up
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hsuk_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*(r9+1) = right child index + 1
	asm("cmp r8, r4 ");
	asm("bgt hsuk_loop_inner_end ");	// no children left
	asm("mla r0, r8, r10, r5 ");
	asm("ldrne r2, [r0, r11]! ");		// r2=key[r8] (only if right child exists)
	asm("addeq r0, r0, r11 ");
	asm("ldr r1, [r0, -r10] ");			// r1=key[r8-1] (left child)
	asm("subeq r8, r8, #1 ");			// only one child: use it
	asm("beq hsuk_loop_inner2 ");
	asm("cmp r1, r2 ");					// unsigned compare of the two children
	asm("subhi r8, r8, #1 ");			// left child larger: pick it
	asm("movls r1, r2 ");				// else larger key is the right child's
	asm("hsuk_loop_inner2: ");
	asm("ldr r2, [sp, r11] ");			// r2=key[sp] (entry being sifted)
	asm("cmp r1, r2 ");
	asm("bhi hsuk_loop_inner ");		// child larger (unsigned): keep sifting down
	asm("hsuk_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hsuk_loop_start1 ");		// continue building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=entry[r4] (root -> sorted region)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hsuk_loop_start2 ");		// sift saved entry back into shrunken heap
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp] (last remaining entry)
	asm("add sp, sp, r10 ");			// free temporary stack space
	__JUMP(,r7);						// return via saved lr
	}
sl@0
  1113
sl@0
  1114
/**
Heap sort the array in place using a caller-supplied ordering function.

On entry: r0=this, r1=anOrder. HeapSortEntries (below) compares entries by
calling the ordering function (match first, entry second) and moves whole
entries with wordmove, using one entry-sized scratch slot on a temporarily
realigned stack so the callback sees an 8-byte-aligned sp.
*/
EXPORT_C __NAKED__ void RArrayBase::HeapSort(TGeneralLinearOrder anOrder)
	{
#ifdef __EABI__
	// need sp aligned correctly
	asm("stmfd sp!, {r3-r11,lr} ");		// 10 regs keep sp 8-byte aligned
	__EH_FRAME_ADDRESS(sp,4)
	__EH_FRAME_PUSH2(r4-r11,lr)
#else
	asm("stmfd sp!, {r4-r11,lr} ");
#endif
	asm("ldmia r0, {r4,r5,r10,r11} ");	// r4=iCount, r5=iEntries, r10=entry size, r11=key offset
	asm("mov r7, r1 ");					// r7=anOrder
	asm("bl HeapSortEntries ");
#ifdef __EABI__
	__POPRET("r3-r11,");
#else
	__POPRET("r4-r11,");
#endif

	// Heap sort list of entries
	// List address in r5, count in r4, entry size in r10, key offset in r11
	// Address of ordering function in r7
	// r4=ss, r6=sh (sort count / heap-build index)
	// r8,r9 modified
	asm("HeapSortEntries: ");
	asm("cmp r4, #1 ");
	__JUMP(le,lr);						// 0 or 1 entries: already sorted
	asm("str lr, [sp, #-4]! ");
	asm("mov r8, sp ");					// original SP
	asm("sub sp, sp, r10 ");			// get some temporary space on the stack (one entry)
	asm("sub sp, sp, #4 ");				// make room for original SP
	asm("bic sp, sp, #4 ");				// align stack to 8 byte boundary
	asm("str r8, [sp, r10] ");			// save original SP just above the scratch entry
	asm("mov r6, r4, lsr #1 ");
	asm("hse_loop_start1: ");			// heap-building phase
	asm("sub r6, r6, #1 ");
	asm("mla r1, r6, r10, r5 ");		// [sp]=entry[r6]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("b hse_loop_start2 ");
	asm("hse_loop_inner: ");			// sift-down: pull larger child up
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=entry[r8]
	asm("mov r9, r8 ");
	asm("hse_loop_start2: ");
	asm("add r8, r8, #1 ");
	asm("add r8, r8, r8 ");				// r8 = 2*(r9+1) = right child index + 1
	asm("cmp r8, r4 ");
	asm("bgt hse_loop_inner_end ");		// no children left
	asm("subeq r8, r8, #1 ");			// only one child: use it
	asm("beq hse_loop_inner2 ");
	asm("mla r1, r8, r10, r5 ");		// r1=&entry[r8]
	asm("sub r0, r1, r10 ");			// r0=&entry[r8-1]
	__JUMPL(7);							// call ordering function
	asm("cmp r0, #0 ");					// compare entry[r8-1] with entry[r8]
	asm("subgt r8, r8, #1 ");			// left child larger: pick it
	asm("hse_loop_inner2: ");
	asm("mla r0, r8, r10, r5 ");		// r0=&entry[r8]
	asm("mov r1, sp ");
	__JUMPL(7);							// call ordering function
	asm("cmp r0, #0 ");					// compare entry[r8] with [sp]
	asm("bgt hse_loop_inner ");			// child larger: keep sifting down
	asm("hse_loop_inner_end: ");
	asm("mla r0, r9, r10, r5 ");		// r0=&entry[r9]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[r9]=[sp]
	asm("cmp r6, #0 ");
	asm("bne hse_loop_start1 ");		// continue building the heap
	asm("sub r4, r4, #1 ");				// extraction phase: shrink heap by one
	asm("mla r1, r4, r10, r5 ");		// r1=&entry[r4]
	asm("mov r0, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// [sp]=entry[r4]
	asm("mla r0, r4, r10, r5 ");		// r0=&entry[r4]
	asm("mov r1, r5 ");					// r1=&entry[0]
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=entry[r4] (root -> sorted region)
	asm("cmp r4, #1 ");
	asm("mov r8, r6 ");
	asm("mov r9, r6 ");
	asm("bgt hse_loop_start2 ");		// sift saved entry back into shrunken heap
	asm("mov r0, r5 ");					// r0=&entry[0]
	asm("mov r1, sp ");
	asm("mov r2, r10 ");
	asm("bl wordmove ");				// entry[0]=[sp] (last remaining entry)
	asm("ldr sp, [sp, r10] ");			// restore stack pointer, freeing temporary stack space
	__POPRET("");						// pop saved lr and return
	}
sl@0
  1208
#endif	// __KERNEL_MODE__
sl@0
  1209
#endif	// __ARRAY_MACHINE_CODED__