sl@0: // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\common\arm\carray.cia sl@0: // Machine coded arrays for ARM sl@0: // sl@0: // sl@0: sl@0: #include sl@0: #include "../common.h" sl@0: sl@0: #ifdef __ARRAY_MACHINE_CODED__ sl@0: extern "C" void PanicBadArrayFindMode(); sl@0: sl@0: EXPORT_C __NAKED__ TAny*& RPointerArrayBase::At(TInt /*anIndex*/) const sl@0: { sl@0: asm("ldmia r0, {r2,r3} "); // r2=iCount, r3=iEntries sl@0: asm("cmp r1, #0 "); // check anIndex>=0 sl@0: asm("cmpge r2, r1 "); // if so, check iCount>anIndex sl@0: asm("addgt r0, r3, r1, lsl #2 "); // address of entry = iEntries+4*anIndex sl@0: #ifdef __CPU_ARMV6 sl@0: asm("ble 1f "); // avoid conditional return on ARMv6 sl@0: __JUMP(,lr); sl@0: #else sl@0: __JUMP(gt,lr); sl@0: #endif sl@0: asm("1: "); sl@0: asm("b " CSM_Z18PanicBadArrayIndexv); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::Append(const TAny* /*anEntry*/) sl@0: { sl@0: asm("ldmia r0, {r2,r3,r12} "); // r2=iCount, r3=iEntries, r12=iAllocated sl@0: asm("cmp r2, r12 "); sl@0: asm("beq ptr_append_1 "); sl@0: asm("ptr_append_0: "); sl@0: asm("str r1, [r3, r2, lsl #2] "); sl@0: asm("add r2, r2, #1 "); sl@0: asm("str r2, [r0] "); sl@0: asm("mov r0, #0 "); sl@0: __JUMP(,lr); sl@0: asm("ptr_append_1: "); sl@0: asm("stmfd sp!, {r0,r1,r2,lr} "); sl@0: asm("bl " CSM_ZN17RPointerArrayBase4GrowEv); sl@0: asm("cmp r0, #0 "); sl@0: asm("bne ptr_append_2 "); sl@0: asm("ldmfd sp!, {r0,r1,r2,lr} "); sl@0: asm("ldmia r0, {r2, r3} "); sl@0: asm("b ptr_append_0 "); sl@0: asm("ptr_append_2: "); // error enlarging array sl@0: asm("add sp, sp, #12 "); sl@0: __POPRET(""); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/) const sl@0: { sl@0: asm("ldmia r0, {r2,r3} "); // r2=iCount, r3=iEntries sl@0: asm("mov r0, #0 "); // r0=0 (will be index+1) sl@0: asm("subs r2, r2, #1 "); // r2=iCount-1 sl@0: asm("blt 0f "); sl@0: asm("1: "); sl@0: asm("ldr r12, [r3], #4 "); // r12=iEntries[r0] sl@0: asm("add r0, r0, #1 "); // r0 = index+1 sl@0: asm("cmp r2, r0 "); // C=1 iff iCount-1>=index+1 iff index<=iCount-2 iff this isn't last entry sl@0: asm("teq r12, r1 "); // check for equality, doesn't affect C sl@0: asm("bhi 1b "); // loop if C=1 & Z=0, i.e. if no match and this isn't last entry sl@0: asm("0: "); sl@0: asm("movne r0, #0 "); // if no match set r0=0 sl@0: asm("sub r0, r0, #1 "); // r0 was index+1, return index sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r8,lr) sl@0: asm("ldmia r0, {r4,r5} "); // r4=iCount, r5=iEntries sl@0: asm("mvn r6, #0 "); sl@0: asm("mov r7, r1 "); sl@0: asm("mov r8, r2 "); sl@0: asm("subs r4, r4, #1 "); // r4=iCount-1 sl@0: asm("bmi ptr_find2_return "); // if count=0, return -1 sl@0: asm("ptr_find2_loop: "); sl@0: asm("ldr r1, [r5], #4 "); // r1=pointer to entry r6 sl@0: asm("add r6, r6, #1 "); sl@0: asm("mov r0, r7 "); // r0=anEntry sl@0: __JUMPL(8); sl@0: asm("cmp r0, #0 "); sl@0: asm("bne ptr_find2_return "); // if equal, return r6 sl@0: asm("cmp r6, r4 "); sl@0: asm("blt ptr_find2_loop "); sl@0: asm("mvn r6, #0 "); sl@0: asm("ptr_find2_return: "); // return r6 sl@0: asm("mov r0, r6 "); sl@0: __POPRET("r4-r8,"); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchSigned(TInt /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r6,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r6,lr) sl@0: asm("mov r6, r2 "); // r6=&anIndex sl@0: asm("ldmia r0, {r2,r4} "); // r2=count, r4=iEntries sl@0: asm("bl BinarySearchSigned "); sl@0: asm("str r2, [r6] "); // store index sl@0: __POPRET("r4-r6,"); sl@0: sl@0: // Binary search list of signed integers sl@0: // Match value in r1 sl@0: // List address in r4 sl@0: // Count in r2 sl@0: // Match mode in r3 sl@0: // Return with: r0=0 if match found, r0=-1 otherwise sl@0: // Z flag set if match found, clear if not sl@0: // r2=index of match or next higher sl@0: // r5 modified sl@0: asm("BinarySearchSigned: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("mov r3, r3, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("orr r3, r3, #1 "); // set NOT FOUND flag sl@0: asm("cmp r2, #0 "); // r2 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r5, #0 "); // r5 = left index sl@0: asm("1: "); sl@0: asm("add r12, r2, r5 "); sl@0: asm("mov r12, r12, lsr #1 "); // r12 = mid index sl@0: asm("ldr r0, [r4, r12, lsl #2] "); // r0 = entry[mid] sl@0: asm("subs r0, r0, r1 "); // r0 = entry[mid] - match sl@0: asm("beq 2f "); // if match branch out sl@0: asm("3: "); sl@0: asm("addlt r5, r12, #1 "); // else if entrymatch right=mid sl@0: asm("subs r0, r2, r5 "); // right > left ? sl@0: asm("bgt 1b "); // r0 = 0 when loop terminates sl@0: asm("0: "); sl@0: asm("tst r3, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __JUMP(,lr); sl@0: asm("2: "); sl@0: asm("bics r3, r3, #1 "); // clear NOT FOUND flag, test for find mode ANY (Z set if so) sl@0: asm("bne 3b "); // if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT sl@0: asm("mov r2, r12 "); // if so, r2 = mid sl@0: __JUMP(,lr); // and return with r0 = 0 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearchUnsigned(TUint /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r6,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r6,lr) sl@0: asm("mov r6, r2 "); // r6=&anIndex sl@0: asm("ldmia r0, {r2,r4} "); // r2=count, r4=iEntries sl@0: asm("bl BinarySearchUnsigned "); sl@0: asm("str r2, [r6] "); // store index sl@0: __POPRET("r4-r6,"); sl@0: sl@0: // Binary search list of unsigned integers sl@0: // Match value in r1 sl@0: // List address in r4 sl@0: // Count in r2 sl@0: // Match mode in r3 sl@0: // Return with: r0=0 if match found, r0=-1 otherwise sl@0: // Z flag set if match found, clear if not sl@0: // r2=index of match or next higher sl@0: // r5 modified sl@0: asm("BinarySearchUnsigned: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("mov r3, r3, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("orr r3, r3, #1 "); // set NOT FOUND flag sl@0: asm("cmp r2, #0 "); // r2 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r5, #0 "); // r5 = left index sl@0: asm("1: "); sl@0: asm("add r12, r2, r5 "); sl@0: asm("mov r12, r12, lsr #1 "); // r12 = mid index sl@0: asm("ldr r0, [r4, r12, lsl #2] "); // r0 = entry[mid] sl@0: asm("subs r0, r1, r0 "); // r0 = match - entry[mid] sl@0: asm("beq 2f "); // if match branch out sl@0: asm("3: "); sl@0: asm("addhi r5, r12, #1 "); // else if entrymatch right=mid LO = ~C sl@0: asm("subs r0, r2, r5 "); // right > left ? sl@0: asm("bgt 1b "); // r0 = 0 when loop terminates sl@0: asm("0: "); sl@0: asm("tst r3, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __JUMP(,lr); sl@0: asm("2: "); // N=0 Z=1 C=1 V=0 r0=0 here sl@0: asm("bics r3, r3, #1 "); // clear NOT FOUND flag, test for find mode ANY (Z set if so) sl@0: asm("cmpne r3, #0x60000000 "); // HI if LAST, LO if FIRST sl@0: asm("bne 3b "); // if not ANY, branch back sl@0: asm("mov r2, r12 "); // if ANY, r2 = mid sl@0: __JUMP(,lr); // and return with r0 = 0 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r2,r4-r11,lr} "); // store &anIndex, r4-r11, lr sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: asm("ldmia r0, {r5,r6} "); // r5=count, r6=iEntries sl@0: asm("ldr r11, [sp, #40] "); // r11 = aMode sl@0: asm("mov r7, r3 "); // r7=anOrder sl@0: asm("mov r4, r1 "); // r1=anEntry sl@0: asm("bl BinarySearchPointers "); // r0=KErrNone if match, KErrNotFound if not sl@0: asm("ldmfd sp!, {r2,r4} "); // r2=&anIndex, restore r4 sl@0: // Dont need to FRAME RESTORE here since there's no barrier here sl@0: asm("str r5, [r2] "); // store index sl@0: __POPRET("r5-r11,"); // restore r5-r11 and return sl@0: sl@0: // Binary search list of pointers sl@0: // Pointer to match value in r4 sl@0: // List address in r6 sl@0: // Count in r5 sl@0: // Pointer to ordering function in r7 sl@0: // r11 = find mode sl@0: // Return with: r0=0 if match found, r0=-1 otherwise sl@0: // Z flag set if match found, clear otherwise sl@0: // r5=index of match or next higher sl@0: // r9,r10,r11 modified sl@0: asm("BinarySearchPointers: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r11, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("movs r11, r11, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("eorne r11, r11, #0xC0000000 "); // match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last) sl@0: asm("orr r11, r11, #1 "); // set NOT FOUND flag sl@0: asm("mov r9, lr "); sl@0: asm("cmp r5, #0 "); // r5 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r10, #0 "); // r10 = left index sl@0: asm("1: "); sl@0: asm("add r8, r5, r10 "); sl@0: asm("mov r8, r8, lsr #1 "); // r8 = mid index sl@0: sl@0: /** the latency of the indirect call should mask the latency of the ldr sl@0: arm1136 requires base register to be valid one cycle early sl@0: */ sl@0: asm("mov r0, r4 "); // r0 points to match value sl@0: asm("ldr r1, [r6, r8, lsl #2] "); // r1 points to entry[mid] sl@0: __JUMPL(7); // call ordering function (match, entry) sl@0: asm("cmp r0, #0 "); sl@0: asm("biceq r11, r11, #1 "); // if match clear NOT FOUND flag sl@0: asm("addeqs r0, r0, r11 "); // and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY) sl@0: asm("beq 2f "); // branch out if match and ANY sl@0: asm("addgt r10, r8, #1 "); // else if match > entry, left = mid + 1 sl@0: asm("movlt r5, r8 "); // else if match < entry, right = mid sl@0: asm("subs r0, r5, r10 "); // loop if right > left sl@0: asm("bgt 1b "); // finish loop with r0 = 0 sl@0: asm("0: "); sl@0: asm("tst r11, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __JUMP(,r9); sl@0: asm("2: "); sl@0: asm("mov r5, r8 "); // if ANY, r8 = mid sl@0: __JUMP(,r9); // and return with r0 = 0, Z=1 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/) const sl@0: { sl@0: asm("mov r2, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqSigned(TInt /*anEntry*/, TInt /*aMode*/) const sl@0: { sl@0: #ifdef __EABI__ sl@0: // sp needs correct alignment sl@0: asm("stmfd sp!, {r4-r6,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r6,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4,r5,lr} "); sl@0: #endif sl@0: asm("mov r3, r2 "); // r3 = match mode sl@0: asm("ldmia r0, {r2,r4} "); // r2=count, r4=iEntries sl@0: asm("bl BinarySearchSigned "); sl@0: asm("moveq r0, r2 "); // if match r0=match index; if no match, r0=KErrNotFound sl@0: #ifdef __EABI__ sl@0: __POPRET("r4-r6,"); sl@0: #else sl@0: __POPRET("r4,r5,"); sl@0: #endif sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/) const sl@0: { sl@0: asm("mov r2, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsqUnsigned(TUint /*anEntry*/, TInt /*aMode*/) const sl@0: { sl@0: #ifdef __EABI__ sl@0: // sp needs correct alignment sl@0: asm("stmfd sp!, {r4-r6,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r6,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4,r5,lr} "); sl@0: #endif sl@0: asm("mov r3, r2 "); // r3 = match mode sl@0: asm("ldmia r0, {r2,r4} "); // r2=count, r4=iEntries sl@0: asm("bl BinarySearchUnsigned "); sl@0: asm("moveq r0, r2 "); // if match r0=match index; if no match, r0=KErrNotFound sl@0: #ifdef __EABI__ sl@0: __POPRET("r4-r6,"); sl@0: #else sl@0: __POPRET("r4,r5,"); sl@0: #endif sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RPointerArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const sl@0: { sl@0: sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r6,lr) sl@0: asm("ldmia r0, {r5,r6} "); // r5=count, r6=iEntries sl@0: asm("mov r11, r3 "); // r11 = aMode sl@0: asm("mov r7, r2 "); // r7=anOrder sl@0: asm("mov r4, r1 "); // r1=anEntry sl@0: asm("bl BinarySearchPointers "); sl@0: asm("moveq r0, r5 "); // if match, r0=match index sl@0: __POPRET("r3-r11,"); sl@0: } sl@0: sl@0: #ifndef __KERNEL_MODE__ sl@0: EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortSigned() sl@0: { sl@0: #ifdef __EABI__ sl@0: asm("stmfd sp!, {r4-r10,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r10,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r9,lr} "); sl@0: #endif sl@0: asm("ldmia r0, {r4,r5} "); // r4=iCount, r5=iEntries sl@0: asm("bl HeapSortSigned "); sl@0: #ifdef __EABI__ sl@0: __POPRET("r4-r10,"); sl@0: #else sl@0: __POPRET("r4-r9,"); sl@0: #endif sl@0: // Heap sort list of signed integers sl@0: // List address in r5, count in r4 sl@0: // r4=ss, r6=sh, r7=si sl@0: // r8,r9 modified sl@0: asm("HeapSortSigned: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hss_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("ldr r7, [r5, r6, lsl #2] "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hss_loop_start2 "); sl@0: asm("hss_loop_inner: "); sl@0: asm("ldr r0, [r5, r8, lsl #2] "); sl@0: asm("str r0, [r5, r9, lsl #2] "); sl@0: asm("mov r9, r8 "); sl@0: asm("hss_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hss_loop_inner_end "); sl@0: asm("add r0, r5, r8, lsl #2 "); sl@0: asm("ldmneda r0, {r1,r2} "); sl@0: asm("ldreq r1, [r0, #-4] "); sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hss_loop_inner2 "); sl@0: asm("cmp r1, r2 "); sl@0: asm("subgt r8, r8, #1 "); sl@0: asm("movle r1, r2 "); sl@0: asm("hss_loop_inner2: "); sl@0: asm("cmp r1, r7 "); sl@0: asm("bgt hss_loop_inner "); sl@0: asm("hss_loop_inner_end: "); sl@0: asm("str r7, [r5, r9, lsl #2] "); sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hss_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("ldr r7, [r5, r4, lsl #2] "); sl@0: asm("ldr r0, [r5, #0] "); sl@0: asm("str r0, [r5, r4, lsl #2] "); sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hss_loop_start2 "); sl@0: asm("str r7, [r5, #0] "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ void RPointerArrayBase::HeapSortUnsigned() sl@0: { sl@0: asm("stmfd sp!, {r4-r9,lr} "); sl@0: asm("ldmia r0, {r4,r5} "); // r4=iCount, r5=iEntries sl@0: asm("bl HeapSortUnsigned "); sl@0: __POPRET("r4-r9,"); sl@0: } sl@0: #endif // !__KERNEL_MODE__ sl@0: sl@0: __NAKED__ void HeapSortUnsigned(TUint* aEntries,TInt aCount) sl@0: { sl@0: asm("stmfd sp!, {r4-r9,lr} "); sl@0: asm("mov r4,r1"); // r4=iCount sl@0: asm("mov r5,r0"); // r5=iEntries sl@0: asm("bl HeapSortUnsigned "); sl@0: __POPRET("r4-r9,"); sl@0: sl@0: // Heap sort list of unsigned integers sl@0: // List address in r5, count in r4 sl@0: // r4=ss, r6=sh, r7=si sl@0: // r8,r9 modified sl@0: asm("HeapSortUnsigned: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hsu_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("ldr r7, [r5, r6, lsl #2] "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hsu_loop_start2 "); sl@0: asm("hsu_loop_inner: "); sl@0: asm("ldr r0, [r5, r8, lsl #2] "); sl@0: asm("str r0, [r5, r9, lsl #2] "); sl@0: asm("mov r9, r8 "); sl@0: asm("hsu_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hsu_loop_inner_end "); sl@0: asm("add r0, r5, r8, lsl #2 "); sl@0: asm("ldmneda r0, {r1,r2} "); sl@0: asm("ldreq r1, [r0, #-4] "); sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hsu_loop_inner2 "); sl@0: asm("cmp r1, r2 "); sl@0: asm("subhi r8, r8, #1 "); sl@0: asm("movls r1, r2 "); sl@0: asm("hsu_loop_inner2: "); sl@0: asm("cmp r1, r7 "); sl@0: asm("bhi hsu_loop_inner "); sl@0: asm("hsu_loop_inner_end: "); sl@0: asm("str r7, [r5, r9, lsl #2] "); sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hsu_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("ldr r7, [r5, r4, lsl #2] "); sl@0: asm("ldr r0, [r5, #0] "); sl@0: asm("str r0, [r5, r4, lsl #2] "); sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hsu_loop_start2 "); sl@0: asm("str r7, [r5, #0] "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #ifndef __KERNEL_MODE__ sl@0: EXPORT_C __NAKED__ void RPointerArrayBase::HeapSort(TGeneralLinearOrder /*anOrder*/) sl@0: { sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: // r3 is caller save sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: // we can push the callee save regs sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: asm("ldmia r0, {r4,r5} "); // r4=iCount, r5=iEntries sl@0: asm("mov r10, r1 "); // r10=anOrder sl@0: asm("bl HeapSortPointers "); sl@0: __POPRET("r3-r11,"); sl@0: sl@0: // Heap sort list of pointers sl@0: // List address in r5, count in r4, r10 points to ordering function sl@0: // r4=ss, r6=sh, r7=si sl@0: // r8,r9,r11 modified sl@0: asm("HeapSortPointers: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("mov r11, lr "); sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hsp_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("ldr r7, [r5, r6, lsl #2] "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hsp_loop_start2 "); sl@0: asm("hsp_loop_inner: "); sl@0: asm("ldr r0, [r5, r8, lsl #2] "); sl@0: asm("str r0, [r5, r9, lsl #2] "); sl@0: asm("mov r9, r8 "); sl@0: asm("hsp_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hsp_loop_inner_end "); sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hsp_loop_inner2 "); sl@0: asm("add r0, r5, r8, lsl #2 "); sl@0: asm("ldmda r0, {r0,r1} "); sl@0: __JUMPL(10); sl@0: asm("cmp r0, #0 "); sl@0: asm("subgt r8, r8, #1 "); sl@0: asm("hsp_loop_inner2: "); sl@0: asm("ldr r0, [r5, r8, lsl #2] "); sl@0: asm("mov r1, r7 "); sl@0: __JUMPL(10); sl@0: asm("cmp r0, #0 "); sl@0: asm("bgt hsp_loop_inner "); sl@0: asm("hsp_loop_inner_end: "); sl@0: asm("str r7, [r5, r9, lsl #2] "); sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hsp_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("ldr r7, [r5, r4, lsl #2] "); sl@0: asm("ldr r0, [r5, #0] "); sl@0: asm("str r0, [r5, r4, lsl #2] "); sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hsp_loop_start2 "); sl@0: asm("str r7, [r5, #0] "); sl@0: __JUMP(,r11); sl@0: } sl@0: #endif // __KERNEL_MODE__ sl@0: sl@0: EXPORT_C __NAKED__ TAny* RArrayBase::At(TInt /*anIndex*/) const sl@0: { sl@0: asm("ldmia r0, {r2,r3,r12} "); // r2=iCount, r3=iEntries, r12=iEntrySize sl@0: asm("cmp r1, #0 "); // check anIndex>=0 sl@0: asm("cmpge r2, r1 "); // if so, check iCount>anIndex sl@0: asm("mlagt r0, r1, r12, r3 "); // if ok, r0=anIndex*iEntrySize+iEntries sl@0: #ifdef __CPU_ARMV6 sl@0: asm("ble 1f "); sl@0: __JUMP(,lr); sl@0: #else sl@0: __JUMP(gt,lr); sl@0: #endif sl@0: asm("1: "); sl@0: asm("b " CSM_Z18PanicBadArrayIndexv); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::Append(const TAny* /*anEntry*/) sl@0: { sl@0: asm("stmfd sp!, {lr} "); sl@0: asm("ldmia r0, {r3,r12} "); // r3=iCount, r12=iEntries sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iAllocated)); sl@0: asm("cmp r3, r2 "); sl@0: asm("beq simple_append_1 "); sl@0: asm("simple_append_0: "); sl@0: asm("add r2, r3, #1 "); sl@0: asm("str r2, [r0] "); // iCount++ sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(RArrayBase,iEntrySize)); sl@0: asm("mla r0, r2, r3, r12 "); // r0=iEntries+iEntrySize*iCount sl@0: asm("bl wordmove "); // r1=anEntry, r2=iEntrySize, do copy sl@0: asm("mov r0, #0 "); // return KErrNone; sl@0: __POPRET(""); sl@0: sl@0: asm("simple_append_1: "); sl@0: asm("stmfd sp!, {r0,r1,r2} "); sl@0: asm("bl " CSM_ZN10RArrayBase4GrowEv); sl@0: asm("cmp r0, #0 "); sl@0: asm("bne simple_append_2 "); sl@0: asm("ldmfd sp!, {r0,r1,r2} "); sl@0: asm("ldmia r0, {r3, r12} "); sl@0: asm("b simple_append_0 "); sl@0: asm("simple_append_2: "); // error enlarging array sl@0: asm("add sp, sp, #12 "); sl@0: __POPRET(""); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/) const sl@0: { sl@0: asm("ldmia r0, {r0,r2,r3,r12} "); // r0=count, r2=iEntries, r3=iEntrySize, r12=iKeyOffset sl@0: asm("stmfd sp!, {r4,lr} "); // save r4,lr sl@0: asm("subs r0, r0, #1 "); // r0 = count-1 sl@0: asm("blt 0f "); // skip if count was zero sl@0: asm("ldr r1, [r1, r12] "); // r1=key of match entry sl@0: asm("sub r4, r0, #1 "); // r4 = iCount-2 sl@0: asm("1: "); sl@0: asm("ldr lr, [r2, r12] "); // lr=key of current entry sl@0: asm("add r2, r2, r3 "); // step r2 to next entry sl@0: asm("subs r0, r0, #1 "); // C=1 iff this isn't last entry sl@0: asm("teq lr, r1 "); // check for match - C unaffected sl@0: asm("bhi 1b "); // loop if C=1 & Z=0, i.e. if no match and this isn't last entry sl@0: asm("0: "); sl@0: asm("mvnne r0, #0 "); // if no match, return -1 sl@0: asm("subeq r0, r4, r0 "); // if match, index = (iCount-2)-r0 sl@0: __POPRET("r4,"); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::Find(const TAny* /*anEntry*/, TGeneralIdentityRelation /*anIdentity*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r10,lr} "); // save r4-r10,lr sl@0: __EH_FRAME_PUSH2(r4-r10,lr) sl@0: asm("ldmia r0, {r4,r5,r6} "); // r4=count, r5=iEntries, r6=iEntrySize sl@0: asm("mov r8, r1 "); // r8=anEntry sl@0: asm("mov r9, r2 "); // r9=anIdentity sl@0: asm("sub r7, r4, #1 "); // r7=iCount-1 sl@0: asm("b simple_find2_start "); sl@0: asm("simple_find2_loop: "); sl@0: asm("mov r1, r5 "); // r1->current entry sl@0: asm("mov r0, r8 "); // r0=anEntry sl@0: __JUMPL(9); sl@0: asm("cmp r0, #0 "); sl@0: asm("bne simple_find2_return "); sl@0: asm("add r5, r5, r6 "); // else step to next entry sl@0: asm("simple_find2_start: "); sl@0: asm("subs r4, r4, #1 "); sl@0: asm("bpl simple_find2_loop "); sl@0: asm("add r4, r7, #1 "); // no match, arrange to return -1 sl@0: asm("simple_find2_return: "); sl@0: asm("sub r0, r7, r4 "); // index=count-r4 sl@0: __POPRET("r4-r10,"); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchSigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r8,lr) sl@0: asm("mov r8, r2 "); // r8=&anIndex sl@0: asm("ldmia r0, {r2,r4,r5,r6} "); // r2=count, r3=iEntries, r5=entry size, r6=key offset sl@0: asm("cmp r5, #4 "); // check for 4 byte entries sl@0: asm("ldr r1, [r1, r6] "); // r1=match key sl@0: asm("beq 1f "); // if 4 byte entries, call simpler routine sl@0: asm("bl BinarySearchSignedKey "); // if not, call general routine sl@0: asm("b 2f "); sl@0: asm("1: "); sl@0: asm("bl BinarySearchSigned "); // use simpler routine for 4 byte entries sl@0: asm("2: "); sl@0: asm("str r2, [r8] "); sl@0: __POPRET("r4-r8,"); sl@0: sl@0: // Binary search list of signed integers sl@0: // Match key in r1 sl@0: // List address in r4 sl@0: // Count in r2 sl@0: // Match mode in r3 sl@0: // EntrySize in r5, KeyOffset in r6 sl@0: // Return with: r0=0 if match found, r0 nonzero otherwise sl@0: // r2=index of match or next higher sl@0: // r7 modified sl@0: asm("BinarySearchSignedKey: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("mov r3, r3, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("orr r3, r3, #1 "); // set NOT FOUND flag sl@0: asm("cmp r2, #0 "); // r2 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r7, #0 "); // r7 will be left index sl@0: asm("1: "); sl@0: asm("add r12, r2, r7 "); sl@0: asm("mov r12, r12, lsr #1 "); // r12 = mid index sl@0: asm("mla r0, r12, r5, r6 "); // r0 = key offset + entry size * mid index sl@0: asm("ldr r0, [r4, r0] "); // r0 = key[mid] sl@0: asm("subs r0, r0, r1 "); // r0 = entry[mid] - match sl@0: asm("beq 2f "); // if match branch out sl@0: asm("3: "); sl@0: asm("addlt r7, r12, #1 "); // else if entrymatch right=mid sl@0: asm("subs r0, r2, r7 "); // right > left ? sl@0: asm("bgt 1b "); // r0 = 0 when loop terminates sl@0: asm("0: "); sl@0: asm("tst r3, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __JUMP(,lr); sl@0: asm("2: "); sl@0: asm("bics r3, r3, #1 "); // clear NOT FOUND flag, test for find mode ANY (Z set if so) sl@0: asm("bne 3b "); // if not, V=0 (left from subs), N=1 for last, 0 for first, Z=0 => LAST->LT FIRST->GT sl@0: asm("mov r2, r12 "); // if so, r2 = mid sl@0: __JUMP(,lr); // and return with r0 = 0 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::BinarySearchUnsigned(const TAny* /*anEntry*/, TInt& /*anIndex*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r8,lr) sl@0: asm("mov r8, r2 "); // r8=&anIndex sl@0: asm("ldmia r0, {r2,r4,r5,r6} "); // r2=count, r4=iEntries, r5=entry size, r6=key offset sl@0: asm("cmp r5, #4 "); // check for 4 byte entries sl@0: asm("ldr r1, [r1, r6] "); // r1=match key sl@0: asm("beq 1f "); // if 4 byte entries, call simpler routine sl@0: asm("bl BinarySearchUnsignedKey "); // if not, call general routine sl@0: asm("b 2f "); sl@0: asm("1: "); sl@0: asm("bl BinarySearchUnsigned "); // use simpler routine for 4 byte entries sl@0: asm("2: "); sl@0: asm("str r2, [r8] "); sl@0: __POPRET("r4-r8,"); sl@0: sl@0: // Binary search list of unsigned integers sl@0: // Match key in r1 sl@0: // List address in r4 sl@0: // Count in r2 sl@0: // Match mode in r3 sl@0: // EntrySize in r5, KeyOffset in r6 sl@0: // Return with: r0=0 if match found, r0 nonzero otherwise sl@0: // r2=index of match or next higher sl@0: // r7 modified sl@0: asm("BinarySearchUnsignedKey: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r3, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("mov r3, r3, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("orr r3, r3, #1 "); // set NOT FOUND flag sl@0: asm("cmp r2, #0 "); // r2 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r7, #0 "); // r7 will be left index sl@0: asm("1: "); sl@0: asm("add r12, r2, r7 "); sl@0: asm("mov r12, r12, lsr #1 "); // r12 = mid index sl@0: asm("mla r0, r12, r5, r6 "); // r0 = key offset + entry size * mid index sl@0: asm("ldr r0, [r4, r0] "); // r0 = key[mid] sl@0: asm("subs r0, r1, r0 "); // r0 = match - entry[mid] sl@0: asm("beq 2f "); // if match branch out sl@0: asm("3: "); sl@0: asm("addhi r7, r12, #1 "); // else if entrymatch right=mid LO = ~C sl@0: asm("subs r0, r2, r7 "); // right > left ? sl@0: asm("bgt 1b "); // r0 = 0 when loop terminates sl@0: asm("0: "); sl@0: asm("tst r3, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __JUMP(,lr); sl@0: asm("2: "); // N=0 Z=1 C=1 V=0 r0=0 here sl@0: asm("bics r3, r3, #1 "); // clear NOT FOUND flag, test for find mode ANY (Z set if so) sl@0: asm("cmpne r3, #0x60000000 "); // HI if LAST, LO if FIRST sl@0: asm("bne 3b "); // if not ANY, branch back sl@0: asm("mov r2, r12 "); // if ANY, r2 = mid sl@0: __JUMP(,lr); // and return with r0 = 0 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::BinarySearch(const TAny* /*anEntry*/, TInt& /*anIndex*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: // r3 is caller save sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: // we can push the callee save regs sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: asm("ldmia r0, {r5,r6,r11} "); // r5=count, r6=iEntries, r11=entry size sl@0: asm("ldr r9, [sp, #40] "); // r9 = aMode sl@0: asm("mov r4, r1 "); // r4=anEntry sl@0: asm("mov r7, r3 "); // r7=anOrder sl@0: asm("bl BinarySearchEntries "); sl@0: asm("str r5, [r2] "); // store index sl@0: __POPRET("r3-r11,"); sl@0: sl@0: // Binary search list of general entries sl@0: // Pointer to match value in r4 sl@0: // List address in r6 sl@0: // Count in r5 sl@0: // Match mode in r9 sl@0: // Pointer to ordering function in r7 sl@0: // Entry size in r11 sl@0: // Return with: r0=0 if match found, r0 nonzero otherwise sl@0: // r5=index of match or next higher sl@0: // r9,r10 modified sl@0: // r2 preserved sl@0: asm("BinarySearchEntries: "); sl@0: #ifdef _DEBUG sl@0: asm("cmp r9, #%a0" : : "i" ((TInt)EArrayFindMode_Limit) ); sl@0: asm("bhs PanicBadArrayFindMode "); sl@0: #endif sl@0: asm("stmfd sp!, {r2,lr} "); sl@0: asm("movs r9, r9, lsl #30 "); // match mode -> bits 30,31 (00000000=any, 40000000=first, 80000000=last) sl@0: asm("eorne r9, r9, #0xC0000000 "); // match mode -> bits 30,31 (00000000=any, 80000000=first, 40000000=last) sl@0: asm("orr r9, r9, #1 "); // set NOT FOUND flag sl@0: asm("cmp r5, #0 "); // r5 will be right index sl@0: asm("beq 0f "); sl@0: asm("mov r10, #0 "); // r10 will be left index sl@0: asm("1: "); sl@0: asm("add r8, r5, r10 "); sl@0: asm("mov r8, r8, lsr #1 "); // r8 = mid index sl@0: asm("mla r1, r8, r11, r6 "); // r1 = r8*entry size + list address = &entry[mid] sl@0: asm("mov r0, r4 "); // r0 points to match value sl@0: __JUMPL(7); // call ordering function (match, entry) sl@0: asm("cmp r0, #0 "); sl@0: asm("biceq r9, r9, #1 "); // if match clear NOT FOUND flag sl@0: asm("addeqs r0, r0, r9 "); // and add match mode to r0 (>0 if LAST, <0 if FIRST, 0 if ANY) sl@0: asm("beq 2f "); // branch out if match and ANY sl@0: asm("addgt r10, r8, #1 "); // else if match > entry, left = mid + 1 sl@0: asm("movlt r5, r8 "); // else if match < entry, right = mid sl@0: asm("subs r0, r5, r10 "); // loop if right > left sl@0: asm("bgt 1b "); // finish loop with r0 = 0 sl@0: asm("0: "); sl@0: asm("tst r9, #1 "); // test not found flag sl@0: asm("mvnnes r0, #0 "); // if set r0=-1 = KErrNotFound sl@0: __POPRET("r2,"); sl@0: asm("2: "); sl@0: asm("mov r5, r8 "); // if ANY, r8 = mid sl@0: __POPRET("r2,"); // and return with r0 = 0, Z=1 sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/) const sl@0: { sl@0: asm("mov r2, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsqSigned(const TAny* /*anEntry*/, TInt /*aMode*/) const sl@0: { sl@0: #ifdef __EABI__ sl@0: // sp needs to be aligned correctly sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r8,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r7,lr} "); sl@0: #endif sl@0: asm("mov r3, r2 "); // r3 = match mode sl@0: asm("ldmia r0, {r2,r4,r5,r6} "); // r2=count, r4=iEntries, r5=entry size, r6=key offset sl@0: asm("cmp r5, #4 "); // check for 4 byte entries sl@0: asm("ldr r1, [r1, r6] "); // r1=match key sl@0: asm("beq 1f "); // use simpler routine for 4 byte entries sl@0: asm("bl BinarySearchSignedKey "); // else call general routine sl@0: asm("b 2f "); sl@0: asm("1: "); sl@0: asm("bl BinarySearchSigned "); sl@0: asm("2: "); sl@0: asm("moveq r0, r2 "); // if match r0=index else r0=KErrNotFound sl@0: #ifdef __EABI__ sl@0: __POPRET("r4-r8,"); sl@0: #else sl@0: __POPRET("r4-r7,"); sl@0: #endif sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/) const sl@0: { sl@0: asm("mov r2, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsqUnsigned(const TAny* /*anEntry*/, TInt /*aMode*/) const sl@0: { sl@0: #ifdef __EABI__ sl@0: // sp needs to be aligned correctly sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: __EH_FRAME_PUSH2(r4-r8,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r7,lr} "); sl@0: #endif sl@0: asm("mov r3, r2 "); // r3 = match mode sl@0: asm("ldmia r0, {r2,r4,r5,r6} "); // r2=count, r4=iEntries, r5=entry size, r6=key offset sl@0: asm("cmp r5, #4 "); // check for 4 byte entries sl@0: asm("ldr r1, [r1, r6] "); // r1=match key sl@0: asm("beq 1f "); // use simpler routine for 4 byte entries sl@0: asm("bl BinarySearchUnsignedKey "); // else call general routine sl@0: asm("b 2f "); sl@0: asm("1: "); sl@0: asm("bl BinarySearchUnsigned "); sl@0: asm("2: "); sl@0: asm("moveq r0, r2 "); // if match r0=index else r0=KErrNotFound sl@0: #ifdef __EABI__ sl@0: __POPRET("r4-r8,"); sl@0: #else sl@0: __POPRET("r4-r7,"); sl@0: #endif sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/) const sl@0: { sl@0: asm("mov r3, #0 "); sl@0: // fall through sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ TInt RArrayBase::FindIsq(const TAny* /*anEntry*/, TGeneralLinearOrder /*anOrder*/, TInt /*aMode*/) const sl@0: { sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: // r3 is caller save sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: // we can push the callee save regs sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: asm("ldmia r0, {r5,r6,r11} "); // r5=count, r6=iEntries, r11=entry size sl@0: asm("mov r4, r1 "); // r4=anEntry sl@0: asm("mov r7, r2 "); // r7=anOrder sl@0: asm("mov r9, r3 "); // r9 = aMode sl@0: asm("bl BinarySearchEntries "); sl@0: asm("moveq r0, r5 "); // if match r0=index sl@0: __POPRET("r3-r11,"); sl@0: } sl@0: sl@0: #ifndef __KERNEL_MODE__ sl@0: EXPORT_C __NAKED__ void RArrayBase::HeapSortSigned() sl@0: { sl@0: #ifdef __EABI__ sl@0: // need sp aligned correctly sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r11,lr} "); sl@0: #endif sl@0: asm("ldmia r0, {r4,r5,r10,r11} "); // r4=iCount, r5=iEntries, r10=entry size, r11=key offset sl@0: asm("cmp r10, #4 "); sl@0: asm("bleq HeapSortSigned "); sl@0: asm("cmp r10, #4 "); sl@0: asm("blne HeapSortSignedKey "); sl@0: #ifdef __EABI__ sl@0: __POPRET("r3-r11,"); sl@0: #else sl@0: __POPRET("r4-r11,"); sl@0: #endif sl@0: sl@0: // Heap sort list of entries by signed key sl@0: // List address in r5, count in r4, entry size in r10, key offset in r11 sl@0: // r4=ss, r6=sh sl@0: // r8,r9 modified sl@0: asm("HeapSortSignedKey: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("mov r7, lr "); // save lr in r7 sl@0: asm("sub sp, sp, r10 "); // get some temporary space on the stack sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hssk_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("mla r1, r6, r10, r5 "); // [sp]=entry[r6] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hssk_loop_start2 "); sl@0: asm("hssk_loop_inner: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mla r1, r8, r10, r5 "); // r1=&entry[r8] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=entry[r8] sl@0: asm("mov r9, r8 "); sl@0: asm("hssk_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hssk_loop_inner_end "); sl@0: asm("mla r0, r8, r10, r5 "); sl@0: asm("ldrne r2, [r0, r11]! "); // r2=key[r8] sl@0: asm("addeq r0, r0, r11 "); sl@0: asm("ldr r1, [r0, -r10] "); // r1=key[r8-1] sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hssk_loop_inner2 "); sl@0: asm("cmp r1, r2 "); sl@0: asm("subgt r8, r8, #1 "); sl@0: asm("movle r1, r2 "); sl@0: asm("hssk_loop_inner2: "); sl@0: asm("ldr r2, [sp, r11] "); // r2=key[sp] sl@0: asm("cmp r1, r2 "); sl@0: asm("bgt hssk_loop_inner "); sl@0: asm("hssk_loop_inner_end: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=[sp] sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hssk_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("mla r1, r4, r10, r5 "); // r1=&entry[r4] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // [sp]=entry[r4] sl@0: asm("mla r0, r4, r10, r5 "); // r0=&entry[r4] sl@0: asm("mov r1, r5 "); // r1=&entry[0] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=entry[r4] sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hssk_loop_start2 "); sl@0: asm("mov r0, r5 "); // r0=&entry[0] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=[sp] sl@0: asm("add sp, sp, r10 "); // free temporary stack space sl@0: __JUMP(,r7); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ void RArrayBase::HeapSortUnsigned() sl@0: { sl@0: #ifdef __EABI__ sl@0: // need sp aligned correctly sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r11,lr} "); sl@0: #endif sl@0: asm("ldmia r0, {r4,r5,r10,r11} "); // r4=iCount, r5=iEntries, r10=entry size, r11=key offset sl@0: asm("cmp r10, #4 "); sl@0: asm("bleq HeapSortUnsigned "); sl@0: asm("cmp r10, #4 "); sl@0: asm("blne HeapSortUnsignedKey "); sl@0: #ifdef __EABI__ sl@0: __POPRET("r3-r11,"); sl@0: #else sl@0: __POPRET("r4-r11,"); sl@0: #endif sl@0: sl@0: // Heap sort list of entries by unsigned key sl@0: // List address in r5, count in r4, entry size in r10, key offset in r11 sl@0: // r4=ss, r6=sh sl@0: // r8,r9 modified sl@0: asm("HeapSortUnsignedKey: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("mov r7, lr "); // save lr in r7 sl@0: asm("sub sp, sp, r10 "); // get some temporary space on the stack sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hsuk_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("mla r1, r6, r10, r5 "); // [sp]=entry[r6] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hsuk_loop_start2 "); sl@0: asm("hsuk_loop_inner: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mla r1, r8, r10, r5 "); // r1=&entry[r8] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=entry[r8] sl@0: asm("mov r9, r8 "); sl@0: asm("hsuk_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hsuk_loop_inner_end "); sl@0: asm("mla r0, r8, r10, r5 "); sl@0: asm("ldrne r2, [r0, r11]! "); // r2=key[r8] sl@0: asm("addeq r0, r0, r11 "); sl@0: asm("ldr r1, [r0, -r10] "); // r1=key[r8-1] sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hsuk_loop_inner2 "); sl@0: asm("cmp r1, r2 "); sl@0: asm("subhi r8, r8, #1 "); sl@0: asm("movls r1, r2 "); sl@0: asm("hsuk_loop_inner2: "); sl@0: asm("ldr r2, [sp, r11] "); // r2=key[sp] sl@0: asm("cmp r1, r2 "); sl@0: asm("bhi hsuk_loop_inner "); sl@0: asm("hsuk_loop_inner_end: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=[sp] sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hsuk_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("mla r1, r4, r10, r5 "); // r1=&entry[r4] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // [sp]=entry[r4] sl@0: asm("mla r0, r4, r10, r5 "); // r0=&entry[r4] sl@0: asm("mov r1, r5 "); // r1=&entry[0] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=entry[r4] sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hsuk_loop_start2 "); sl@0: asm("mov r0, r5 "); // r0=&entry[0] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=[sp] sl@0: asm("add sp, sp, r10 "); // free temporary stack space sl@0: __JUMP(,r7); sl@0: } sl@0: sl@0: EXPORT_C __NAKED__ void RArrayBase::HeapSort(TGeneralLinearOrder anOrder) sl@0: { sl@0: #ifdef __EABI__ sl@0: // need sp aligned correctly sl@0: asm("stmfd sp!, {r3-r11,lr} "); sl@0: __EH_FRAME_ADDRESS(sp,4) sl@0: __EH_FRAME_PUSH2(r4-r11,lr) sl@0: #else sl@0: asm("stmfd sp!, {r4-r11,lr} "); sl@0: #endif sl@0: asm("ldmia r0, {r4,r5,r10,r11} "); sl@0: asm("mov r7, r1 "); sl@0: asm("bl HeapSortEntries "); sl@0: #ifdef __EABI__ sl@0: __POPRET("r3-r11,"); sl@0: #else sl@0: __POPRET("r4-r11,"); sl@0: #endif sl@0: sl@0: // Heap sort list of entries sl@0: // List address in r5, count in r4, entry size in r10, key offset in r11 sl@0: // Address of ordering function in r7 sl@0: // r4=ss, r6=sh sl@0: // r8,r9 modified sl@0: asm("HeapSortEntries: "); sl@0: asm("cmp r4, #1 "); sl@0: __JUMP(le,lr); sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("mov r8, sp "); // original SP sl@0: asm("sub sp, sp, r10 "); // get some temporary space on the stack sl@0: asm("sub sp, sp, #4 "); // make room for original SP sl@0: asm("bic sp, sp, #4 "); // align stack to 8 byte boundary sl@0: asm("str r8, [sp, r10] "); // save original SP sl@0: asm("mov r6, r4, lsr #1 "); sl@0: asm("hse_loop_start1: "); sl@0: asm("sub r6, r6, #1 "); sl@0: asm("mla r1, r6, r10, r5 "); // [sp]=entry[r6] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("b hse_loop_start2 "); sl@0: asm("hse_loop_inner: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mla r1, r8, r10, r5 "); // r1=&entry[r8] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=entry[r8] sl@0: asm("mov r9, r8 "); sl@0: asm("hse_loop_start2: "); sl@0: asm("add r8, r8, #1 "); sl@0: asm("add r8, r8, r8 "); sl@0: asm("cmp r8, r4 "); sl@0: asm("bgt hse_loop_inner_end "); sl@0: asm("subeq r8, r8, #1 "); sl@0: asm("beq hse_loop_inner2 "); sl@0: asm("mla r1, r8, r10, r5 "); // r1=&entry[r8] sl@0: asm("sub r0, r1, r10 "); // r0=&entry[r8-1] sl@0: __JUMPL(7); sl@0: asm("cmp r0, #0 "); // compare entry[r8-1] with entry[r8] sl@0: asm("subgt r8, r8, #1 "); sl@0: asm("hse_loop_inner2: "); sl@0: asm("mla r0, r8, r10, r5 "); // r0=&entry[r8] sl@0: asm("mov r1, sp "); sl@0: __JUMPL(7); sl@0: asm("cmp r0, #0 "); // compare entry[r8] with [sp] sl@0: asm("bgt hse_loop_inner "); sl@0: asm("hse_loop_inner_end: "); sl@0: asm("mla r0, r9, r10, r5 "); // r0=&entry[r9] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[r9]=[sp] sl@0: asm("cmp r6, #0 "); sl@0: asm("bne hse_loop_start1 "); sl@0: asm("sub r4, r4, #1 "); sl@0: asm("mla r1, r4, r10, r5 "); // r1=&entry[r4] sl@0: asm("mov r0, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // [sp]=entry[r4] sl@0: asm("mla r0, r4, r10, r5 "); // r0=&entry[r4] sl@0: asm("mov r1, r5 "); // r1=&entry[0] sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=entry[r4] sl@0: asm("cmp r4, #1 "); sl@0: asm("mov r8, r6 "); sl@0: asm("mov r9, r6 "); sl@0: asm("bgt hse_loop_start2 "); sl@0: asm("mov r0, r5 "); // r0=&entry[0] sl@0: asm("mov r1, sp "); sl@0: asm("mov r2, r10 "); sl@0: asm("bl wordmove "); // entry[0]=[sp] sl@0: asm("ldr sp, [sp, r10] "); // restore stack pointer, freeing temporary stack space sl@0: __POPRET(""); sl@0: } sl@0: #endif // __KERNEL_MODE__ sl@0: #endif // __ARRAY_MACHINE_CODED__