// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\klib\arm\cbma.cia
// Machine coded bitmap allocator for ARM
// This file is directly included in the test harness t_tbma
//
//

#include
#include
#include

#ifdef TBMA_TEST_CODE

#include

#ifdef __MARM__
#define __TBMA_MACHINE_CODED__
#endif

#else

#include

#endif

#ifdef __TBMA_MACHINE_CODED__

extern void TBmaFault(TInt aLine);
#define ASM_FAULT_LINE(x)	asm("ldr r0, [pc] "); asm("b " CSM_Z9TBmaFaulti ); asm(".word %a0" : : "i" (x));
#define ASM_FAULT()	ASM_FAULT_LINE(__LINE__)

#ifndef __EABI_CTORS__
/** Construct a new TBitMapAllocator object

	@param aSize  The number of bit positions required
	@param aState TRUE if all bit positions initially free
	              FALSE if all bit positions initially allocated
*/
EXPORT_C __NAKED__ TBitMapAllocator::TBitMapAllocator(TInt /*aSize*/, TBool /*aState*/)
	{
	asm("cmp r1, #0 ");
	asm("ble 0f ");
	asm("cmp r2, #0 ");
	asm("movne r2, r1 ");				// if aState r2=aSize else r2=0
	asm("str r2, [r0, #0] ");			// iAvail=aState?aSize:0
	asm("add r12, r0, #12 ");			// r12=&iMap[0]
	asm("str r1, [r0, #8] ");			// iSize=r1
	asm("add r3, r1, #31 ");
	asm("bic r3, r3, #31 ");			// r3=aSize rounded up to multiple of 32
	asm("sub r3, r3, #32 ");			// r3=32*(number of map words-1)
	asm("addeq r12, r12, r3, lsr #3 ");	// if !aState r12=&iMap[nmapw-1]
	asm("str r12, [r0, #4] ");			// iCheckFirst=aState?&iMap[0]:&iMap[nmapw-1]
	asm("mvnne r2, #0 ");				// if aState r2=0xffffffff else r2=0
	asm("add r12, r0, #12 ");			// r12=&iMap[0]
	asm("1: ");
	asm("str r2, [r12], #4 ");			// fill map
	asm("subs r1, r1, #32 ");
	asm("bhi 1b ");
	asm("rsbne r1, r1, #0 ");			// if aSize not a multiple of 32, r1=number of tail bits to clear
	asm("movne r2, r2, lsl r1 ");		// clear unused bits
	asm("strne r2, [r12, #-4] ");
	__JUMP(,lr);
	asm("0: ");
	ASM_FAULT();
	}
#endif
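/*
Illustrative sketch (not part of this file's build): creating an allocator.
The layout assumed by the constructor above is three TInt fields (iAvail,
iCheckFirst, iSize) at offsets 0/4/8 followed by the map words from offset 12,
one 32-bit word per 32 positions. TBitMapAllocator::New() from the generic C++
implementation sizes and heap-allocates the variable-length object accordingly:

	TBitMapAllocator* pA = TBitMapAllocator::New(256, ETrue);	// 256 positions, all free
	// pA->Avail()==256 and every map word is 0xffffffff (a 1 bit == a free position)
*/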
sl@0: asm("beq alloc_1 "); // if not, check next word sl@0: #ifdef __CPU_ARM_HAS_CLZ sl@0: CLZ(12, 3); sl@0: #else sl@0: asm("mov ip, #0 "); sl@0: asm("cmp r3, #0x00010000 "); // ip=number of leading zeros in r3 sl@0: asm("movlo r3, r3, lsl #16 "); sl@0: asm("addlo ip, ip, #16 "); sl@0: asm("cmp r3, #0x01000000 "); sl@0: asm("movlo r3, r3, lsl #8 "); sl@0: asm("addlo ip, ip, #8 "); sl@0: asm("cmp r3, #0x10000000 "); sl@0: asm("movlo r3, r3, lsl #4 "); sl@0: asm("addlo ip, ip, #4 "); sl@0: asm("cmp r3, #0x40000000 "); sl@0: asm("movlo r3, r3, lsl #2 "); sl@0: asm("addlo ip, ip, #2 "); sl@0: asm("cmp r3, #0x80000000 "); sl@0: asm("addlo ip, ip, #1 "); sl@0: #endif sl@0: asm("ldr r3, [r2, #-4]! "); sl@0: asm("mov r1, #0x80000000 "); sl@0: asm("bic r3, r3, r1, lsr ip "); // clear bit in allocator word sl@0: asm("str r3, [r2] "); sl@0: asm("str r2, [r0, #4] "); // update check first address sl@0: asm("sub r0, r2, r0 "); sl@0: asm("sub r0, r0, #12 "); // r0=offset of word from iMap[0] sl@0: asm("adds r0, ip, r0, lsl #3 "); // multiply by 8 and add bit position sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Free the specified bit position sl@0: sl@0: @param aPos Number of bit position to be freed; must be currently allocated. sl@0: */ sl@0: EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aPos*/) sl@0: { sl@0: asm("ldr r3, [r0, #8] "); // r3=iSize sl@0: asm("mov r2, r1, lsr #5 "); // r2=word index sl@0: asm("add r2, r0, r2, lsl #2 "); // r2=address of word-12 sl@0: asm("cmp r1, r3 "); sl@0: asm("bhs free_error "); sl@0: asm("and ip, r1, #0x1f "); // ip=bit number in word sl@0: asm("ldr r3, [r2, #12]! "); // r3=allocator word sl@0: asm("mov r1, #0x80000000 "); sl@0: asm("tst r3, r1, lsr ip "); // test bit sl@0: asm("bne free_error "); // if already free, error sl@0: asm("orr r3, r3, r1, lsr ip "); // set free bit sl@0: asm("str r3, [r2] "); sl@0: asm("ldmia r0, {r1,r3} "); // r1=available count, r3=first free address sl@0: asm("cmp r1, #1 "); // check original free count sl@0: asm("add r1, r1, #1 "); // increment available count sl@0: asm("str r1, [r0, #0] "); sl@0: asm("cmpcs r2, r3 "); // compare word address with first free sl@0: asm("strcc r2, [r0, #4] "); // if lower, update first free sl@0: __JUMP(,lr); sl@0: sl@0: asm("free_error: "); sl@0: ASM_FAULT(); sl@0: } sl@0: sl@0: sl@0: /** Allocate a specific range of bit positions sl@0: Specified range must lie within the total range for this allocator and all sl@0: the positions must currently be free. sl@0: sl@0: @param aStart First position to allocate sl@0: @param aLength Number of consecutive positions to allocate, must be >0 sl@0: */ sl@0: EXPORT_C __NAKED__ void TBitMapAllocator::Alloc(TInt /*aStart*/, TInt /*aLength*/) sl@0: { sl@0: asm("ldr ip, [r0, #8] "); sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("adds lr, r1, r2 "); sl@0: asm("bcs 0f "); sl@0: asm("cmp lr, ip "); sl@0: asm("bhi 0f "); sl@0: asm("mov r3, r1, lsr #5 "); sl@0: asm("ldr ip, [r0] "); sl@0: asm("and r1, r1, #0x1f "); sl@0: asm("add r3, r0, r3, lsl #2 "); sl@0: asm("sub ip, ip, r2 "); // reduce free count sl@0: asm("str ip, [r0] "); sl@0: asm("add ip, r2, r1 "); sl@0: asm("cmp ip, #32 "); sl@0: asm("bhi 1f "); sl@0: asm("mvn ip, #0 "); sl@0: asm("ldr r0, [r3, #12]! 
"); sl@0: asm("mvn ip, ip, lsr r2 "); sl@0: asm("mov ip, ip, lsr r1 "); sl@0: asm("orr lr, r0, ip "); sl@0: asm("cmp lr, r0 "); sl@0: asm("bne 0f "); sl@0: asm("bic r0, r0, ip "); sl@0: asm("str r0, [r3] "); sl@0: asm("ldr pc, [sp], #4 "); sl@0: asm("1: "); sl@0: asm("add r3, r3, #12 "); sl@0: asm("mvn r2, #0 "); sl@0: asm("mov r2, r2, lsr r1 "); sl@0: asm("2: "); sl@0: asm("ldr r1, [r3] "); sl@0: asm("orr lr, r1, r2 "); sl@0: asm("cmp lr, r1 "); sl@0: asm("bne 0f "); sl@0: asm("bic r1, r1, r2 "); sl@0: asm("str r1, [r3], #4 "); sl@0: asm("mvn r2, #0 "); sl@0: asm("subs ip, ip, #32 "); sl@0: asm("ldrls pc, [sp], #4 "); sl@0: asm("cmp ip, #32 "); sl@0: asm("mvncc r2, r2, lsr ip "); sl@0: asm("b 2b "); sl@0: sl@0: asm("0: "); sl@0: ASM_FAULT(); sl@0: } sl@0: sl@0: sl@0: /** Free a specific range of bit positions sl@0: Specified range must lie within the total range for this allocator and all sl@0: the positions must currently be allocated. sl@0: sl@0: @param aStart First position to free sl@0: @param aLength Number of consecutive positions to free, must be >0 sl@0: */ sl@0: EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aStart*/, TInt /*aLength*/) sl@0: { sl@0: asm("ldr ip, [r0, #8] "); sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("adds lr, r1, r2 "); sl@0: asm("bcs 0f "); sl@0: asm("cmp lr, ip "); sl@0: asm("bhi 0f "); sl@0: asm("mov r3, r1, lsr #5 "); sl@0: asm("and r1, r1, #0x1f "); sl@0: asm("add r3, r0, r3, lsl #2 "); sl@0: asm("ldmia r0, {ip,lr} "); // ip=free count, lr=first check addr sl@0: asm("add r3, r3, #12 "); sl@0: asm("cmp ip, #1 "); // check original free count sl@0: asm("add ip, ip, r2 "); // increase free count sl@0: asm("cmpcs r3, lr "); // if none free originally, always update address sl@0: asm("str ip, [r0] "); sl@0: asm("strcc r3, [r0, #4] "); // update first check addr if necessary sl@0: asm("add lr, r2, r1 "); sl@0: asm("cmp lr, #32 "); sl@0: asm("bhi 1f "); sl@0: asm("mvn lr, #0 "); sl@0: asm("ldr r0, [r3] "); sl@0: asm("mvn lr, lr, lsr r2 "); sl@0: asm("mov lr, lr, lsr r1 "); sl@0: asm("tst r0, lr "); sl@0: asm("bne 0f "); sl@0: asm("orr r0, r0, lr "); sl@0: asm("str r0, [r3] "); sl@0: asm("ldr pc, [sp], #4 "); sl@0: asm("1: "); sl@0: asm("mvn r2, #0 "); sl@0: asm("mov r2, r2, lsr r1 "); sl@0: asm("2: "); sl@0: asm("ldr r1, [r3] "); sl@0: asm("tst r1, r2 "); sl@0: asm("bne 0f "); sl@0: asm("orr r1, r1, r2 "); sl@0: asm("str r1, [r3], #4 "); sl@0: asm("mvn r2, #0 "); sl@0: asm("subs lr, lr, #32 "); sl@0: asm("ldrls pc, [sp], #4 "); sl@0: asm("cmp lr, #32 "); sl@0: asm("mvncc r2, r2, lsr lr "); sl@0: asm("b 2b "); sl@0: sl@0: asm("0: "); sl@0: ASM_FAULT(); sl@0: } sl@0: sl@0: sl@0: /** Free a specific range of bit positions sl@0: Specified range must lie within the total range for this allocator but it is sl@0: not necessary that all the positions are currently allocated. 
/** Free a specific range of bit positions
	Specified range must lie within the total range for this allocator but it is
	not necessary that all the positions are currently allocated.

	@param aStart  First position to free
	@param aLength Number of consecutive positions to free, must be >0
*/
EXPORT_C __NAKED__ void TBitMapAllocator::SelectiveFree(TInt /*aStart*/, TInt /*aLength*/)
	{
	asm("ldr r3, [r0, #8] ");
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("adds lr, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp lr, r3 ");
	asm("bhi 0f ");
	asm("mov r7, r0 ");					// r7 -> this
	asm("mov r4, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("ldmia r7, {r6,r8} ");			// r6=free count, r8=first check addr
	asm("add r4, r7, r4, lsl #2 ");
	asm("add r4, r4, #12 ");
	asm("cmp r6, #1 ");					// check original free count
	asm("add r6, r6, r2 ");				// r6=new free count assuming no positions already free
	asm("cmpcs r4, r8 ");				// if none free originally, always update address
	asm("strcc r4, [r7, #4] ");			// update first check addr if necessary
	asm("add r8, r2, r1 ");
	asm("cmp r8, #32 ");
	asm("bhi sfree_cross_bdry ");
	asm("mvn r8, #0 ");
	asm("mvn r8, r8, lsr r2 ");
	asm("mov r8, r8, lsr r1 ");
	asm("ldr r1, [r4] ");
	asm("ands r0, r1, r8 ");			// r0 has 1's in positions which are already free
	asm("orr r1, r1, r8 ");
	asm("str r1, [r4] ");				// store new bit mask
	asm("beq sfree_0 ");				// if none were already free, finished
	asm("bl " CSM_CFUNC(__e32_bit_count_32));
	asm("sub r6, r6, r0 ");
	asm("sfree_0: ");
	asm("str r6, [r7] ");				// store free count
	asm("ldmfd sp!, {r4-r8,pc} ");		// return

	asm("sfree_cross_bdry: ");
	asm("mvn r5, #0 ");
	asm("mov r5, r5, lsr r1 ");
	asm("sfree_cross_bdry_1: ");
	asm("ldr r1, [r4] ");				// original bit mask
	asm("ands r0, r1, r5 ");			// r0 has 1's in bit positions which are already free
	asm("orr r1, r1, r5 ");
	asm("str r1, [r4], #4 ");			// store new bit mask
	asm("beq sfree_2 ");				// skip if none were already free
	asm("bl " CSM_CFUNC(__e32_bit_count_32));
	asm("sub r6, r6, r0 ");
	asm("sfree_2: ");
	asm("mvn r5, #0 ");
	asm("subs r8, r8, #32 ");
	asm("bls sfree_0 ");
	asm("cmp r8, #32 ");
	asm("mvncc r5, r5, lsr r8 ");
	asm("b sfree_cross_bdry_1 ");

	asm("0: ");
	ASM_FAULT();
	}
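/*
Illustrative sketch (not part of this file's build): unlike Free(TInt,TInt),
SelectiveFree() tolerates positions that are already free; it counts the
already-free bits with __e32_bit_count_32() so that iAvail stays exact:

	pA->Alloc(0, 8);			// allocate positions 0..7
	pA->Free(2);				// give position 2 back early
	pA->SelectiveFree(0, 8);	// frees the rest; the already-free bit 2 is harmless
*/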
/** Tests if a specific range of bit positions are all free
	Specified range must lie within the total range for this allocator.

	@param aStart  First position to check
	@param aLength Number of consecutive positions to check, must be >0
	@return FALSE if all positions free, TRUE if at least one is occupied.
*/
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotFree(TInt /*aStart*/, TInt /*aLength*/) const
	{
	// Inverse logic - returns 0 if all positions free, nonzero otherwise
	asm("ldr r3, [r0, #8] ");
	asm("adds ip, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp ip, r3 ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("add ip, r2, r1 ");
	asm("add r3, r3, #12 ");
	asm("cmp ip, #32 ");
	asm("bhi 1f ");
	asm("mvn ip, #0 ");
	asm("ldr r0, [r3] ");
	asm("mvn ip, ip, lsr r2 ");
	asm("mov ip, ip, lsr r1 ");
	asm("eor r0, r0, ip ");
	asm("ands r0, r0, ip ");
	__JUMP(,lr);
	asm("1: ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3], #4 ");
	asm("eor r0, r1, r2 ");
	asm("ands r0, r0, r2 ");
	__JUMP(ne,lr);
	asm("mvn r2, #0 ");
	asm("subs ip, ip, #32 ");
	__JUMP(ls,lr);
	asm("cmp ip, #32 ");
	asm("mvncc r2, r2, lsr ip ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}


/** Tests if a specific range of bit positions are all occupied
	Specified range must lie within the total range for this allocator.

	@param aStart  First position to check
	@param aLength Number of consecutive positions to check, must be >0
	@return FALSE if all positions occupied, TRUE if at least one is free.
*/
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotAllocated(TInt /*aStart*/, TInt /*aLength*/) const
	{
	// Inverse logic - returns 0 if all positions allocated, nonzero otherwise
	asm("ldr r3, [r0, #8] ");
	asm("adds ip, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp ip, r3 ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("add ip, r2, r1 ");
	asm("add r3, r3, #12 ");
	asm("cmp ip, #32 ");
	asm("bhi 1f ");
	asm("mvn ip, #0 ");
	asm("ldr r0, [r3] ");
	asm("mvn ip, ip, lsr r2 ");
	asm("ands r0, r0, ip, lsr r1 ");
	__JUMP(,lr);
	asm("1: ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3], #4 ");
	asm("ands r0, r1, r2 ");
	__JUMP(ne,lr);
	asm("mvn r2, #0 ");
	asm("subs ip, ip, #32 ");
	__JUMP(ls,lr);
	asm("cmp ip, #32 ");
	asm("mvncc r2, r2, lsr ip ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}
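/*
Illustrative sketch (not part of this file's build): both predicates use
inverse logic, returning zero (false) when the tested condition holds for the
whole range, which makes them natural guards for the faulting range
Alloc()/Free() above:

	if (!pA->NotFree(96, 16))
		pA->Alloc(96, 16);		// safe: all of 96..111 were free
	if (!pA->NotAllocated(96, 16))
		pA->Free(96, 16);		// safe: all of 96..111 were allocated
*/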
/** Allocate up to a specified number of available bit positions
	The allocated positions are not required to bear any relationship to
	each other.
	If the number of free positions is less than the number requested,
	allocate all currently free positions.

	@param aLength Maximum number of positions to allocate.
	@param aList   Pointer to memory area where allocated bit numbers should be stored.
	@return The number of positions allocated
*/
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocList(TInt /*aLength*/, TInt* /*aList*/)
	{
	asm("ldmia r0, {r3,ip} ");			// r3=iAvail, ip=first check word
	asm("stmfd sp!, {r4-r5,lr} ");
	asm("cmp r1, r3 ");
	asm("movgt r1, r3 ");				// if aLength>iAvail, aLength=iAvail
	asm("movs r5, r1 ");				// r5 counts allocations
	asm("beq 0f ");						// if length 0, exit
	asm("sub r3, r3, r1 ");				// reduce available count
	asm("sub r4, ip, r0 ");
	asm("sub r4, r4, #12 ");			// r4=offset of first check word from iMap[0]
	asm("str r3, [r0] ");
	asm("mov r4, r4, lsl #3 ");			// r4=bit number of MSB of first check word
	asm("1: ");
	asm("ldr lr, [ip], #4 ");			// lr=next word
	asm("cmp lr, #0 ");
	asm("addeq r4, r4, #32 ");			// if word=0, increment bit number by 32 and check next word
	asm("beq 1b ");
	asm("mov r3, #1 ");
	asm("sub r4, r4, #1 ");
	asm("2: ");
	asm("mov r3, r3, ror #1 ");			// shift mask right one
	asm("add r4, r4, #1 ");				// and increment bit number
	asm("tst lr, r3 ");					// check next bit
	asm("beq 2b ");
	asm("str r4, [r2], #4 ");			// bit=1, so store bit number in list
	asm("subs r5, r5, #1 ");			// check if we are finished
	asm("beq 4f ");						// branch if we are
	asm("bics lr, lr, r3 ");			// clear bit and see if word now empty
	asm("bne 2b ");						// if word not empty, get next bit
	asm("str lr, [ip, #-4] ");			// word empty - clear word
	asm("add r4, r4, #32 ");			// word empty - step bit number on to next word
	asm("bic r4, r4, #31 ");
	asm("b 1b ");						// and go to check next word
	asm("4: ");
	asm("bics lr, lr, r3 ");			// clear bit
	asm("str lr, [ip, #-4] ");			// we are finished - store modified word
	asm("subne ip, ip, #4 ");			// if word not empty, first check=last read word
	asm("str ip, [r0, #4] ");			// update first check word
	asm("0: ");
	asm("mov r0, r1 ");					// return number of positions allocated
	asm("ldmfd sp!, {r4-r5,pc} ");
	}
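/*
Illustrative sketch (not part of this file's build): AllocList() grabs up to
aLength scattered positions, writes their numbers to the caller's array and
returns how many it actually took (fewer when the allocator is nearly full):

	TInt list[4];
	TInt n = pA->AllocList(4, list);	// n <= 4
	while (n--)
		pA->Free(list[n]);
*/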
/** Find a set of consecutive bit positions with specified alignment, with
	support for chaining multiple allocators.
	Note that this function does not mark the positions as allocated.

	@param aLength  Number of consecutive bit positions to allocate
	@param aAlign   Logarithm to base 2 of the alignment required
	@param aBase    The alignment of the first bit of this allocator - only significant modulo 2^aAlign
	@param aBestFit TRUE for best fit allocation strategy, FALSE for first fit
	@param aCarry   Carry in/carry out
	@param aRunLength Holds the best run length found so far. This will be set to KMaxTInt when no
	                suitable run length has been found. In best fit mode aCarry should also be
	                checked as aRunLength will not be set if aCarry is the only suitable run length
	                found.
	@param aOffset  The bit position to start the search from, set to 0 to search all bit positions.
	                aOffset will be aligned so all bits before an aligned aOffset will be
	                ignored. This can only be non-zero if aCarry is zero as any carry in bits will be
	                ignored if aOffset is non-zero.

	@return Start position if a suitable run was found
	@return KErrNotFound if no suitable run was found
	@return KErrOverflow if all positions are free and best fit mode is in use, or if all positions
	        are free in first fit mode and the length requested exceeds the number of positions available.

	@see TBitMapAllocator::AllocConsecutive(TInt aLength, TBool aBestFit)
	@see TBitMapAllocator::AllocAligned(TInt aLength, TInt aAlign, TInt aBase, TBool aBestFit)
	@see ..\bma.cpp for more details
*/
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocAligned(TInt /*aLength*/, TInt /*aAlign*/, TInt /*aBase*/,
							TBool /*aBestFit*/, TInt& /*aCarry*/, TInt& /*aRunLength*/,
							TUint /*aOffset*/) const
	{
	// r0=this, r1=aLength, r2=aAlign, r3=aBase, [sp+0]=aBestFit, [sp+4]=&aCarry, [sp+8]=&aRunLength
	// [sp+12] = aOffset.
	asm("ldr r12, [sp, #0] ");			// r12=aBestFit
	asm("cmp r1, #0 ");
	asm("ble aa_inv ");					// __ASSERT_ALWAYS(aLength>0, TBMA_FAULT())
	asm("cmp r2, #31 ");
	asm("bhs aa_inv ");					// __ASSERT_ALWAYS(TUint(aAlign)<31, TBMA_FAULT())
	asm("stmfd sp!, {r4-r11,lr} ");
	asm("movs r8, r12 ");
	asm("ldr r11, [sp, #40] ");			// r11=&aCarry
	asm("mvnne r8, #0x80000000 ");		// if (aBestFit) r8=7fffffff else r8=0
	asm("ldmia r0!, {r4-r6} ");			// r4=iAvail, r5=iCheckFirst, r6=iSize, r0->iMap[0]
	asm("ldr r12, [sp, #48] ");			// r12 = aOffset
	asm("cmp r6, r12 ");
	asm("bls aa_inv ");					// __ASSERT_ALWAYS(aOffset < (TUint)iSize, TBMA_FAULT())
	asm("ldr r9, [r11] ");				// r9=aCarry
	asm("cmp r9, #0 ");
	asm("cmpne r12, #0 ");
	asm("bne aa_inv ");					// __ASSERT_ALWAYS(!aCarry || !aOffset, TBMA_FAULT())
	asm("mov r12, #1 ");
	asm("mov r12, r12, lsl r2 ");		// r12=alignsize = 1<<aAlign
	// NB: the lines from here to the register summary were lost in conversion;
	// they are reconstructed (an assumption) from the stack and register usage below.
	asm("sub r2, r12, #1 ");			// r2=alignmask = alignsize-1
	asm("cmp r4, r6 ");					// iAvail==iSize? (all positions free)
	asm("beq aa_all_free ");			// branch if so
	asm("mvn r12, #0 ");
	asm("str r12, [sp, #-4]! ");		// saved position = KErrNotFound
	asm("add r10, r6, #31 ");
	asm("bic r10, r10, #31 ");			// r10=iSize rounded up to multiple of 32
	asm("add r10, r0, r10, lsr #3 ");	// r10=end address of bitmap
	asm("cmp r9, #0 ");					// carry in?
	asm("movne r5, r0 ");				// if so, scan from iMap[0]
	asm("mvnne r11, #0 ");				// and state s=0xffffffff (inside a free run)
	asm("moveq r11, #0 ");				// else s=0
	asm("rsb r9, r9, #0 ");				// q=run start position=-aCarry
	asm("sub r4, r5, r0 ");
	asm("mov r4, r4, lsl #3 ");			// r4=n=bit number of first bit of *pW

	// registers: r0->iMap, r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
	// r6=iSize, r7=, r8=saved run length, r9=run start pos
	// r10=end address of bitmap, r11=state
	asm("ldr r7, [sp, #52] ");			// r7 = aOffset
	asm("cmp r7, #0 ");					// if (aOffset)
	asm("beq aa_word ");
	asm("add r7, r7, r3 ");				// r7 = aOffset + aBase
	asm("add r7, r7, r2 ");				// r7 = aOffset + aBase + alignmask
	asm("bic r7, r7, r2 ");				// r7 = (aOffset + aBase + alignmask) & ~alignmask
	asm("sub r7, r7, r3 ");				// r7 -= aBase
	asm("mov r12, r7, lsr #5 ");		// r12 = aOffset >> 5 (number of pointer increments required)
	asm("add r0, r0, r12, lsl #2 ");	// r0 = offsetWord = iMap + (aOffset >> 5) (pointer add so shift=2)
	asm("cmp r0, r5 ");					// if (offsetWord >= pW)
	asm("movpl r5, r0 ");				// r5 = pW = offsetWord
	asm("andpl r4, r7, #0xffffffe0 ");	// r4 = n = aOffset & 0xffffffe0
	asm("andpl r7, r7, #31 ");			// r7 = aOffset & 31
	asm("mov r0, #0xffffffff ");		// r0 = 0xffffffff
	asm("mov r7, r0, lsr r7 ");			// r7 = offsetMask = 0xffffffff >> (aOffset & 31)

	// registers: r0=bit to check (b), r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
	// r6=iSize, r7=offsetMask, r8=saved run length, r9=run start pos
	// r10=end address of bitmap, r11=state, r12=word
	asm("aa_word: ");					// while (pW < pE)
	asm("cmp r5, r10 ");				// reached end?
sl@0: asm("ldrlo r12, [r5], #4 "); // if not, r12=next word (=*pW++) sl@0: asm("bhs aa_end_loop "); // if end, branch out sl@0: sl@0: asm("cmp r7, #0 "); // if (offsetMask) sl@0: asm("andne r12, r12, r7 "); // r12 = word &= offsetMask sl@0: asm("movne r7, #0 "); // offsetmask = 0; sl@0: sl@0: asm("eors r12, r12, r11 "); // r12=w^s, test if any of required bit present sl@0: asm("addeq r4, r4, #32 "); // if not, increment bit # by 32 sl@0: asm("beq aa_word "); // and do next word sl@0: asm("mov r0, #0x80000000 "); // bit to check (b) sl@0: sl@0: asm("aa_bit: "); // if ((word ^ s) & b) sl@0: asm("tst r12, r0 "); // does bit have required state? sl@0: asm("bne aa_bit_found "); sl@0: asm("aa_end_for: "); sl@0: asm("add r4, r4, #1 "); // increment bit number sl@0: asm("movs r0, r0, lsr #1 "); // next bit sl@0: asm("bne aa_bit "); // if all bits not done, do next sl@0: asm("b aa_word "); // else do next word sl@0: sl@0: asm("aa_bit_found: "); sl@0: asm("mvns r12, r12 "); // Invert r12 to invert search bit sl@0: asm("mvns r14, r11 "); // if (s) sl@0: asm("cmpeq r4, r6 "); // && n==iSize sl@0: asm("beq aa_end_loop "); // ... finished sl@0: asm("mvns r11, r11 "); // else s=~s sl@0: asm("movne r9, r4 "); // if (s) q=n (1 found so save position) sl@0: asm("bne aa_end_for "); sl@0: sl@0: asm("sub r14, r4, r9 "); // r14 = run length = n - q sl@0: asm("stmdb sp!, {r0,r12} "); // store b (r0) and word (r12) on stack sl@0: asm("add r12, r9, r3 "); // r12 = q + aBase sl@0: asm("add r12, r12, r2 "); // r12 = q + aBase + alignmask sl@0: asm("bic r12, r12, r2 "); // r12 = (q + aBase + alignmask) & ~alignmask sl@0: asm("sub r12, r12, r3 "); // r12 = alignedStartPos = r12 - aBase sl@0: asm("sub r0, r12, r9 "); // r0 = lost = alignedStartPos - q sl@0: asm("sub r0, r14, r0 "); // r0 = run length - lost sl@0: asm("cmp r0, r1 "); // if (run length - lost >= aLength) sl@0: asm("ldmltia sp!, {r0,r12} "); // if aligned length too short: r0 = b and r12 = word from stack sl@0: asm("blt aa_end_for "); // (run length - lost) too short (must be signed comparison) sl@0: sl@0: // if (rl-lost>=aLength) sl@0: sl@0: asm("cmp r1, r14 "); // check for exact run length match (if (run length == aLength)) sl@0: asm("cmpne r8, #0 "); // check for best fit (r8 only ever set if (aBestfit)) sl@0: asm("beq aa_found_it "); // exact match or not in best fit mode sl@0: sl@0: // if (r1= 0)? alignedStartPos : 0 sl@0: asm("cmp r14, r8 "); // Compare run length with current minimum sl@0: asm("movlo r8, r14 "); // if shorter, replace sl@0: asm("strlo r12, [sp, #8] "); // save alignedStartPos (p = (alignedStartPos >= 0)? alignedStartPos : 0) sl@0: asm("ldmia sp!, {r0,r12} "); // r0 = b and r12 = word from stack sl@0: asm("b aa_end_for "); // next bit sl@0: // end {if (r1= 0? 
sl@0: asm("movmi r0, #0 "); // if alignedStartPos < 0 r0=0 sl@0: asm("str r14, [r7] "); // aRunLength = run length sl@0: asm("mov r14, #0 "); sl@0: asm("strge r14, [r1] "); // if (alignedStartPos >= 0), aCarry=0 sl@0: asm("ldmfd sp!, {r1-r11,pc} "); // return sl@0: // end {if (!aBestFit || run length == aLength)} sl@0: sl@0: // end {if (rl-lost>=aLength)} sl@0: sl@0: asm("aa_end_loop: "); sl@0: asm("ldr r10, [sp, #48] "); // r10=&aRunLength sl@0: sl@0: // registers: r2 = alignmask, r3 = aBase, r4=current bit number(n), sl@0: // r9=run start pos(q), r10=&aRunLength, r11 = state(s), r14 = run length(rl) sl@0: asm("cmp r8, r1 "); // compare min rl with aLength sl@0: asm("beq aa_end_loop2 "); // if exact match, skip sl@0: sl@0: // if (minrl != aLength) sl@0: asm("ldr r12, [sp, #44] "); // r12=&aCarry sl@0: asm("mov r14, #0 "); // r14 = run length = 0 sl@0: asm("cmp r11, #0 "); sl@0: asm("beq aa_end_loop3 "); // if (!s) no final run sl@0: asm("sub r14, r4, r9 "); // rl4 = run length = n-q sl@0: asm("cmp r8, #0 "); // if (!aBestFit) (r8 only and always set when best fit mode) sl@0: asm("bne aa_end_loop3 "); // if best fit, don't count final run sl@0: sl@0: // if (!aBestFit) sl@0: asm("add r0, r9, r3 "); // r0 = q + aBase sl@0: asm("add r0, r0, r2 "); // r0 = q + aBase + alignmask sl@0: asm("bic r0, r0, r2 "); // r0 = (q + aBase + alignmask) & ~alignmask sl@0: asm("sub r0, r0, r3 "); // r0 = alignedStartPos = r0 -= aBase sl@0: asm("sub r2, r0, r9 "); // r2 = lost = alignedStartPos - q sl@0: asm("sub r2, r14, r2 "); // r2 = run length - lost sl@0: asm("cmp r2, r1 "); // if (run length - lost >= aLength) sl@0: asm("blt aa_end_loop3 "); sl@0: sl@0: // if (run length - lost >= aLength) sl@0: asm("mov r8, r14 "); // r8 = run length (ready to be stored in return) sl@0: asm("mov r14, #0 "); // r14 = 0 (aCarry on return) sl@0: asm("str r0, [sp, #0] "); // Save alignedStartPos on stack ready for return sl@0: sl@0: // end {if (run length - lost >= aLength)} sl@0: // end {if (!aBestFit)} sl@0: sl@0: asm("aa_end_loop3: "); sl@0: asm("str r14, [r12] "); // Save aCarry = run length = r14 sl@0: // end {if (minrl != aLength)} sl@0: sl@0: asm("aa_end_loop2: "); sl@0: asm("str r8, [r10] "); // aRunLength = minrl sl@0: asm("ldmfd sp!, {r0,r4-r11,pc} "); // return saved pos sl@0: sl@0: // r1 = aLength r2 = alignmask, r3 = aBase, r4 = iAvail, r6 = iSize, r9 = aCarry, r11 = &aCarry sl@0: asm("aa_all_free: "); sl@0: asm("ldr r12, [sp, #48] "); // r12 = aOffset; sl@0: asm("cmp r12, #0 "); // if (aOffset) sl@0: asm("addne r12, r12, r3 "); // r12 = aOffset + aBase sl@0: asm("addne r12, r12, r2 "); // r12 = aOffset + aBase + alignmask sl@0: asm("bicne r12, r12, r2 "); // r12 = (aOffset + aBase + alignmask)&~alignmask sl@0: asm("subne r12, r12, r3 "); // r12 = ((aOffset + aBase + alignmask)&~alignmask) - aBase sl@0: asm("subs r10, r6, r12 "); // r10 = runLength = iSize - aOffset sl@0: asm("movmi r10, #0 "); // if (aOffset < (TUint)iSize) runLength = 0; sl@0: sl@0: asm("movs r0, r8 "); // best fit? 
	asm("bne aa_all_free2 ");			// skip if best fit mode
	asm("sub r6, r12, r9 ");			// r6=aOffset-aCarry
	asm("add r6, r6, r3 ");				// r6=aOffset-aCarry+aBase
	asm("add r6, r6, r2 ");				// r6=aOffset-aCarry+aBase+alignmask
	asm("bic r6, r6, r2 ");				// r6=(aOffset-aCarry+aBase+alignmask)&~alignmask
	asm("sub r6, r6, r3 ");				// r6 = alignedStartPos
	asm("sub r3, r12, r9 ");			// r3 = aOffset - aCarry
	asm("sub r3, r6, r3 ");				// r3 = lost = alignedStartPos - (aOffset - aCarry)
	asm("add r2, r10, r9 ");			// r2 = aRunLength + aCarry
	asm("sub r2, r2, r3 ");				// r2 -= lost
	asm("cmp r2, r1 ");					// if (aRunLength + aCarry - lost >= aLength)
	asm("blt aa_all_free2 ");
	asm("cmp r6, #0 ");
	asm("ldr r5, [sp, #44] ");			// r5 = &aRunLength
	asm("str r10, [r5] ");				// Save aRunLength (aRunLength = runLength)
	asm("movge r9, #0 ");				// if (alignedStartPos >= 0) aCarry = 0
	asm("str r9, [r11] ");				// Save aCarry
	asm("movge r0, r6 ");				// r0 = (alignedStartPos >= 0)? alignedStartPos : 0
	asm("ldmfd sp!, {r4-r11,pc} ");		// return r0

	asm("aa_all_free2: ");
	asm("ldr r12, [sp, #48] ");			// r12 = aOffset
	asm("cmp r12, #0 ");				// if (aOffset)
	asm("movne r9, r10 ");				// r9 = aCarry = runLength
	asm("addeq r9, r9, r4 ");			// r9 = aCarry + iAvail
	asm("str r9, [r11] ");				// Save aCarry
	asm("ldr r5, [sp, #44] ");			// r5 = &aRunLength
	asm("mov r0, #%a0" : : "i" ((TInt)KMaxTInt));
	asm("str r0, [r5] ");				// aRunLength = KMaxTInt
	asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
	asm("ldmfd sp!, {r4-r11,pc} ");		// return KErrOverflow

	asm("aa_inv: ");
	ASM_FAULT();
	}
#endif
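/*
Illustrative sketch (not part of this file's build): AllocAligned() only finds
a suitable run; the caller commits it afterwards. A minimal single-allocator,
first-fit search (no carry, no offset) looks like:

	TInt carry = 0;				// no run carried in from a chained allocator
	TInt runLen;
	TInt pos = pA->AllocAligned(16, 4, 0, EFalse, carry, runLen, 0);
	if (pos >= 0)
		pA->Alloc(pos, 16);		// mark the 16-position, 2^4-aligned run allocated
*/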