1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/klib/arm/cbma.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,743 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\klib\arm\cbma.cia
1.18 +// Machine coded bitmap allocator for ARM
1.19 +// This file is directly included in the test harness t_tbma
1.20 +//
1.21 +//
1.22 +
1.23 +#include <kernel/kbma.h>
1.24 +#include <cpudefs.h>
1.25 +#include <e32cia.h>
1.26 +
1.27 +#ifdef TBMA_TEST_CODE
1.28 +
1.29 +#include <e32atomics.h>
1.30 +
1.31 +#ifdef __MARM__
1.32 +#define __TBMA_MACHINE_CODED__
1.33 +#endif
1.34 +
1.35 +#else
1.36 +
1.37 +#include <kernel/kern_priv.h>
1.38 +
1.39 +#endif
1.40 +
1.41 +#ifdef __TBMA_MACHINE_CODED__
1.42 +
1.43 +extern void TBmaFault(TInt aLine);
1.44 +#define ASM_FAULT_LINE(x) asm("ldr r0, [pc] "); asm("b " CSM_Z9TBmaFaulti ); asm(".word %a0" : : "i" (x));
1.45 +#define ASM_FAULT() ASM_FAULT_LINE(__LINE__)
1.46 +
1.47 +#ifndef __EABI_CTORS__
1.48 +/** Construct a new TBitMapAllocator object
1.49 +
1.50 + @param aSize The number of bit positions required
1.51 + @param aState TRUE if all bit positions initially free
1.52 + FALSE if all bit positions initially allocated
1.53 + */
1.54 +EXPORT_C __NAKED__ TBitMapAllocator::TBitMapAllocator(TInt /*aSize*/, TBool /*aState*/)
1.55 + {
1.56 + asm("cmp r1, #0 ");
1.57 + asm("ble 0f ");
1.58 + asm("cmp r2, #0 ");
1.59 + asm("movne r2, r1 "); // if aState r2=aSize else r2=0
1.60 + asm("str r2, [r0, #0] "); // iAvail=aState?aSize:0
1.61 + asm("add r12, r0, #12 "); // r12=&iMap[0]
1.62 + asm("str r1, [r0, #8] "); // iSize=r1
1.63 + asm("add r3, r1, #31 ");
1.64 + asm("bic r3, r3, #31 "); // r3=aSize rounded up to multiple of 32
1.65 + asm("sub r3, r3, #32 "); // r3=32*(number of map words-1)
1.66 + asm("addeq r12, r12, r3, lsr #3 "); // if !aState r12=&iMap[nmapw-1]
1.67 + asm("str r12, [r0, #4] "); // iCheckFirst=aState?&iMap[0]:&iMap[nmapw-1]
1.68 + asm("mvnne r2, #0 "); // if aState r2=0xffffffff else r2=0
1.69 + asm("add r12, r0, #12 "); // r12=&iMap[0]
1.70 + asm("1: ");
1.71 + asm("str r2, [r12], #4 "); // fill map
1.72 + asm("subs r1, r1, #32 ");
1.73 + asm("bhi 1b ");
1.74 + asm("rsbne r1, r1, #0 "); // if aSize not a multiple of 32, r1=number of tail bits to clear
1.75 + asm("movne r2, r2, lsl r1 "); // clear unused bits
1.76 + asm("strne r2, [r12, #-4] ");
1.77 + __JUMP(,lr);
1.78 + asm("0: ");
1.79 + ASM_FAULT();
1.80 + }
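+
+// A rough C++ equivalent of the constructor above, for reference only; this sketch is
+// not what gets compiled.  It assumes the member layout used throughout this file
+// (iAvail, iCheckFirst, iSize, then iMap[]) and the convention that bit n is held as
+// mask (0x80000000u >> (n & 31)) in iMap[n >> 5], with a set bit meaning "free".
+//
+//	TBitMapAllocator::TBitMapAllocator(TInt aSize, TBool aState)
+//		{
+//		__ASSERT_ALWAYS(aSize > 0, TBMA_FAULT());
+//		TInt nmapw = (aSize + 31) >> 5;						// number of 32-bit map words
+//		iAvail = aState ? aSize : 0;
+//		iSize = aSize;
+//		iCheckFirst = aState ? iMap : iMap + nmapw - 1;
+//		TUint32 fill = aState ? 0xffffffffu : 0;
+//		for (TInt i = 0; i < nmapw; ++i)
+//			iMap[i] = fill;
+//		if (aSize & 31)
+//			iMap[nmapw - 1] = fill << (32 - (aSize & 31));	// unused tail bits stay clear
+//		}
+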
1.81 +#endif
1.82 +
1.83 +
1.84 +/** Allocate the next available bit position
1.85 +
1.86 +	@return The number of the bit position allocated, or -1 if all positions are occupied
1.87 + */
1.88 +EXPORT_C __NAKED__ TInt TBitMapAllocator::Alloc()
1.89 + {
1.90 + asm("ldmia r0, {r1,r2} "); // r1=available, r2=check first address
1.91 + asm("subs r1, r1, #1 "); // decrement free count
1.92 + asm("mvnmi r0, #0 "); // if none free, return with r0=-1
1.93 + __JUMP(mi,lr);
1.94 + asm("str r1, [r0] "); // store decremented free count
1.95 + asm("alloc_1: ");
1.96 + asm("ldr r3, [r2], #4 "); // check word
1.97 + asm("cmp r3, #0 "); // any free entries?
1.98 + asm("beq alloc_1 "); // if not, check next word
1.99 +#ifdef __CPU_ARM_HAS_CLZ
1.100 + CLZ(12, 3);
1.101 +#else
1.102 + asm("mov ip, #0 ");
1.103 + asm("cmp r3, #0x00010000 "); // ip=number of leading zeros in r3
1.104 + asm("movlo r3, r3, lsl #16 ");
1.105 + asm("addlo ip, ip, #16 ");
1.106 + asm("cmp r3, #0x01000000 ");
1.107 + asm("movlo r3, r3, lsl #8 ");
1.108 + asm("addlo ip, ip, #8 ");
1.109 + asm("cmp r3, #0x10000000 ");
1.110 + asm("movlo r3, r3, lsl #4 ");
1.111 + asm("addlo ip, ip, #4 ");
1.112 + asm("cmp r3, #0x40000000 ");
1.113 + asm("movlo r3, r3, lsl #2 ");
1.114 + asm("addlo ip, ip, #2 ");
1.115 + asm("cmp r3, #0x80000000 ");
1.116 + asm("addlo ip, ip, #1 ");
1.117 +#endif
1.118 + asm("ldr r3, [r2, #-4]! ");
1.119 + asm("mov r1, #0x80000000 ");
1.120 + asm("bic r3, r3, r1, lsr ip "); // clear bit in allocator word
1.121 + asm("str r3, [r2] ");
1.122 + asm("str r2, [r0, #4] "); // update check first address
1.123 + asm("sub r0, r2, r0 ");
1.124 + asm("sub r0, r0, #12 "); // r0=offset of word from iMap[0]
1.125 + asm("adds r0, ip, r0, lsl #3 "); // multiply by 8 and add bit position
1.126 + __JUMP(,lr);
1.127 + }
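+
+// A rough C++ equivalent of Alloc() above, for reference only; same layout and bit
+// conventions as the sketch after the constructor.
+//
+//	TInt TBitMapAllocator::Alloc()
+//		{
+//		if (!iAvail)
+//			return -1;							// nothing free
+//		--iAvail;
+//		TUint32* p = iCheckFirst;
+//		while (!*p)
+//			++p;								// skip fully-allocated words
+//		TUint32 w = *p;
+//		TInt bit = 0;
+//		while (!(w & 0x80000000u))
+//			{ w <<= 1; ++bit; }					// bit = leading-zero count = position within word
+//		*p &= ~(0x80000000u >> bit);			// mark it allocated
+//		iCheckFirst = p;
+//		return ((p - iMap) << 5) + bit;
+//		}
+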
1.128 +
1.129 +
1.130 +/** Free the specified bit position
1.131 +
1.132 +	@param aPos The number of the bit position to be freed; this position must currently be allocated.
1.133 + */
1.134 +EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aPos*/)
1.135 + {
1.136 + asm("ldr r3, [r0, #8] "); // r3=iSize
1.137 + asm("mov r2, r1, lsr #5 "); // r2=word index
1.138 + asm("add r2, r0, r2, lsl #2 "); // r2=address of word-12
1.139 + asm("cmp r1, r3 ");
1.140 + asm("bhs free_error ");
1.141 + asm("and ip, r1, #0x1f "); // ip=bit number in word
1.142 + asm("ldr r3, [r2, #12]! "); // r3=allocator word
1.143 + asm("mov r1, #0x80000000 ");
1.144 + asm("tst r3, r1, lsr ip "); // test bit
1.145 + asm("bne free_error "); // if already free, error
1.146 + asm("orr r3, r3, r1, lsr ip "); // set free bit
1.147 + asm("str r3, [r2] ");
1.148 + asm("ldmia r0, {r1,r3} "); // r1=available count, r3=first free address
1.149 + asm("cmp r1, #1 "); // check original free count
1.150 + asm("add r1, r1, #1 "); // increment available count
1.151 + asm("str r1, [r0, #0] ");
1.152 + asm("cmpcs r2, r3 "); // compare word address with first free
1.153 + asm("strcc r2, [r0, #4] "); // if lower, update first free
1.154 + __JUMP(,lr);
1.155 +
1.156 + asm("free_error: ");
1.157 + ASM_FAULT();
1.158 + }
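+
+// A rough C++ equivalent of Free(TInt) above, for reference only; same conventions as
+// the sketch after the constructor.
+//
+//	void TBitMapAllocator::Free(TInt aPos)
+//		{
+//		__ASSERT_ALWAYS(TUint(aPos) < TUint(iSize), TBMA_FAULT());
+//		TUint32* p = iMap + (aPos >> 5);
+//		TUint32 mask = 0x80000000u >> (aPos & 31);
+//		__ASSERT_ALWAYS(!(*p & mask), TBMA_FAULT());	// must currently be allocated
+//		*p |= mask;										// mark it free
+//		if (iAvail++ == 0 || p < iCheckFirst)
+//			iCheckFirst = p;							// keep the lowest word with a free bit
+//		}
+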
1.159 +
1.160 +
1.161 +/** Allocate a specific range of bit positions
1.162 + Specified range must lie within the total range for this allocator and all
1.163 + the positions must currently be free.
1.164 +
1.165 + @param aStart First position to allocate
1.166 + @param aLength Number of consecutive positions to allocate, must be >0
1.167 + */
1.168 +EXPORT_C __NAKED__ void TBitMapAllocator::Alloc(TInt /*aStart*/, TInt /*aLength*/)
1.169 + {
1.170 +	asm("ldr ip, [r0, #8] ");				// ip=iSize
1.171 +	asm("str lr, [sp, #-4]! ");				// save return address
1.172 +	asm("adds lr, r1, r2 ");				// lr=aStart+aLength
1.173 +	asm("bcs 0f ");							// fault on overflow
1.174 +	asm("cmp lr, ip ");
1.175 +	asm("bhi 0f ");							// fault if range extends beyond iSize
1.176 +	asm("mov r3, r1, lsr #5 ");				// r3=word index of aStart
1.177 +	asm("ldr ip, [r0] ");					// ip=iAvail
1.178 +	asm("and r1, r1, #0x1f ");				// r1=bit offset of aStart within its word
1.179 +	asm("add r3, r0, r3, lsl #2 ");			// r3=address of that word - 12
1.180 +	asm("sub ip, ip, r2 ");					// reduce free count
1.181 +	asm("str ip, [r0] ");
1.182 +	asm("add ip, r2, r1 ");					// ip=bit offset+aLength
1.183 +	asm("cmp ip, #32 ");					// does the range cross a word boundary?
1.184 +	asm("bhi 1f ");							// if so, use the multi-word loop
1.185 +	asm("mvn ip, #0 ");
1.186 +	asm("ldr r0, [r3, #12]! ");				// r0=map word, r3=its address
1.187 +	asm("mvn ip, ip, lsr r2 ");				// ip=mask with top aLength bits set
1.188 +	asm("mov ip, ip, lsr r1 ");				// shift mask down to the bit offset
1.189 +	asm("orr lr, r0, ip ");
1.190 +	asm("cmp lr, r0 ");						// equal only if all bits in range are currently free
1.191 +	asm("bne 0f ");							// if not, fault
1.192 +	asm("bic r0, r0, ip ");					// clear the bits = mark them allocated
1.193 +	asm("str r0, [r3] ");
1.194 +	asm("ldr pc, [sp], #4 ");				// return
1.195 +	asm("1: ");
1.196 +	asm("add r3, r3, #12 ");				// r3=address of first word containing the range
1.197 +	asm("mvn r2, #0 ");
1.198 +	asm("mov r2, r2, lsr r1 ");				// r2=mask from the bit offset to the end of the word
1.199 +	asm("2: ");
1.200 +	asm("ldr r1, [r3] ");					// r1=map word
1.201 +	asm("orr lr, r1, r2 ");
1.202 +	asm("cmp lr, r1 ");						// equal only if all masked bits are currently free
1.203 +	asm("bne 0f ");							// if not, fault
1.204 +	asm("bic r1, r1, r2 ");					// clear them = mark allocated
1.205 +	asm("str r1, [r3], #4 ");				// store and step to the next word
1.206 +	asm("mvn r2, #0 ");						// full-word mask for subsequent words
1.207 +	asm("subs ip, ip, #32 ");				// bits still to do
1.208 +	asm("ldrls pc, [sp], #4 ");				// if none left, return
1.209 +	asm("cmp ip, #32 ");
1.210 +	asm("mvncc r2, r2, lsr ip ");			// if <32 left, partial mask of top ip bits for the last word
1.211 +	asm("b 2b ");							// do the next word
1.212 +
1.213 + asm("0: ");
1.214 + ASM_FAULT();
1.215 + }
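+
+// A rough C++ equivalent of Alloc(TInt, TInt) above, for reference only.  The sketch
+// works a bit at a time for clarity; the assembly above works a word at a time.
+//
+//	void TBitMapAllocator::Alloc(TInt aStart, TInt aLength)
+//		{
+//		__ASSERT_ALWAYS(TUint(aStart) + TUint(aLength) <= TUint(iSize), TBMA_FAULT());
+//		iAvail -= aLength;
+//		for (TInt pos = aStart; pos < aStart + aLength; ++pos)
+//			{
+//			TUint32& w = iMap[pos >> 5];
+//			TUint32 mask = 0x80000000u >> (pos & 31);
+//			__ASSERT_ALWAYS(w & mask, TBMA_FAULT());	// every position must be free
+//			w &= ~mask;									// mark it allocated
+//			}
+//		}
+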
1.216 +
1.217 +
1.218 +/** Free a specific range of bit positions
1.219 + Specified range must lie within the total range for this allocator and all
1.220 + the positions must currently be allocated.
1.221 +
1.222 + @param aStart First position to free
1.223 + @param aLength Number of consecutive positions to free, must be >0
1.224 + */
1.225 +EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aStart*/, TInt /*aLength*/)
1.226 + {
1.227 + asm("ldr ip, [r0, #8] ");
1.228 + asm("str lr, [sp, #-4]! ");
1.229 + asm("adds lr, r1, r2 ");
1.230 + asm("bcs 0f ");
1.231 + asm("cmp lr, ip ");
1.232 + asm("bhi 0f ");
1.233 + asm("mov r3, r1, lsr #5 ");
1.234 + asm("and r1, r1, #0x1f ");
1.235 + asm("add r3, r0, r3, lsl #2 ");
1.236 + asm("ldmia r0, {ip,lr} "); // ip=free count, lr=first check addr
1.237 + asm("add r3, r3, #12 ");
1.238 + asm("cmp ip, #1 "); // check original free count
1.239 + asm("add ip, ip, r2 "); // increase free count
1.240 + asm("cmpcs r3, lr "); // if none free originally, always update address
1.241 + asm("str ip, [r0] ");
1.242 + asm("strcc r3, [r0, #4] "); // update first check addr if necessary
1.243 + asm("add lr, r2, r1 ");
1.244 + asm("cmp lr, #32 ");
1.245 + asm("bhi 1f ");
1.246 + asm("mvn lr, #0 ");
1.247 + asm("ldr r0, [r3] ");
1.248 + asm("mvn lr, lr, lsr r2 ");
1.249 + asm("mov lr, lr, lsr r1 ");
1.250 + asm("tst r0, lr ");
1.251 + asm("bne 0f ");
1.252 + asm("orr r0, r0, lr ");
1.253 + asm("str r0, [r3] ");
1.254 + asm("ldr pc, [sp], #4 ");
1.255 + asm("1: ");
1.256 + asm("mvn r2, #0 ");
1.257 + asm("mov r2, r2, lsr r1 ");
1.258 + asm("2: ");
1.259 + asm("ldr r1, [r3] ");
1.260 + asm("tst r1, r2 ");
1.261 + asm("bne 0f ");
1.262 + asm("orr r1, r1, r2 ");
1.263 + asm("str r1, [r3], #4 ");
1.264 + asm("mvn r2, #0 ");
1.265 + asm("subs lr, lr, #32 ");
1.266 + asm("ldrls pc, [sp], #4 ");
1.267 + asm("cmp lr, #32 ");
1.268 + asm("mvncc r2, r2, lsr lr ");
1.269 + asm("b 2b ");
1.270 +
1.271 + asm("0: ");
1.272 + ASM_FAULT();
1.273 + }
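+
+// A rough C++ equivalent of Free(TInt, TInt) above, for reference only; bit-at-a-time
+// for clarity.
+//
+//	void TBitMapAllocator::Free(TInt aStart, TInt aLength)
+//		{
+//		__ASSERT_ALWAYS(TUint(aStart) + TUint(aLength) <= TUint(iSize), TBMA_FAULT());
+//		TUint32* p = iMap + (aStart >> 5);
+//		if (!iAvail || p < iCheckFirst)
+//			iCheckFirst = p;								// range starts below the old check point
+//		iAvail += aLength;
+//		for (TInt pos = aStart; pos < aStart + aLength; ++pos)
+//			{
+//			TUint32& w = iMap[pos >> 5];
+//			TUint32 mask = 0x80000000u >> (pos & 31);
+//			__ASSERT_ALWAYS(!(w & mask), TBMA_FAULT());		// every position must be allocated
+//			w |= mask;										// mark it free
+//			}
+//		}
+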
1.274 +
1.275 +
1.276 +/** Free a specific range of bit positions
1.277 + Specified range must lie within the total range for this allocator but it is
1.278 + not necessary that all the positions are currently allocated.
1.279 +
1.280 + @param aStart First position to free
1.281 + @param aLength Number of consecutive positions to free, must be >0
1.282 + */
1.283 +EXPORT_C __NAKED__ void TBitMapAllocator::SelectiveFree(TInt /*aStart*/, TInt /*aLength*/)
1.284 + {
1.285 + asm("ldr r3, [r0, #8] ");
1.286 + asm("stmfd sp!, {r4-r8,lr} ");
1.287 + asm("adds lr, r1, r2 ");
1.288 + asm("bcs 0f ");
1.289 + asm("cmp lr, r3 ");
1.290 + asm("bhi 0f ");
1.291 + asm("mov r7, r0 "); // r7 -> this
1.292 + asm("mov r4, r1, lsr #5 ");
1.293 + asm("and r1, r1, #0x1f ");
1.294 + asm("ldmia r7, {r6,r8} "); // r6=free count, r8=first check addr
1.295 + asm("add r4, r7, r4, lsl #2 ");
1.296 + asm("add r4, r4, #12 ");
1.297 + asm("cmp r6, #1 "); // check original free count
1.298 + asm("add r6, r6, r2 "); // r6=new free count assuming no positions already free
1.299 + asm("cmpcs r4, r8 "); // if none free originally, always update address
1.300 + asm("strcc r4, [r7, #4] "); // update first check addr if necessary
1.301 + asm("add r8, r2, r1 ");
1.302 + asm("cmp r8, #32 ");
1.303 + asm("bhi sfree_cross_bdry ");
1.304 + asm("mvn r8, #0 ");
1.305 + asm("mvn r8, r8, lsr r2 ");
1.306 + asm("mov r8, r8, lsr r1 ");
1.307 + asm("ldr r1, [r4] ");
1.308 + asm("ands r0, r1, r8 "); // r0 has 1's in positions which are already free
1.309 + asm("orr r1, r1, r8 ");
1.310 + asm("str r1, [r4] "); // store new bit mask
1.311 + asm("beq sfree_0 "); // if none were already free, finished
1.312 + asm("bl " CSM_CFUNC(__e32_bit_count_32));
1.313 + asm("sub r6, r6, r0 ");
1.314 + asm("sfree_0: ");
1.315 + asm("str r6, [r7] "); // store free count
1.316 + asm("ldmfd sp!, {r4-r8,pc} "); // return
1.317 +
1.318 + asm("sfree_cross_bdry: ");
1.319 + asm("mvn r5, #0 ");
1.320 + asm("mov r5, r5, lsr r1 ");
1.321 + asm("sfree_cross_bdry_1: ");
1.322 + asm("ldr r1, [r4] "); // original bit mask
1.323 + asm("ands r0, r1, r5 "); // r0 has 1's in bit positions which are already free
1.324 + asm("orr r1, r1, r5 ");
1.325 + asm("str r1, [r4], #4 "); // store new bit mask
1.326 + asm("beq sfree_2 "); // skip if none were already free
1.327 + asm("bl " CSM_CFUNC(__e32_bit_count_32));
1.328 + asm("sub r6, r6, r0 ");
1.329 + asm("sfree_2: ");
1.330 + asm("mvn r5, #0 ");
1.331 + asm("subs r8, r8, #32 ");
1.332 + asm("bls sfree_0 ");
1.333 + asm("cmp r8, #32 ");
1.334 + asm("mvncc r5, r5, lsr r8 ");
1.335 + asm("b sfree_cross_bdry_1 ");
1.336 +
1.337 + asm("0: ");
1.338 + ASM_FAULT();
1.339 + }
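+
+// A rough C++ equivalent of SelectiveFree() above, for reference only.  Unlike
+// Free(TInt, TInt), positions that are already free are simply skipped, and iAvail
+// grows only by the number of bits actually freed (the assembly counts the
+// already-free bits with __e32_bit_count_32 and subtracts them).
+//
+//	void TBitMapAllocator::SelectiveFree(TInt aStart, TInt aLength)
+//		{
+//		__ASSERT_ALWAYS(TUint(aStart) + TUint(aLength) <= TUint(iSize), TBMA_FAULT());
+//		TUint32* p = iMap + (aStart >> 5);
+//		if (!iAvail || p < iCheckFirst)
+//			iCheckFirst = p;
+//		for (TInt pos = aStart; pos < aStart + aLength; ++pos)
+//			{
+//			TUint32& w = iMap[pos >> 5];
+//			TUint32 mask = 0x80000000u >> (pos & 31);
+//			if (!(w & mask))
+//				{ w |= mask; ++iAvail; }		// newly freed position
+//			}
+//		}
+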
1.340 +
1.341 +
1.342 +/** Tests if a specific range of bit positions are all free
1.343 + Specified range must lie within the total range for this allocator.
1.344 +
1.345 + @param aStart First position to check
1.346 + @param aLength Number of consecutive positions to check, must be >0
1.347 + @return FALSE if all positions free, TRUE if at least one is occupied.
1.348 + */
1.349 +EXPORT_C __NAKED__ TBool TBitMapAllocator::NotFree(TInt /*aStart*/, TInt /*aLength*/) const
1.350 + {
1.351 + // Inverse logic - returns 0 if all positions free, nonzero otherwise
1.352 + asm("ldr r3, [r0, #8] ");
1.353 + asm("adds ip, r1, r2 ");
1.354 + asm("bcs 0f ");
1.355 + asm("cmp ip, r3 ");
1.356 + asm("bhi 0f ");
1.357 + asm("mov r3, r1, lsr #5 ");
1.358 + asm("and r1, r1, #0x1f ");
1.359 + asm("add r3, r0, r3, lsl #2 ");
1.360 + asm("add ip, r2, r1 ");
1.361 + asm("add r3, r3, #12 ");
1.362 + asm("cmp ip, #32 ");
1.363 + asm("bhi 1f ");
1.364 + asm("mvn ip, #0 ");
1.365 + asm("ldr r0, [r3] ");
1.366 + asm("mvn ip, ip, lsr r2 ");
1.367 + asm("mov ip, ip, lsr r1 ");
1.368 + asm("eor r0, r0, ip ");
1.369 + asm("ands r0, r0, ip ");
1.370 + __JUMP(,lr);
1.371 + asm("1: ");
1.372 + asm("mvn r2, #0 ");
1.373 + asm("mov r2, r2, lsr r1 ");
1.374 + asm("2: ");
1.375 + asm("ldr r1, [r3], #4 ");
1.376 + asm("eor r0, r1, r2 ");
1.377 + asm("ands r0, r0, r2 ");
1.378 + __JUMP(ne,lr);
1.379 + asm("mvn r2, #0 ");
1.380 + asm("subs ip, ip, #32 ");
1.381 + __JUMP(ls,lr);
1.382 + asm("cmp ip, #32 ");
1.383 + asm("mvncc r2, r2, lsr ip ");
1.384 + asm("b 2b ");
1.385 +
1.386 + asm("0: ");
1.387 + ASM_FAULT();
1.388 + }
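+
+// A rough C++ equivalent of NotFree() above, for reference only.  Note the inverse
+// logic: 0 (false) means the whole range is free.
+//
+//	TBool TBitMapAllocator::NotFree(TInt aStart, TInt aLength) const
+//		{
+//		__ASSERT_ALWAYS(TUint(aStart) + TUint(aLength) <= TUint(iSize), TBMA_FAULT());
+//		for (TInt pos = aStart; pos < aStart + aLength; ++pos)
+//			if (!(iMap[pos >> 5] & (0x80000000u >> (pos & 31))))
+//				return ETrue;					// found an allocated position
+//		return EFalse;							// all free
+//		}
+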
1.389 +
1.390 +
1.391 +/** Tests if a specific range of bit positions are all occupied
1.392 + Specified range must lie within the total range for this allocator.
1.393 +
1.394 + @param aStart First position to check
1.395 + @param aLength Number of consecutive positions to check, must be >0
1.396 + @return FALSE if all positions occupied, TRUE if at least one is free.
1.397 + */
1.398 +EXPORT_C __NAKED__ TBool TBitMapAllocator::NotAllocated(TInt /*aStart*/, TInt /*aLength*/) const
1.399 + {
1.400 + // Inverse logic - returns 0 if all positions allocated, nonzero otherwise
1.401 + asm("ldr r3, [r0, #8] ");
1.402 + asm("adds ip, r1, r2 ");
1.403 + asm("bcs 0f ");
1.404 + asm("cmp ip, r3 ");
1.405 + asm("bhi 0f ");
1.406 + asm("mov r3, r1, lsr #5 ");
1.407 + asm("and r1, r1, #0x1f ");
1.408 + asm("add r3, r0, r3, lsl #2 ");
1.409 + asm("add ip, r2, r1 ");
1.410 + asm("add r3, r3, #12 ");
1.411 + asm("cmp ip, #32 ");
1.412 + asm("bhi 1f ");
1.413 + asm("mvn ip, #0 ");
1.414 + asm("ldr r0, [r3] ");
1.415 + asm("mvn ip, ip, lsr r2 ");
1.416 + asm("ands r0, r0, ip, lsr r1 ");
1.417 + __JUMP(,lr);
1.418 + asm("1: ");
1.419 + asm("mvn r2, #0 ");
1.420 + asm("mov r2, r2, lsr r1 ");
1.421 + asm("2: ");
1.422 + asm("ldr r1, [r3], #4 ");
1.423 + asm("ands r0, r1, r2 ");
1.424 + __JUMP(ne,lr);
1.425 + asm("mvn r2, #0 ");
1.426 + asm("subs ip, ip, #32 ");
1.427 + __JUMP(ls,lr);
1.428 + asm("cmp ip, #32 ");
1.429 + asm("mvncc r2, r2, lsr ip ");
1.430 + asm("b 2b ");
1.431 +
1.432 + asm("0: ");
1.433 + ASM_FAULT();
1.434 + }
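+
+// A rough C++ equivalent of NotAllocated() above, for reference only; 0 (false) means
+// the whole range is allocated.
+//
+//	TBool TBitMapAllocator::NotAllocated(TInt aStart, TInt aLength) const
+//		{
+//		__ASSERT_ALWAYS(TUint(aStart) + TUint(aLength) <= TUint(iSize), TBMA_FAULT());
+//		for (TInt pos = aStart; pos < aStart + aLength; ++pos)
+//			if (iMap[pos >> 5] & (0x80000000u >> (pos & 31)))
+//				return ETrue;					// found a free position
+//		return EFalse;							// all allocated
+//		}
+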
1.435 +
1.436 +
1.437 +/** Allocate up to a specified number of available bit positions
1.438 + The allocated positions are not required to bear any relationship to
1.439 + each other.
1.440 + If the number of free positions is less than the number requested,
1.441 + allocate all currently free positions.
1.442 +
1.443 + @param aLength Maximum number of positions to allocate.
1.444 + @param aList Pointer to memory area where allocated bit numbers should be stored.
1.445 + @return The number of positions allocated
1.446 + */
1.447 +EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocList(TInt /*aLength*/, TInt* /*aList*/)
1.448 + {
1.449 + asm("ldmia r0, {r3,ip} "); // r3=iAvail, ip=first check word
1.450 + asm("stmfd sp!, {r4-r5,lr} ");
1.451 + asm("cmp r1, r3 ");
1.452 + asm("movgt r1, r3 "); // if aLength>iAvail, aLength=iAvail
1.453 + asm("movs r5, r1 "); // r5 counts allocations
1.454 + asm("beq 0f "); // if length 0, exit
1.455 + asm("sub r3, r3, r1 "); // reduce available count
1.456 + asm("sub r4, ip, r0 ");
1.457 + asm("sub r4, r4, #12 "); // r4=offset of first check word from iMap[0];
1.458 + asm("str r3, [r0] ");
1.459 + asm("mov r4, r4, lsl #3 "); // r4=bit number of MSB of first check word
1.460 + asm("1: ");
1.461 + asm("ldr lr, [ip], #4 "); // lr=next word
1.462 + asm("cmp lr, #0 ");
1.463 + asm("addeq r4, r4, #32 "); // if word=0, increment bit number by 32 and check next word
1.464 + asm("beq 1b ");
1.465 + asm("mov r3, #1 ");
1.466 + asm("sub r4, r4, #1 ");
1.467 + asm("2: ");
1.468 + asm("mov r3, r3, ror #1 "); // shift mask right one
1.469 + asm("add r4, r4, #1 "); // and increment bit number
1.470 + asm("tst lr, r3 "); // check next bit
1.471 + asm("beq 2b ");
1.472 + asm("str r4, [r2], #4 "); // bit=1, so store bit number in list
1.473 + asm("subs r5, r5, #1 "); // check if we are finished
1.474 + asm("beq 4f "); // branch if we are
1.475 + asm("bics lr, lr, r3 "); // clear bit and see if word now empty
1.476 + asm("bne 2b "); // if word not empty, get next bit
1.477 + asm("str lr, [ip, #-4] "); // word empty - clear word
1.478 + asm("add r4, r4, #32 "); // word empty - step bit number on to next word
1.479 + asm("bic r4, r4, #31 ");
1.480 + asm("b 1b "); // and go to check next word
1.481 + asm("4: ");
1.482 + asm("bics lr, lr, r3 "); // clear bit
1.483 + asm("str lr, [ip, #-4] "); // we are finished - store modified word
1.484 + asm("subne ip, ip, #4 "); // if word not empty, first check=last read word
1.485 + asm("str ip, [r0, #4] "); // update first check word
1.486 + asm("0: ");
1.487 + asm("mov r0, r1 "); // return number of positions allocated
1.488 + asm("ldmfd sp!, {r4-r5,pc} ");
1.489 + }
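+
+// For reference only: AllocList() allocates the same positions that repeated calls to
+// Alloc() would; the assembly above just fuses the scan so each map word is read once.
+// A rough sketch, not the code that is compiled here:
+//
+//	TInt TBitMapAllocator::AllocList(TInt aLength, TInt* aList)
+//		{
+//		TInt count = aLength < iAvail ? aLength : iAvail;	// can never exceed iAvail
+//		for (TInt i = 0; i < count; ++i)
+//			*aList++ = Alloc();								// lowest free positions, in order
+//		return count;
+//		}
+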
1.490 +
1.491 +
1.492 +/** Find a set of consecutive bit positions with specified alignment, with
1.493 + support for chaining multiple allocators.
1.494 + Note that this function does not mark the positions as allocated.
1.495 +
1.496 + @param aLength number of consecutive bit positions to allocate
1.497 + @param aAlign logarithm to base 2 of the alignment required
1.498 + @param aBase the alignment of the first bit of this allocator - only significant modulo 2^aAlign
1.499 + @param aBestFit TRUE for best fit allocation strategy, FALSE for first fit
1.500 + @param aCarry carry in/carry out
1.501 + @param aRunLength Holds best run length found so far. This will be set to KMaxTInt when no
1.502 + suitable run length has been found. In best fit mode aCarry should also be
1.503 + checked as aRunLength will not be set if aCarry is the only suitable run length
1.504 + found.
1.505 +	@param aOffset The bit position to start the search from; set to 0 to search all bit positions.
1.506 +					aOffset is rounded up to the required alignment, so all bits before the
1.507 +					aligned offset are ignored. It may only be non-zero if aCarry is zero, as any
1.508 +					carried-in bits would be ignored when aOffset is non-zero.
1.509 +
1.510 + @return Start position if a suitable run was found
1.511 + @return KErrNotFound if no suitable run was found
1.512 +	@return KErrOverflow if all positions are free and best fit mode is selected, or if all
1.513 +			positions are free in first fit mode and the requested length exceeds the number available.
1.514 +
1.515 + @see TBitMapAllocator::AllocConsecutive(TInt aLength, TBool aBestFit)
1.516 + @see TBitMapAllocator::AllocAligned(TInt aLength, TInt aAlign, TInt aBase, TBool aBestFit)
1.517 + @see ..\bma.cpp for more details
1.518 + */
1.519 +EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocAligned(TInt /*aLength*/, TInt /*aAlign*/, TInt /*aBase*/,
1.520 + TBool /*aBestFit*/, TInt& /*aCarry*/, TInt& /*aRunLength*/,
1.521 + TUint /*aOffset*/) const
1.522 + {
1.523 + // r0=this, r1=aLength, r2=aAlign, r3=aBase, [sp+0]=aBestFit, [sp+4]=&aCarry, [sp+8]=&aRunLength
1.524 + // [sp+12] = aOffset.
1.525 + asm("ldr r12, [sp, #0] "); // r12=aBestFit
1.526 + asm("cmp r1, #0 ");
1.527 + asm("ble aa_inv "); // __ASSERT_ALWAYS(aLength>0, TBMA_FAULT())
1.528 + asm("cmp r2, #31 ");
1.529 + asm("bhs aa_inv "); // __ASSERT_ALWAYS(TUint(aAlign)<31, TBMA_FAULT())
1.530 + asm("stmfd sp!, {r4-r11,lr} ");
1.531 +	asm("movs r8, r12 ");					// r8=aBestFit, set flags
1.532 + asm("ldr r11, [sp, #40] "); // r11=&aCarry
1.533 + asm("mvnne r8, #0x80000000 "); // if (aBestFit) r8=7fffffff else r8=0
1.534 + asm("ldmia r0!, {r4-r6} "); // r4=iAvail, r5=iCheckFirst, r6=iSize, r0->iMap[0]
1.535 + asm("ldr r12, [sp, #48] "); // r12 = aOffset;
1.536 + asm("cmp r6, r12 ");
1.537 + asm("bls aa_inv "); // __ASSERT_ALWAYS(aOffset < (TUint)iSize, TBMA_FAULT())
1.538 + asm("ldr r9, [r11] "); // r9=aCarry
1.539 + asm("cmp r9, #0 ");
1.540 + asm("cmpne r12, #0 ");
1.541 + asm("bne aa_inv "); //__ASSERT_ALWAYS(!aCarry || !aOffset, TBMA_FAULT())
1.542 + asm("mov r12, #1 ");
1.543 + asm("mov r12, r12, lsl r2 "); // r12=alignsize = 1<<aAlign
1.544 + asm("sub r2, r12, #1 "); // r2=alignmask = alignsize-1
1.545 + asm("cmp r4, r6 "); // check for iAvail==iSize
1.546 + asm("beq aa_all_free "); // branch if so
1.547 + asm("rsbs r9, r9, #0 "); // r9=run start=-aCarry
1.548 + asm("movne r5, r0 "); // if carry, pW=iMap
1.549 + asm("sub r4, r5, r0 "); // r4=first check address - &iMap[0]
1.550 + asm("add r12, r6, #31 ");
1.551 + asm("mov r4, r4, lsl #3 "); // r4=bit number of first bit to check
1.552 + asm("bic r12, r12, #31 "); // r12=size rounded up to multiple of 32
1.553 + asm("mvn r7, #0 "); // saved bit number (p)
1.554 + asm("add r10, r0, r12, lsr #3 "); // r10=end address of bitmap
1.555 + asm("str r7, [sp, #-4]! "); // saved bit number (p) onto stack
1.556 + asm("movs r11, r9 ");
1.557 +	asm("mvnne r11, #0 ");				// if (aCarry) r11=~0 else r11=0
1.558 +
1.559 + // registers: r0=this->iMap, r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
1.560 +	//		r6=iSize, r7=scratch (aOffset, then offsetMask), r8=saved run length, r9=run start pos
1.561 + // r10=end address of bitmap, r11=state
1.562 + asm("ldr r7, [sp, #52] "); // r7 = aOffset;
1.563 + asm("cmp r7, #0 "); // if (aOffset)
1.564 + asm("beq aa_word ");
1.565 + asm("add r7, r7, r3 "); // r7 = aOffset + aBase
1.566 + asm("add r7, r7, r2 "); // r7 = aOffset + aBase + alignmask
1.567 + asm("bic r7, r7, r2 "); // r7 = (aOffset + aBase + alignmask) & ~alignmask
1.568 + asm("sub r7, r7, r3 "); // r7 -= aBase
1.569 + asm("mov r12, r7, lsr #5 "); // r12 = aOffset >> 5 (number of pointer increments required)
1.570 + asm("add r0, r0, r12, lsl #2 "); // r0 = offsetWord = iMap + (aOffset >> 5) (pointer add so shift=2)
1.571 + asm("cmp r0, r5 "); // if (offsetWord >= pW)
1.572 + asm("movpl r5, r0 "); // r5 = pW = offsetWord
1.573 + asm("andpl r4, r7, #0xffffffe0 "); // r4 = n = aOffset & 0xffffffe0
1.574 + asm("andpl r7, r7, #31 "); // r7 = aOffset & 31
1.575 + asm("mov r0, #0xffffffff "); // r0 = 0xffffffff
1.576 + asm("mov r7, r0, lsr r7 "); // r7 = offsetMask = 0xffffffff >> (aOffset & 31)
1.577 +
1.578 + // registers: r0=bit to check (b), r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
1.579 + // r6=iSize, r7=offsetMask, r8=saved run length, r9=run start pos
1.580 + // r10=end address of bitmap, r11=state, r12=word
1.581 + asm("aa_word: "); // while (pW < pE)
1.582 + asm("cmp r5, r10 "); // reached end?
1.583 + asm("ldrlo r12, [r5], #4 "); // if not, r12=next word (=*pW++)
1.584 + asm("bhs aa_end_loop "); // if end, branch out
1.585 +
1.586 + asm("cmp r7, #0 "); // if (offsetMask)
1.587 + asm("andne r12, r12, r7 "); // r12 = word &= offsetMask
1.588 + asm("movne r7, #0 "); // offsetmask = 0;
1.589 +
1.590 + asm("eors r12, r12, r11 "); // r12=w^s, test if any of required bit present
1.591 + asm("addeq r4, r4, #32 "); // if not, increment bit # by 32
1.592 + asm("beq aa_word "); // and do next word
1.593 + asm("mov r0, #0x80000000 "); // bit to check (b)
1.594 +
1.595 + asm("aa_bit: "); // if ((word ^ s) & b)
1.596 + asm("tst r12, r0 "); // does bit have required state?
1.597 + asm("bne aa_bit_found ");
1.598 + asm("aa_end_for: ");
1.599 + asm("add r4, r4, #1 "); // increment bit number
1.600 + asm("movs r0, r0, lsr #1 "); // next bit
1.601 + asm("bne aa_bit "); // if all bits not done, do next
1.602 + asm("b aa_word "); // else do next word
1.603 +
1.604 + asm("aa_bit_found: ");
1.605 + asm("mvns r12, r12 "); // Invert r12 to invert search bit
1.606 + asm("mvns r14, r11 "); // if (s)
1.607 + asm("cmpeq r4, r6 "); // && n==iSize
1.608 + asm("beq aa_end_loop "); // ... finished
1.609 + asm("mvns r11, r11 "); // else s=~s
1.610 + asm("movne r9, r4 "); // if (s) q=n (1 found so save position)
1.611 + asm("bne aa_end_for ");
1.612 +
1.613 + asm("sub r14, r4, r9 "); // r14 = run length = n - q
1.614 + asm("stmdb sp!, {r0,r12} "); // store b (r0) and word (r12) on stack
1.615 + asm("add r12, r9, r3 "); // r12 = q + aBase
1.616 + asm("add r12, r12, r2 "); // r12 = q + aBase + alignmask
1.617 + asm("bic r12, r12, r2 "); // r12 = (q + aBase + alignmask) & ~alignmask
1.618 + asm("sub r12, r12, r3 "); // r12 = alignedStartPos = r12 - aBase
1.619 + asm("sub r0, r12, r9 "); // r0 = lost = alignedStartPos - q
1.620 + asm("sub r0, r14, r0 "); // r0 = run length - lost
1.621 + asm("cmp r0, r1 "); // if (run length - lost >= aLength)
1.622 + asm("ldmltia sp!, {r0,r12} "); // if aligned length too short: r0 = b and r12 = word from stack
1.623 + asm("blt aa_end_for "); // (run length - lost) too short (must be signed comparison)
1.624 +
1.625 +// if (rl-lost>=aLength)
1.626 +
1.627 + asm("cmp r1, r14 "); // check for exact run length match (if (run length == aLength))
1.628 +	asm("cmpne r8, #0 ");					// check for best fit (r8 only ever set if (aBestFit))
1.629 + asm("beq aa_found_it "); // exact match or not in best fit mode
1.630 +
1.631 +// if (r1<minrl)
1.632 + asm("cmp r12, #0 ");
1.633 + asm("movmi r12, #0 "); // r12 = (alignedStartPos >= 0)? alignedStartPos : 0
1.634 + asm("cmp r14, r8 "); // Compare run length with current minimum
1.635 + asm("movlo r8, r14 "); // if shorter, replace
1.636 + asm("strlo r12, [sp, #8] "); // save alignedStartPos (p = (alignedStartPos >= 0)? alignedStartPos : 0)
1.637 + asm("ldmia sp!, {r0,r12} "); // r0 = b and r12 = word from stack
1.638 + asm("b aa_end_for "); // next bit
1.639 +// end {if (r1<minrl)}
1.640 +
1.641 +// if (!aBestFit || run length == aLength)
1.642 + // registers: r12 = alignedStartPos, r14 = run length
1.643 + asm("aa_found_it: ");
1.644 + asm("ldr r1, [sp, #52] "); // r1=&aCarry
1.645 + asm("ldr r7, [sp, #56] "); // r7=&aRunLength
1.646 +	asm("subs r0, r12, #0 ");				// r0 = alignedStartPos, alignedStartPos >= 0?
1.647 + asm("movmi r0, #0 "); // if alignedStartPos < 0 r0=0
1.648 + asm("str r14, [r7] "); // aRunLength = run length
1.649 + asm("mov r14, #0 ");
1.650 + asm("strge r14, [r1] "); // if (alignedStartPos >= 0), aCarry=0
1.651 + asm("ldmfd sp!, {r1-r11,pc} "); // return
1.652 +// end {if (!aBestFit || run length == aLength)}
1.653 +
1.654 +// end {if (rl-lost>=aLength)}
1.655 +
1.656 + asm("aa_end_loop: ");
1.657 + asm("ldr r10, [sp, #48] "); // r10=&aRunLength
1.658 +
1.659 +// registers: r2 = alignmask, r3 = aBase, r4=current bit number(n),
1.660 +// r9=run start pos(q), r10=&aRunLength, r11 = state(s), r14 = run length(rl)
1.661 + asm("cmp r8, r1 "); // compare min rl with aLength
1.662 + asm("beq aa_end_loop2 "); // if exact match, skip
1.663 +
1.664 +// if (minrl != aLength)
1.665 + asm("ldr r12, [sp, #44] "); // r12=&aCarry
1.666 + asm("mov r14, #0 "); // r14 = run length = 0
1.667 + asm("cmp r11, #0 ");
1.668 + asm("beq aa_end_loop3 "); // if (!s) no final run
1.669 +	asm("sub r14, r4, r9 ");				// r14 = run length = n-q
1.670 +	asm("cmp r8, #0 ");						// if (!aBestFit) (r8 is only ever non-zero in best fit mode)
1.671 + asm("bne aa_end_loop3 "); // if best fit, don't count final run
1.672 +
1.673 +// if (!aBestFit)
1.674 + asm("add r0, r9, r3 "); // r0 = q + aBase
1.675 + asm("add r0, r0, r2 "); // r0 = q + aBase + alignmask
1.676 + asm("bic r0, r0, r2 "); // r0 = (q + aBase + alignmask) & ~alignmask
1.677 + asm("sub r0, r0, r3 "); // r0 = alignedStartPos = r0 -= aBase
1.678 + asm("sub r2, r0, r9 "); // r2 = lost = alignedStartPos - q
1.679 + asm("sub r2, r14, r2 "); // r2 = run length - lost
1.680 + asm("cmp r2, r1 "); // if (run length - lost >= aLength)
1.681 + asm("blt aa_end_loop3 ");
1.682 +
1.683 +// if (run length - lost >= aLength)
1.684 + asm("mov r8, r14 "); // r8 = run length (ready to be stored in return)
1.685 + asm("mov r14, #0 "); // r14 = 0 (aCarry on return)
1.686 + asm("str r0, [sp, #0] "); // Save alignedStartPos on stack ready for return
1.687 +
1.688 +// end {if (run length - lost >= aLength)}
1.689 +// end {if (!aBestFit)}
1.690 +
1.691 + asm("aa_end_loop3: ");
1.692 + asm("str r14, [r12] "); // Save aCarry = run length = r14
1.693 +// end {if (minrl != aLength)}
1.694 +
1.695 + asm("aa_end_loop2: ");
1.696 + asm("str r8, [r10] "); // aRunLength = minrl
1.697 + asm("ldmfd sp!, {r0,r4-r11,pc} "); // return saved pos
1.698 +
1.699 +// r1 = aLength r2 = alignmask, r3 = aBase, r4 = iAvail, r6 = iSize, r9 = aCarry, r11 = &aCarry
1.700 + asm("aa_all_free: ");
1.701 + asm("ldr r12, [sp, #48] "); // r12 = aOffset;
1.702 + asm("cmp r12, #0 "); // if (aOffset)
1.703 + asm("addne r12, r12, r3 "); // r12 = aOffset + aBase
1.704 + asm("addne r12, r12, r2 "); // r12 = aOffset + aBase + alignmask
1.705 + asm("bicne r12, r12, r2 "); // r12 = (aOffset + aBase + alignmask)&~alignmask
1.706 + asm("subne r12, r12, r3 "); // r12 = ((aOffset + aBase + alignmask)&~alignmask) - aBase
1.707 + asm("subs r10, r6, r12 "); // r10 = runLength = iSize - aOffset
1.708 +	asm("movmi r10, #0 ");				// if aligned aOffset > iSize, runLength = 0
1.709 +
1.710 + asm("movs r0, r8 "); // best fit? if not, r0=0
1.711 + asm("bne aa_all_free2 "); // skip if best fit mode
1.712 + asm("sub r6, r12, r9 "); // r6=aOffset-aCarry
1.713 + asm("add r6, r6, r3 "); // r6=aOffset-aCarry+aBase
1.714 + asm("add r6, r6, r2 "); // r6=aOffset-aCarry+aBase+alignmask
1.715 + asm("bic r6, r6, r2 "); // r6=(aOffset-aCarry+aBase+alignmask)&~alignmask
1.716 + asm("sub r6, r6, r3 "); // r6 = alignedStartPos
1.717 + asm("sub r3, r12, r9 "); // r3 = aOffset - aCarry
1.718 + asm("sub r3, r6, r3 "); // r3 = lost = alignedStartPos - (aOffset - aCarry)
1.719 + asm("add r2, r10, r9 "); // r2 = aRunLength + aCarry
1.720 + asm("sub r2, r2, r3 "); // r2 -= lost
1.721 + asm("cmp r2, r1 "); // if (aRunLength + aCarry - lost >= aLength)
1.722 + asm("blt aa_all_free2 ");
1.723 + asm("cmp r6, #0 ");
1.724 + asm("ldr r5, [sp, #44] "); // r5 = &RunLength
1.725 + asm("str r10, [r5] "); // Save aRunLength (aRunLength = runLength)
1.726 + asm("movge r9, #0 "); // if (alignedStartPos >= 0) aCarry = 0;
1.727 + asm("str r9, [r11] "); // Save aCarry
1.728 + asm("movge r0, r6 "); // r0 = (alignedStartPos >= 0)? alignedStartPos : 0
1.729 + asm("ldmfd sp!, {r4-r11,pc} "); // return r0
1.730 +
1.731 + asm("aa_all_free2: ");
1.732 + asm("ldr r12, [sp, #48] "); // r12 = aOffset;
1.733 + asm("cmp r12, #0 "); // if (aOffset)
1.734 + asm("movne r9, r10 "); // r9 = aCarry = runLength
1.735 + asm("addeq r9, r9, r4 "); // r9 = aCarry + iAvail
1.736 + asm("str r9, [r11] "); // Save aCarry
1.737 + asm("ldr r5, [sp, #44] "); // r5 = &RunLength
1.738 + asm("mov r0, #%a0" : : "i" ((TInt)KMaxTInt));
1.739 + asm("str r0, [r5] "); // aRunLength = KMaxTInt
1.740 + asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
1.741 + asm("ldmfd sp!, {r4-r11,pc} "); // return KErrOverflow
1.742 +
1.743 + asm("aa_inv: ");
1.744 + ASM_FAULT();
1.745 + }
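+
+// An illustrative, hypothetical use of AllocAligned, not taken from the original
+// source.  Callers normally go through the simpler wrappers named in the @see lines
+// above; since AllocAligned only finds a run, the caller still has to mark it with
+// Alloc(pos, length).  The allocator 'bma' and the sizes below are made up.
+//
+//	TBitMapAllocator* bma = TBitMapAllocator::New(256, ETrue);	// 256 positions, all free
+//	TInt pos = bma->AllocAligned(16, 4, 0, EFalse);				// 16 bits, 2^4-aligned, first fit
+//	if (pos >= 0)
+//		bma->Alloc(pos, 16);									// now actually allocate them
+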
1.746 +#endif