// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\klib\arm\cbma.cia
// Machine coded bitmap allocator for ARM
// This file is directly included in the test harness t_tbma
#include <kernel/kbma.h>
#include <e32atomics.h>
#define __TBMA_MACHINE_CODED__
#include <kernel/kern_priv.h>
#ifdef __TBMA_MACHINE_CODED__
extern void TBmaFault(TInt aLine);
#define ASM_FAULT_LINE(x) asm("ldr r0, [pc] "); asm("b " CSM_Z9TBmaFaulti ); asm(".word %a0" : : "i" (x));
#define ASM_FAULT() ASM_FAULT_LINE(__LINE__)
#ifndef __EABI_CTORS__
/** Construct a new TBitMapAllocator object

@param aSize The number of bit positions required
@param aState TRUE if all bit positions initially free
              FALSE if all bit positions initially allocated
*/
EXPORT_C __NAKED__ TBitMapAllocator::TBitMapAllocator(TInt /*aSize*/, TBool /*aState*/)
56 asm("movne r2, r1 "); // if aState r2=aSize else r2=0
57 asm("str r2, [r0, #0] "); // iAvail=aState?aSize:0
58 asm("add r12, r0, #12 "); // r12=&iMap[0]
59 asm("str r1, [r0, #8] "); // iSize=r1
60 asm("add r3, r1, #31 ");
61 asm("bic r3, r3, #31 "); // r3=aSize rounded up to multiple of 32
62 asm("sub r3, r3, #32 "); // r3=32*(number of map words-1)
63 asm("addeq r12, r12, r3, lsr #3 "); // if !aState r12=&iMap[nmapw-1]
64 asm("str r12, [r0, #4] "); // iCheckFirst=aState?&iMap[0]:&iMap[nmapw-1]
65 asm("mvnne r2, #0 "); // if aState r2=0xffffffff else r2=0
66 asm("add r12, r0, #12 "); // r12=&iMap[0]
68 asm("str r2, [r12], #4 "); // fill map
69 asm("subs r1, r1, #32 ");
71 asm("rsbne r1, r1, #0 "); // if aSize not a multiple of 32, r1=number of tail bits to clear
72 asm("movne r2, r2, lsl r1 "); // clear unused bits
73 asm("strne r2, [r12, #-4] ");
/** Allocate the next available bit position

@return Number of the position allocated, -1 if all positions occupied
*/
EXPORT_C __NAKED__ TInt TBitMapAllocator::Alloc()
87 asm("ldmia r0, {r1,r2} "); // r1=available, r2=check first address
88 asm("subs r1, r1, #1 "); // decrement free count
89 asm("mvnmi r0, #0 "); // if none free, return with r0=-1
91 asm("str r1, [r0] "); // store decremented free count
93 asm("ldr r3, [r2], #4 "); // check word
94 asm("cmp r3, #0 "); // any free entries?
95 asm("beq alloc_1 "); // if not, check next word
#ifdef __CPU_ARM_HAS_CLZ
asm("cmp r3, #0x00010000 "); // compute ip=number of leading zeros in r3 (binary search)
101 asm("movlo r3, r3, lsl #16 ");
102 asm("addlo ip, ip, #16 ");
103 asm("cmp r3, #0x01000000 ");
104 asm("movlo r3, r3, lsl #8 ");
105 asm("addlo ip, ip, #8 ");
106 asm("cmp r3, #0x10000000 ");
107 asm("movlo r3, r3, lsl #4 ");
108 asm("addlo ip, ip, #4 ");
109 asm("cmp r3, #0x40000000 ");
110 asm("movlo r3, r3, lsl #2 ");
111 asm("addlo ip, ip, #2 ");
112 asm("cmp r3, #0x80000000 ");
113 asm("addlo ip, ip, #1 ");
115 asm("ldr r3, [r2, #-4]! ");
116 asm("mov r1, #0x80000000 ");
117 asm("bic r3, r3, r1, lsr ip "); // clear bit in allocator word
118 asm("str r3, [r2] ");
119 asm("str r2, [r0, #4] "); // update check first address
120 asm("sub r0, r2, r0 ");
121 asm("sub r0, r0, #12 "); // r0=offset of word from iMap[0]
122 asm("adds r0, ip, r0, lsl #3 "); // multiply by 8 and add bit position
/** Free the specified bit position

@param aPos Number of the bit position to be freed; must be currently allocated.
*/
EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aPos*/)
133 asm("ldr r3, [r0, #8] "); // r3=iSize
134 asm("mov r2, r1, lsr #5 "); // r2=word index
135 asm("add r2, r0, r2, lsl #2 "); // r2=address of word-12
137 asm("bhs free_error ");
138 asm("and ip, r1, #0x1f "); // ip=bit number in word
139 asm("ldr r3, [r2, #12]! "); // r3=allocator word
140 asm("mov r1, #0x80000000 ");
141 asm("tst r3, r1, lsr ip "); // test bit
142 asm("bne free_error "); // if already free, error
143 asm("orr r3, r3, r1, lsr ip "); // set free bit
144 asm("str r3, [r2] ");
145 asm("ldmia r0, {r1,r3} "); // r1=available count, r3=first free address
146 asm("cmp r1, #1 "); // check original free count
147 asm("add r1, r1, #1 "); // increment available count
148 asm("str r1, [r0, #0] ");
149 asm("cmpcs r2, r3 "); // compare word address with first free
150 asm("strcc r2, [r0, #4] "); // if lower, update first free
/** Allocate a specific range of bit positions

The specified range must lie within the total range for this allocator and all
the positions must currently be free.

@param aStart First position to allocate
@param aLength Number of consecutive positions to allocate, must be >0
*/
EXPORT_C __NAKED__ void TBitMapAllocator::Alloc(TInt /*aStart*/, TInt /*aLength*/)
167 asm("ldr ip, [r0, #8] ");
168 asm("str lr, [sp, #-4]! ");
169 asm("adds lr, r1, r2 ");
173 asm("mov r3, r1, lsr #5 ");
174 asm("ldr ip, [r0] ");
175 asm("and r1, r1, #0x1f ");
176 asm("add r3, r0, r3, lsl #2 ");
177 asm("sub ip, ip, r2 "); // reduce free count
178 asm("str ip, [r0] ");
179 asm("add ip, r2, r1 ");
183 asm("ldr r0, [r3, #12]! ");
184 asm("mvn ip, ip, lsr r2 ");
185 asm("mov ip, ip, lsr r1 ");
186 asm("orr lr, r0, ip ");
189 asm("bic r0, r0, ip ");
190 asm("str r0, [r3] ");
191 asm("ldr pc, [sp], #4 ");
193 asm("add r3, r3, #12 ");
195 asm("mov r2, r2, lsr r1 ");
197 asm("ldr r1, [r3] ");
198 asm("orr lr, r1, r2 ");
201 asm("bic r1, r1, r2 ");
202 asm("str r1, [r3], #4 ");
204 asm("subs ip, ip, #32 ");
205 asm("ldrls pc, [sp], #4 ");
207 asm("mvncc r2, r2, lsr ip ");
/** Free a specific range of bit positions

The specified range must lie within the total range for this allocator and all
the positions must currently be allocated.

@param aStart First position to free
@param aLength Number of consecutive positions to free, must be >0
*/
EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aStart*/, TInt /*aLength*/)
224 asm("ldr ip, [r0, #8] ");
225 asm("str lr, [sp, #-4]! ");
226 asm("adds lr, r1, r2 ");
230 asm("mov r3, r1, lsr #5 ");
231 asm("and r1, r1, #0x1f ");
232 asm("add r3, r0, r3, lsl #2 ");
233 asm("ldmia r0, {ip,lr} "); // ip=free count, lr=first check addr
234 asm("add r3, r3, #12 ");
235 asm("cmp ip, #1 "); // check original free count
236 asm("add ip, ip, r2 "); // increase free count
237 asm("cmpcs r3, lr "); // if none free originally, always update address
238 asm("str ip, [r0] ");
239 asm("strcc r3, [r0, #4] "); // update first check addr if necessary
240 asm("add lr, r2, r1 ");
244 asm("ldr r0, [r3] ");
245 asm("mvn lr, lr, lsr r2 ");
246 asm("mov lr, lr, lsr r1 ");
249 asm("orr r0, r0, lr ");
250 asm("str r0, [r3] ");
251 asm("ldr pc, [sp], #4 ");
254 asm("mov r2, r2, lsr r1 ");
256 asm("ldr r1, [r3] ");
259 asm("orr r1, r1, r2 ");
260 asm("str r1, [r3], #4 ");
262 asm("subs lr, lr, #32 ");
263 asm("ldrls pc, [sp], #4 ");
265 asm("mvncc r2, r2, lsr lr ");
/** Free a specific range of bit positions

The specified range must lie within the total range for this allocator, but it is
not necessary that all the positions are currently allocated.

@param aStart First position to free
@param aLength Number of consecutive positions to free, must be >0
*/
EXPORT_C __NAKED__ void TBitMapAllocator::SelectiveFree(TInt /*aStart*/, TInt /*aLength*/)
282 asm("ldr r3, [r0, #8] ");
283 asm("stmfd sp!, {r4-r8,lr} ");
284 asm("adds lr, r1, r2 ");
288 asm("mov r7, r0 "); // r7 -> this
289 asm("mov r4, r1, lsr #5 ");
290 asm("and r1, r1, #0x1f ");
291 asm("ldmia r7, {r6,r8} "); // r6=free count, r8=first check addr
292 asm("add r4, r7, r4, lsl #2 ");
293 asm("add r4, r4, #12 ");
294 asm("cmp r6, #1 "); // check original free count
295 asm("add r6, r6, r2 "); // r6=new free count assuming no positions already free
296 asm("cmpcs r4, r8 "); // if none free originally, always update address
297 asm("strcc r4, [r7, #4] "); // update first check addr if necessary
298 asm("add r8, r2, r1 ");
300 asm("bhi sfree_cross_bdry ");
302 asm("mvn r8, r8, lsr r2 ");
303 asm("mov r8, r8, lsr r1 ");
304 asm("ldr r1, [r4] ");
305 asm("ands r0, r1, r8 "); // r0 has 1's in positions which are already free
306 asm("orr r1, r1, r8 ");
307 asm("str r1, [r4] "); // store new bit mask
308 asm("beq sfree_0 "); // if none were already free, finished
309 asm("bl " CSM_CFUNC(__e32_bit_count_32));
310 asm("sub r6, r6, r0 ");
312 asm("str r6, [r7] "); // store free count
313 asm("ldmfd sp!, {r4-r8,pc} "); // return
315 asm("sfree_cross_bdry: ");
317 asm("mov r5, r5, lsr r1 ");
318 asm("sfree_cross_bdry_1: ");
319 asm("ldr r1, [r4] "); // original bit mask
320 asm("ands r0, r1, r5 "); // r0 has 1's in bit positions which are already free
321 asm("orr r1, r1, r5 ");
322 asm("str r1, [r4], #4 "); // store new bit mask
323 asm("beq sfree_2 "); // skip if none were already free
324 asm("bl " CSM_CFUNC(__e32_bit_count_32));
325 asm("sub r6, r6, r0 ");
328 asm("subs r8, r8, #32 ");
331 asm("mvncc r5, r5, lsr r8 ");
332 asm("b sfree_cross_bdry_1 ");
/** Tests whether all bit positions in a specific range are free

The specified range must lie within the total range for this allocator.

@param aStart First position to check
@param aLength Number of consecutive positions to check, must be >0
@return FALSE if all positions free, TRUE if at least one is occupied.
*/
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotFree(TInt /*aStart*/, TInt /*aLength*/) const
// Inverse logic - returns 0 if all positions free, nonzero otherwise
asm("ldr r3, [r0, #8] ");
asm("adds ip, r1, r2 ");
asm("mov r3, r1, lsr #5 ");
asm("and r1, r1, #0x1f ");
asm("add r3, r0, r3, lsl #2 ");
asm("add ip, r2, r1 ");
asm("add r3, r3, #12 ");
asm("ldr r0, [r3] ");
asm("mvn ip, ip, lsr r2 ");
asm("mov ip, ip, lsr r1 ");
asm("eor r0, r0, ip ");
asm("ands r0, r0, ip ");
asm("mov r2, r2, lsr r1 ");
asm("ldr r1, [r3], #4 ");
asm("eor r0, r1, r2 ");
asm("ands r0, r0, r2 ");
asm("subs ip, ip, #32 ");
asm("mvncc r2, r2, lsr ip ");
/** Tests whether all bit positions in a specific range are occupied

The specified range must lie within the total range for this allocator.

@param aStart First position to check
@param aLength Number of consecutive positions to check, must be >0
@return FALSE if all positions occupied, TRUE if at least one is free.
*/
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotAllocated(TInt /*aStart*/, TInt /*aLength*/) const
// Inverse logic - returns 0 if all positions allocated, nonzero otherwise
asm("ldr r3, [r0, #8] ");
asm("adds ip, r1, r2 ");
asm("mov r3, r1, lsr #5 ");
asm("and r1, r1, #0x1f ");
asm("add r3, r0, r3, lsl #2 ");
asm("add ip, r2, r1 ");
asm("add r3, r3, #12 ");
asm("ldr r0, [r3] ");
asm("mvn ip, ip, lsr r2 ");
asm("ands r0, r0, ip, lsr r1 ");
asm("mov r2, r2, lsr r1 ");
asm("ldr r1, [r3], #4 ");
asm("ands r0, r1, r2 ");
asm("subs ip, ip, #32 ");
asm("mvncc r2, r2, lsr ip ");
/** Allocate up to a specified number of available bit positions

The allocated positions are not required to bear any relationship to
each other.
If the number of free positions is less than the number requested,
allocate all currently free positions.

@param aLength Maximum number of positions to allocate.
@param aList Pointer to memory area where allocated bit numbers should be stored.
@return The number of positions allocated.
*/
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocList(TInt /*aLength*/, TInt* /*aList*/)
446 asm("ldmia r0, {r3,ip} "); // r3=iAvail, ip=first check word
447 asm("stmfd sp!, {r4-r5,lr} ");
449 asm("movgt r1, r3 "); // if aLength>iAvail, aLength=iAvail
450 asm("movs r5, r1 "); // r5 counts allocations
451 asm("beq 0f "); // if length 0, exit
452 asm("sub r3, r3, r1 "); // reduce available count
453 asm("sub r4, ip, r0 ");
454 asm("sub r4, r4, #12 "); // r4=offset of first check word from iMap[0];
455 asm("str r3, [r0] ");
456 asm("mov r4, r4, lsl #3 "); // r4=bit number of MSB of first check word
458 asm("ldr lr, [ip], #4 "); // lr=next word
460 asm("addeq r4, r4, #32 "); // if word=0, increment bit number by 32 and check next word
463 asm("sub r4, r4, #1 ");
465 asm("mov r3, r3, ror #1 "); // shift mask right one
466 asm("add r4, r4, #1 "); // and increment bit number
467 asm("tst lr, r3 "); // check next bit
469 asm("str r4, [r2], #4 "); // bit=1, so store bit number in list
470 asm("subs r5, r5, #1 "); // check if we are finished
471 asm("beq 4f "); // branch if we are
472 asm("bics lr, lr, r3 "); // clear bit and see if word now empty
473 asm("bne 2b "); // if word not empty, get next bit
474 asm("str lr, [ip, #-4] "); // word empty - clear word
475 asm("add r4, r4, #32 "); // word empty - step bit number on to next word
476 asm("bic r4, r4, #31 ");
477 asm("b 1b "); // and go to check next word
479 asm("bics lr, lr, r3 "); // clear bit
480 asm("str lr, [ip, #-4] "); // we are finished - store modified word
481 asm("subne ip, ip, #4 "); // if word not empty, first check=last read word
482 asm("str ip, [r0, #4] "); // update first check word
484 asm("mov r0, r1 "); // return number of positions allocated
485 asm("ldmfd sp!, {r4-r5,pc} ");
/** Find a set of consecutive bit positions with specified alignment, with
support for chaining multiple allocators.

Note that this function does not mark the positions as allocated.

@param aLength number of consecutive bit positions to allocate
@param aAlign logarithm to base 2 of the alignment required
@param aBase the alignment of the first bit of this allocator - only significant modulo 2^aAlign
@param aBestFit TRUE for best fit allocation strategy, FALSE for first fit
@param aCarry carry in/carry out
@param aRunLength Holds the best run length found so far. This will be set to KMaxTInt when no
suitable run length has been found. In best fit mode aCarry should also be
checked, as aRunLength will not be set if aCarry is the only suitable run length.
@param aOffset The bit position to start the search from; set to 0 to search all bit positions.
aOffset will be aligned, so all bits before an aligned aOffset will be
ignored. This can only be non-zero if aCarry is zero, as any carry in bits will be
ignored if aOffset is non-zero.

@return Start position if a suitable run was found
@return KErrNotFound if no suitable run was found
@return KErrOverflow if all positions are free in best fit mode, or if all positions are
free in first fit mode and the requested length exceeds the number of positions available.

@see TBitMapAllocator::AllocConsecutive(TInt aLength, TBool aBestFit)
@see TBitMapAllocator::AllocAligned(TInt aLength, TInt aAlign, TInt aBase, TBool aBestFit)
@see ..\bma.cpp for more details
*/
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocAligned(TInt /*aLength*/, TInt /*aAlign*/, TInt /*aBase*/,
	TBool /*aBestFit*/, TInt& /*aCarry*/, TInt& /*aRunLength*/,
	TUint /*aOffset*/) const
// r0=this, r1=aLength, r2=aAlign, r3=aBase, [sp+0]=aBestFit, [sp+4]=&aCarry, [sp+8]=&aRunLength
// [sp+12] = aOffset.
asm("ldr r12, [sp, #0] "); // r12=aBestFit
asm("ble aa_inv "); // __ASSERT_ALWAYS(aLength>0, TBMA_FAULT())
asm("bhs aa_inv "); // __ASSERT_ALWAYS(TUint(aAlign)<31, TBMA_FAULT())
asm("stmfd sp!, {r4-r11,lr} ");
asm("movs r8, r12 ");
asm("ldr r11, [sp, #40] "); // r11=&aCarry
asm("mvnne r8, #0x80000000 "); // if (aBestFit) r8=7fffffff else r8=0
asm("ldmia r0!, {r4-r6} "); // r4=iAvail, r5=iCheckFirst, r6=iSize, r0->iMap[0]
asm("ldr r12, [sp, #48] "); // r12 = aOffset;
asm("bls aa_inv "); // __ASSERT_ALWAYS(aOffset < (TUint)iSize, TBMA_FAULT())
asm("ldr r9, [r11] "); // r9=aCarry
asm("cmpne r12, #0 ");
asm("bne aa_inv "); // __ASSERT_ALWAYS(!aCarry || !aOffset, TBMA_FAULT())
asm("mov r12, r12, lsl r2 "); // r12=alignsize = 1<<aAlign
asm("sub r2, r12, #1 "); // r2=alignmask = alignsize-1
asm("cmp r4, r6 "); // check for iAvail==iSize
asm("beq aa_all_free "); // branch if so
asm("rsbs r9, r9, #0 "); // r9=run start=-aCarry
asm("movne r5, r0 "); // if carry, pW=iMap
asm("sub r4, r5, r0 "); // r4=first check address - &iMap[0]
asm("add r12, r6, #31 ");
asm("mov r4, r4, lsl #3 "); // r4=bit number of first bit to check
asm("bic r12, r12, #31 "); // r12=size rounded up to multiple of 32
asm("mvn r7, #0 "); // saved bit number (p)
asm("add r10, r0, r12, lsr #3 "); // r10=end address of bitmap
asm("str r7, [sp, #-4]! "); // saved bit number (p) onto stack
asm("movs r11, r9 ");
asm("mvnne r11, #0 "); // if (aCarry) r11=~0 else r11=0 (search state s)
// registers: r0=this->iMap, r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
// r6=iSize, r7=scratch, r8=saved run length, r9=run start pos
// r10=end address of bitmap, r11=state
559 asm("ldr r7, [sp, #52] "); // r7 = aOffset;
560 asm("cmp r7, #0 "); // if (aOffset)
562 asm("add r7, r7, r3 "); // r7 = aOffset + aBase
563 asm("add r7, r7, r2 "); // r7 = aOffset + aBase + alignmask
564 asm("bic r7, r7, r2 "); // r7 = (aOffset + aBase + alignmask) & ~alignmask
565 asm("sub r7, r7, r3 "); // r7 -= aBase
566 asm("mov r12, r7, lsr #5 "); // r12 = aOffset >> 5 (number of pointer increments required)
567 asm("add r0, r0, r12, lsl #2 "); // r0 = offsetWord = iMap + (aOffset >> 5) (pointer add so shift=2)
568 asm("cmp r0, r5 "); // if (offsetWord >= pW)
569 asm("movpl r5, r0 "); // r5 = pW = offsetWord
570 asm("andpl r4, r7, #0xffffffe0 "); // r4 = n = aOffset & 0xffffffe0
571 asm("andpl r7, r7, #31 "); // r7 = aOffset & 31
572 asm("mov r0, #0xffffffff "); // r0 = 0xffffffff
573 asm("mov r7, r0, lsr r7 "); // r7 = offsetMask = 0xffffffff >> (aOffset & 31)
// registers: r0=bit to check (b), r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
// r6=iSize, r7=offsetMask, r8=saved run length, r9=run start pos
// r10=end address of bitmap, r11=state, r12=word
asm("aa_word: "); // while (pW < pE)
asm("cmp r5, r10 "); // reached end?
asm("ldrlo r12, [r5], #4 "); // if not, r12=next word (=*pW++)
asm("bhs aa_end_loop "); // if end, branch out
asm("cmp r7, #0 "); // if (offsetMask)
asm("andne r12, r12, r7 "); // r12 = word &= offsetMask
asm("movne r7, #0 "); // offsetmask = 0;
asm("eors r12, r12, r11 "); // r12=w^s, test if any of required bit present
asm("addeq r4, r4, #32 "); // if not, increment bit # by 32
asm("beq aa_word "); // and do next word
asm("mov r0, #0x80000000 "); // bit to check (b)
asm("aa_bit: "); // if ((word ^ s) & b)
asm("tst r12, r0 "); // does bit have required state?
asm("bne aa_bit_found ");
asm("add r4, r4, #1 "); // increment bit number
asm("movs r0, r0, lsr #1 "); // next bit
asm("bne aa_bit "); // if all bits not done, do next
asm("b aa_word "); // else do next word
601 asm("aa_bit_found: ");
602 asm("mvns r12, r12 "); // Invert r12 to invert search bit
603 asm("mvns r14, r11 "); // if (s)
604 asm("cmpeq r4, r6 "); // && n==iSize
605 asm("beq aa_end_loop "); // ... finished
606 asm("mvns r11, r11 "); // else s=~s
607 asm("movne r9, r4 "); // if (s) q=n (1 found so save position)
608 asm("bne aa_end_for ");
610 asm("sub r14, r4, r9 "); // r14 = run length = n - q
611 asm("stmdb sp!, {r0,r12} "); // store b (r0) and word (r12) on stack
612 asm("add r12, r9, r3 "); // r12 = q + aBase
613 asm("add r12, r12, r2 "); // r12 = q + aBase + alignmask
614 asm("bic r12, r12, r2 "); // r12 = (q + aBase + alignmask) & ~alignmask
615 asm("sub r12, r12, r3 "); // r12 = alignedStartPos = r12 - aBase
616 asm("sub r0, r12, r9 "); // r0 = lost = alignedStartPos - q
617 asm("sub r0, r14, r0 "); // r0 = run length - lost
618 asm("cmp r0, r1 "); // if (run length - lost >= aLength)
619 asm("ldmltia sp!, {r0,r12} "); // if aligned length too short: r0 = b and r12 = word from stack
620 asm("blt aa_end_for "); // (run length - lost) too short (must be signed comparison)
622 // if (rl-lost>=aLength)
624 asm("cmp r1, r14 "); // check for exact run length match (if (run length == aLength))
625 asm("cmpne r8, #0 "); // check for best fit (r8 only ever set if (aBestfit))
626 asm("beq aa_found_it "); // exact match or not in best fit mode
630 asm("movmi r12, #0 "); // r12 = (alignedStartPos >= 0)? alignedStartPos : 0
631 asm("cmp r14, r8 "); // Compare run length with current minimum
632 asm("movlo r8, r14 "); // if shorter, replace
633 asm("strlo r12, [sp, #8] "); // save alignedStartPos (p = (alignedStartPos >= 0)? alignedStartPos : 0)
634 asm("ldmia sp!, {r0,r12} "); // r0 = b and r12 = word from stack
635 asm("b aa_end_for "); // next bit
636 // end {if (r1<minrl)}
638 // if (!aBestFit || run length == aLength)
639 // registers: r12 = alignedStartPos, r14 = run length
640 asm("aa_found_it: ");
641 asm("ldr r1, [sp, #52] "); // r1=&aCarry
642 asm("ldr r7, [sp, #56] "); // r7=&aRunLength
643 asm("subs r0, r12, #0 "); // r0 = alignStartPos, alignedStartPos >= 0?
644 asm("movmi r0, #0 "); // if alignedStartPos < 0 r0=0
645 asm("str r14, [r7] "); // aRunLength = run length
647 asm("strge r14, [r1] "); // if (alignedStartPos >= 0), aCarry=0
648 asm("ldmfd sp!, {r1-r11,pc} "); // return
649 // end {if (!aBestFit || run length == aLength)}
651 // end {if (rl-lost>=aLength)}
653 asm("aa_end_loop: ");
654 asm("ldr r10, [sp, #48] "); // r10=&aRunLength
656 // registers: r2 = alignmask, r3 = aBase, r4=current bit number(n),
657 // r9=run start pos(q), r10=&aRunLength, r11 = state(s), r14 = run length(rl)
658 asm("cmp r8, r1 "); // compare min rl with aLength
659 asm("beq aa_end_loop2 "); // if exact match, skip
661 // if (minrl != aLength)
662 asm("ldr r12, [sp, #44] "); // r12=&aCarry
663 asm("mov r14, #0 "); // r14 = run length = 0
665 asm("beq aa_end_loop3 "); // if (!s) no final run
666 asm("sub r14, r4, r9 "); // rl4 = run length = n-q
667 asm("cmp r8, #0 "); // if (!aBestFit) (r8 only and always set when best fit mode)
668 asm("bne aa_end_loop3 "); // if best fit, don't count final run
671 asm("add r0, r9, r3 "); // r0 = q + aBase
672 asm("add r0, r0, r2 "); // r0 = q + aBase + alignmask
673 asm("bic r0, r0, r2 "); // r0 = (q + aBase + alignmask) & ~alignmask
674 asm("sub r0, r0, r3 "); // r0 = alignedStartPos = r0 -= aBase
675 asm("sub r2, r0, r9 "); // r2 = lost = alignedStartPos - q
676 asm("sub r2, r14, r2 "); // r2 = run length - lost
677 asm("cmp r2, r1 "); // if (run length - lost >= aLength)
678 asm("blt aa_end_loop3 ");
680 // if (run length - lost >= aLength)
681 asm("mov r8, r14 "); // r8 = run length (ready to be stored in return)
682 asm("mov r14, #0 "); // r14 = 0 (aCarry on return)
683 asm("str r0, [sp, #0] "); // Save alignedStartPos on stack ready for return
685 // end {if (run length - lost >= aLength)}
686 // end {if (!aBestFit)}
688 asm("aa_end_loop3: ");
689 asm("str r14, [r12] "); // Save aCarry = run length = r14
690 // end {if (minrl != aLength)}
692 asm("aa_end_loop2: ");
693 asm("str r8, [r10] "); // aRunLength = minrl
694 asm("ldmfd sp!, {r0,r4-r11,pc} "); // return saved pos
// r1 = aLength, r2 = alignmask, r3 = aBase, r4 = iAvail, r6 = iSize, r9 = aCarry, r11 = &aCarry
asm("aa_all_free: ");
asm("ldr r12, [sp, #48] "); // r12 = aOffset;
asm("cmp r12, #0 "); // if (aOffset)
asm("addne r12, r12, r3 "); // r12 = aOffset + aBase
asm("addne r12, r12, r2 "); // r12 = aOffset + aBase + alignmask
asm("bicne r12, r12, r2 "); // r12 = (aOffset + aBase + alignmask)&~alignmask
asm("subne r12, r12, r3 "); // r12 = ((aOffset + aBase + alignmask)&~alignmask) - aBase
asm("subs r10, r6, r12 "); // r10 = runLength = iSize - aOffset
asm("movmi r10, #0 "); // if aligned aOffset > (TUint)iSize, runLength = 0
707 asm("movs r0, r8 "); // best fit? if not, r0=0
708 asm("bne aa_all_free2 "); // skip if best fit mode
709 asm("sub r6, r12, r9 "); // r6=aOffset-aCarry
710 asm("add r6, r6, r3 "); // r6=aOffset-aCarry+aBase
711 asm("add r6, r6, r2 "); // r6=aOffset-aCarry+aBase+alignmask
712 asm("bic r6, r6, r2 "); // r6=(aOffset-aCarry+aBase+alignmask)&~alignmask
713 asm("sub r6, r6, r3 "); // r6 = alignedStartPos
714 asm("sub r3, r12, r9 "); // r3 = aOffset - aCarry
715 asm("sub r3, r6, r3 "); // r3 = lost = alignedStartPos - (aOffset - aCarry)
716 asm("add r2, r10, r9 "); // r2 = aRunLength + aCarry
717 asm("sub r2, r2, r3 "); // r2 -= lost
718 asm("cmp r2, r1 "); // if (aRunLength + aCarry - lost >= aLength)
719 asm("blt aa_all_free2 ");
721 asm("ldr r5, [sp, #44] "); // r5 = &RunLength
722 asm("str r10, [r5] "); // Save aRunLength (aRunLength = runLength)
723 asm("movge r9, #0 "); // if (alignedStartPos >= 0) aCarry = 0;
724 asm("str r9, [r11] "); // Save aCarry
725 asm("movge r0, r6 "); // r0 = (alignedStartPos >= 0)? alignedStartPos : 0
726 asm("ldmfd sp!, {r4-r11,pc} "); // return r0
728 asm("aa_all_free2: ");
729 asm("ldr r12, [sp, #48] "); // r12 = aOffset;
730 asm("cmp r12, #0 "); // if (aOffset)
731 asm("movne r9, r10 "); // r9 = aCarry = runLength
732 asm("addeq r9, r9, r4 "); // r9 = aCarry + iAvail
733 asm("str r9, [r11] "); // Save aCarry
734 asm("ldr r5, [sp, #44] "); // r5 = &RunLength
735 asm("mov r0, #%a0" : : "i" ((TInt)KMaxTInt));
736 asm("str r0, [r5] "); // aRunLength = KMaxTInt
737 asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
738 asm("ldmfd sp!, {r4-r11,pc} "); // return KErrOverflow