// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\klib\arm\cbma.cia
// Machine coded bitmap allocator for ARM
// This file is directly included in the test harness t_tbma
//
//

#include <kernel/kbma.h>
#include <cpudefs.h>
#include <e32cia.h>

#ifdef TBMA_TEST_CODE

#include <e32atomics.h>

#ifdef __MARM__
#define __TBMA_MACHINE_CODED__
#endif

#else

#include <kernel/kern_priv.h>

#endif

#ifdef __TBMA_MACHINE_CODED__

extern void TBmaFault(TInt aLine);
#define	ASM_FAULT_LINE(x)	asm("ldr r0, [pc] "); asm("b " CSM_Z9TBmaFaulti ); asm(".word %a0" : : "i" (x));
#define	ASM_FAULT()		ASM_FAULT_LINE(__LINE__)

#ifndef __EABI_CTORS__
/**	Construct a new TBitMapAllocator object

	@param	aSize The number of bit positions required
	@param	aState	TRUE if all bit positions initially free
					FALSE if all bit positions initially allocated
 */
EXPORT_C __NAKED__ TBitMapAllocator::TBitMapAllocator(TInt /*aSize*/, TBool /*aState*/)
	{
	asm("cmp r1, #0 ");
	asm("ble 0f ");
	asm("cmp r2, #0 ");
	asm("movne r2, r1 ");				// if aState r2=aSize else r2=0
	asm("str r2, [r0, #0] ");			// iAvail=aState?aSize:0
	asm("add r12, r0, #12 ");			// r12=&iMap[0]
	asm("str r1, [r0, #8] ");			// iSize=r1
	asm("add r3, r1, #31 ");
	asm("bic r3, r3, #31 ");			// r3=aSize rounded up to multiple of 32
	asm("sub r3, r3, #32 ");			// r3=32*(number of map words-1)
	asm("addeq r12, r12, r3, lsr #3 ");	// if !aState r12=&iMap[nmapw-1]
	asm("str r12, [r0, #4] ");			// iCheckFirst=aState?&iMap[0]:&iMap[nmapw-1]
	asm("mvnne r2, #0 ");				// if aState r2=0xffffffff else r2=0
	asm("add r12, r0, #12 ");			// r12=&iMap[0]
	asm("1: ");
	asm("str r2, [r12], #4 ");			// fill map
	asm("subs r1, r1, #32 ");
	asm("bhi 1b ");
	asm("rsbne r1, r1, #0 ");			// if aSize not a multiple of 32, r1=number of tail bits to clear
	asm("movne r2, r2, lsl r1 ");		// clear unused bits
	asm("strne r2, [r12, #-4] ");
	__JUMP(,lr);
	asm("0: ");
	ASM_FAULT();
	}
#endif
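
// For readers decoding the assembly above: a minimal C++ sketch of the same
// construction logic (illustrative only; the generic C++ implementation lives
// in e32\klib\bma.cpp). Assumed layout, as used by the code above: iAvail at
// offset 0, iCheckFirst at offset 4, iSize at offset 8, iMap[] from offset 12,
// one bit per position with position 0 at the MSB of iMap[0] (1 = free).
//
//	TBitMapAllocator::TBitMapAllocator(TInt aSize, TBool aState)
//		{
//		// (the real code faults if aSize <= 0)
//		TInt nmapw = (aSize + 31) >> 5;			// number of 32-bit map words
//		iAvail = aState ? aSize : 0;
//		iSize = aSize;
//		iCheckFirst = aState ? iMap : iMap + nmapw - 1;
//		TUint32 fill = aState ? 0xffffffffu : 0u;
//		for (TInt i = 0; i < nmapw; ++i)
//			iMap[i] = fill;
//		if (aSize & 31)
//			iMap[nmapw - 1] = fill << (32 - (aSize & 31));	// clear unused tail bits
//		}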


/**	Allocate the next available bit position

	@return	Number of position allocated, -1 if all positions occupied
 */
EXPORT_C __NAKED__ TInt TBitMapAllocator::Alloc()
	{
	asm("ldmia r0, {r1,r2} ");			// r1=available, r2=check first address
	asm("subs r1, r1, #1 ");			// decrement free count
	asm("mvnmi r0, #0 ");				// if none free, return with r0=-1
	__JUMP(mi,lr);
	asm("str r1, [r0] ");				// store decremented free count
	asm("alloc_1: ");
	asm("ldr r3, [r2], #4 ");			// check word
	asm("cmp r3, #0 ");					// any free entries?
	asm("beq alloc_1 ");				// if not, check next word
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(12, 3);
#else
	asm("mov ip, #0 ");
	asm("cmp r3, #0x00010000 ");		// ip=number of leading zeros in r3
	asm("movlo r3, r3, lsl #16 ");
	asm("addlo ip, ip, #16 ");
	asm("cmp r3, #0x01000000 ");
	asm("movlo r3, r3, lsl #8 ");
	asm("addlo ip, ip, #8 ");
	asm("cmp r3, #0x10000000 ");
	asm("movlo r3, r3, lsl #4 ");
	asm("addlo ip, ip, #4 ");
	asm("cmp r3, #0x40000000 ");
	asm("movlo r3, r3, lsl #2 ");
	asm("addlo ip, ip, #2 ");
	asm("cmp r3, #0x80000000 ");
	asm("addlo ip, ip, #1 ");
#endif
	asm("ldr r3, [r2, #-4]! ");
	asm("mov r1, #0x80000000 ");
	asm("bic r3, r3, r1, lsr ip ");		// clear bit in allocator word
	asm("str r3, [r2] ");
	asm("str r2, [r0, #4] ");			// update check first address
	asm("sub r0, r2, r0 ");
	asm("sub r0, r0, #12 ");			// r0=offset of word from iMap[0]
	asm("adds r0, ip, r0, lsl #3 ");	// multiply by 8 and add bit position
	__JUMP(,lr);
	}
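
// Illustrative C++ sketch of Alloc() (an assumption-level rendering of the
// assembly above, not the authoritative source; cf. e32\klib\bma.cpp):
//
//	TInt TBitMapAllocator::Alloc()
//		{
//		if (iAvail == 0)
//			return -1;						// all positions occupied
//		--iAvail;
//		TUint32* pW = iCheckFirst;
//		while (*pW == 0)
//			++pW;							// skip fully-allocated words
//		TUint32 w = *pW;
//		TInt b = 0;
//		while (!(w & 0x80000000u))
//			{ w <<= 1; ++b; }				// b = number of leading zeros (CLZ)
//		*pW &= ~(0x80000000u >> b);			// mark the position allocated
//		iCheckFirst = pW;					// no free bit before this word
//		return ((pW - iMap) << 5) + b;		// word index * 32 + bit number
//		}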


/**	Free the specified bit position

	@param	aPos Number of bit position to be freed; must be currently allocated.
 */
EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aPos*/)
	{
	asm("ldr r3, [r0, #8] ");			// r3=iSize
	asm("mov r2, r1, lsr #5 ");			// r2=word index
	asm("add r2, r0, r2, lsl #2 ");		// r2=address of word-12
	asm("cmp r1, r3 ");
	asm("bhs free_error ");
	asm("and ip, r1, #0x1f ");			// ip=bit number in word
	asm("ldr r3, [r2, #12]! ");			// r3=allocator word
	asm("mov r1, #0x80000000 ");
	asm("tst r3, r1, lsr ip ");			// test bit
	asm("bne free_error ");				// if already free, error
	asm("orr r3, r3, r1, lsr ip ");		// set free bit
	asm("str r3, [r2] ");
	asm("ldmia r0, {r1,r3} ");			// r1=available count, r3=first free address
	asm("cmp r1, #1 ");					// check original free count
	asm("add r1, r1, #1 ");				// increment available count
	asm("str r1, [r0, #0] ");
	asm("cmpcs r2, r3 ");				// compare word address with first free
	asm("strcc r2, [r0, #4] ");			// if lower, update first free
	__JUMP(,lr);

	asm("free_error: ");
	ASM_FAULT();
	}
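
// Illustrative C++ sketch of Free(TInt) (assumes the layout noted above;
// TBmaFault stands in for the ASM_FAULT() cases):
//
//	void TBitMapAllocator::Free(TInt aPos)
//		{
//		if (TUint(aPos) >= TUint(iSize))
//			TBmaFault(__LINE__);			// position out of range
//		TUint32* pW = iMap + (aPos >> 5);
//		TUint32 mask = 0x80000000u >> (aPos & 31);
//		if (*pW & mask)
//			TBmaFault(__LINE__);			// already free
//		*pW |= mask;
//		if (iAvail++ == 0 || pW < iCheckFirst)
//			iCheckFirst = pW;				// keep first-free hint conservative
//		}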


/**	Allocate a specific range of bit positions
	Specified range must lie within the total range for this allocator and all
	the positions must currently be free.

	@param	aStart	First position to allocate
	@param	aLength	Number of consecutive positions to allocate, must be >0
 */
EXPORT_C __NAKED__ void TBitMapAllocator::Alloc(TInt /*aStart*/, TInt /*aLength*/)
	{
	asm("ldr ip, [r0, #8] ");
	asm("str lr, [sp, #-4]! ");
	asm("adds lr, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp lr, ip ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("ldr ip, [r0] ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("sub ip, ip, r2 ");				// reduce free count
	asm("str ip, [r0] ");
	asm("add ip, r2, r1 ");
	asm("cmp ip, #32 ");
	asm("bhi 1f ");
	asm("mvn ip, #0 ");
	asm("ldr r0, [r3, #12]! ");
	asm("mvn ip, ip, lsr r2 ");
	asm("mov ip, ip, lsr r1 ");
	asm("orr lr, r0, ip ");
	asm("cmp lr, r0 ");
	asm("bne 0f ");
	asm("bic r0, r0, ip ");
	asm("str r0, [r3] ");
	asm("ldr pc, [sp], #4 ");
	asm("1: ");
	asm("add r3, r3, #12 ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3] ");
	asm("orr lr, r1, r2 ");
	asm("cmp lr, r1 ");
	asm("bne 0f ");
	asm("bic r1, r1, r2 ");
	asm("str r1, [r3], #4 ");
	asm("mvn r2, #0 ");
	asm("subs ip, ip, #32 ");
	asm("ldrls pc, [sp], #4 ");
	asm("cmp ip, #32 ");
	asm("mvncc r2, r2, lsr ip ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}
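
// Illustrative C++ sketch of the range Alloc (assumptions as above). The word
// masks mirror the assembly: aLength plus the start bit decides whether the
// range fits in one word or crosses word boundaries.
//
//	void TBitMapAllocator::Alloc(TInt aStart, TInt aLength)
//		{
//		if (TUint(aStart) + TUint(aLength) > TUint(iSize))
//			TBmaFault(__LINE__);
//		iAvail -= aLength;
//		TUint32* pW = iMap + (aStart >> 5);
//		TInt sb = aStart & 31;					// start bit within first word
//		TInt rem = aLength + sb;				// bits measured from word start
//		TUint32 mask = 0xffffffffu >> sb;
//		if (rem <= 32)
//			mask &= ~(rem == 32 ? 0u : 0xffffffffu >> rem);	// single-word range
//		for (;;)
//			{
//			if ((*pW | mask) != *pW)
//				TBmaFault(__LINE__);			// some position not free
//			*pW++ &= ~mask;						// mark allocated
//			if ((rem -= 32) <= 0)
//				break;
//			mask = rem >= 32 ? 0xffffffffu : ~(0xffffffffu >> rem);
//			}
//		}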


/**	Free a specific range of bit positions
	Specified range must lie within the total range for this allocator and all
	the positions must currently be allocated.

	@param	aStart	First position to free
	@param	aLength	Number of consecutive positions to free, must be >0
 */
EXPORT_C __NAKED__ void TBitMapAllocator::Free(TInt /*aStart*/, TInt /*aLength*/)
	{
	asm("ldr ip, [r0, #8] ");
	asm("str lr, [sp, #-4]! ");
	asm("adds lr, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp lr, ip ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("ldmia r0, {ip,lr} ");			// ip=free count, lr=first check addr
	asm("add r3, r3, #12 ");
	asm("cmp ip, #1 ");					// check original free count
	asm("add ip, ip, r2 ");				// increase free count
	asm("cmpcs r3, lr ");				// if none free originally, always update address
	asm("str ip, [r0] ");
	asm("strcc r3, [r0, #4] ");			// update first check addr if necessary
	asm("add lr, r2, r1 ");
	asm("cmp lr, #32 ");
	asm("bhi 1f ");
	asm("mvn lr, #0 ");
	asm("ldr r0, [r3] ");
	asm("mvn lr, lr, lsr r2 ");
	asm("mov lr, lr, lsr r1 ");
	asm("tst r0, lr ");
	asm("bne 0f ");
	asm("orr r0, r0, lr ");
	asm("str r0, [r3] ");
	asm("ldr pc, [sp], #4 ");
	asm("1: ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3] ");
	asm("tst r1, r2 ");
	asm("bne 0f ");
	asm("orr r1, r1, r2 ");
	asm("str r1, [r3], #4 ");
	asm("mvn r2, #0 ");
	asm("subs lr, lr, #32 ");
	asm("ldrls pc, [sp], #4 ");
	asm("cmp lr, #32 ");
	asm("mvncc r2, r2, lsr lr ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}
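
// Illustrative C++ sketch of the range Free (assumptions as above):
//
//	void TBitMapAllocator::Free(TInt aStart, TInt aLength)
//		{
//		if (TUint(aStart) + TUint(aLength) > TUint(iSize))
//			TBmaFault(__LINE__);
//		TUint32* pW = iMap + (aStart >> 5);
//		if (iAvail == 0 || pW < iCheckFirst)
//			iCheckFirst = pW;					// none free before here
//		iAvail += aLength;
//		TInt sb = aStart & 31;
//		TInt rem = aLength + sb;
//		TUint32 mask = 0xffffffffu >> sb;
//		if (rem <= 32)
//			mask &= ~(rem == 32 ? 0u : 0xffffffffu >> rem);
//		for (;;)
//			{
//			if (*pW & mask)
//				TBmaFault(__LINE__);			// some position already free
//			*pW++ |= mask;						// mark free
//			if ((rem -= 32) <= 0)
//				break;
//			mask = rem >= 32 ? 0xffffffffu : ~(0xffffffffu >> rem);
//			}
//		}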


/**	Free a specific range of bit positions
	Specified range must lie within the total range for this allocator but it is
	not necessary that all the positions are currently allocated.

	@param	aStart	First position to free
	@param	aLength	Number of consecutive positions to free, must be >0
 */
EXPORT_C __NAKED__ void TBitMapAllocator::SelectiveFree(TInt /*aStart*/, TInt /*aLength*/)
	{
	asm("ldr r3, [r0, #8] ");
	asm("stmfd sp!, {r4-r8,lr} ");
	asm("adds lr, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp lr, r3 ");
	asm("bhi 0f ");
	asm("mov r7, r0 ");					// r7 -> this
	asm("mov r4, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("ldmia r7, {r6,r8} ");			// r6=free count, r8=first check addr
	asm("add r4, r7, r4, lsl #2 ");
	asm("add r4, r4, #12 ");
	asm("cmp r6, #1 ");					// check original free count
	asm("add r6, r6, r2 ");				// r6=new free count assuming no positions already free
	asm("cmpcs r4, r8 ");				// if none free originally, always update address
	asm("strcc r4, [r7, #4] ");			// update first check addr if necessary
	asm("add r8, r2, r1 ");
	asm("cmp r8, #32 ");
	asm("bhi sfree_cross_bdry ");
	asm("mvn r8, #0 ");
	asm("mvn r8, r8, lsr r2 ");
	asm("mov r8, r8, lsr r1 ");
	asm("ldr r1, [r4] ");
	asm("ands r0, r1, r8 ");			// r0 has 1's in positions which are already free
	asm("orr r1, r1, r8 ");
	asm("str r1, [r4] ");				// store new bit mask
	asm("beq sfree_0 ");				// if none were already free, finished
	asm("bl " CSM_CFUNC(__e32_bit_count_32));
	asm("sub r6, r6, r0 ");
	asm("sfree_0: ");
	asm("str r6, [r7] ");				// store free count
	asm("ldmfd sp!, {r4-r8,pc} ");			// return

	asm("sfree_cross_bdry: ");
	asm("mvn r5, #0 ");
	asm("mov r5, r5, lsr r1 ");
	asm("sfree_cross_bdry_1: ");
	asm("ldr r1, [r4] ");				// original bit mask
	asm("ands r0, r1, r5 ");			// r0 has 1's in bit positions which are already free
	asm("orr r1, r1, r5 ");
	asm("str r1, [r4], #4 ");			// store new bit mask
	asm("beq sfree_2 ");				// skip if none were already free
	asm("bl " CSM_CFUNC(__e32_bit_count_32));
	asm("sub r6, r6, r0 ");
	asm("sfree_2: ");
	asm("mvn r5, #0 ");
	asm("subs r8, r8, #32 ");
	asm("bls sfree_0 ");
	asm("cmp r8, #32 ");
	asm("mvncc r5, r5, lsr r8 ");
	asm("b sfree_cross_bdry_1 ");

	asm("0: ");
	ASM_FAULT();
	}
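
// Illustrative C++ sketch of SelectiveFree (assumptions as above). The free
// count is bumped optimistically by aLength, then corrected per word by
// counting the bits that were already free (the assembly calls
// __e32_bit_count_32 for this):
//
//	void TBitMapAllocator::SelectiveFree(TInt aStart, TInt aLength)
//		{
//		if (TUint(aStart) + TUint(aLength) > TUint(iSize))
//			TBmaFault(__LINE__);
//		TUint32* pW = iMap + (aStart >> 5);
//		if (iAvail == 0 || pW < iCheckFirst)
//			iCheckFirst = pW;
//		iAvail += aLength;						// assume none already free...
//		TInt sb = aStart & 31;
//		TInt rem = aLength + sb;
//		TUint32 mask = 0xffffffffu >> sb;
//		if (rem <= 32)
//			mask &= ~(rem == 32 ? 0u : 0xffffffffu >> rem);
//		for (;;)
//			{
//			TUint32 already = *pW & mask;		// bits that were already free
//			*pW++ |= mask;
//			if (already)
//				iAvail -= __e32_bit_count_32(already);	// ...then correct the count
//			if ((rem -= 32) <= 0)
//				break;
//			mask = rem >= 32 ? 0xffffffffu : ~(0xffffffffu >> rem);
//			}
//		}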


/**	Tests if a specific range of bit positions are all free
	Specified range must lie within the total range for this allocator.

	@param	aStart	First position to check
	@param	aLength	Number of consecutive positions to check, must be >0
	@return	FALSE if all positions free, TRUE if at least one is occupied.
 */
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotFree(TInt /*aStart*/, TInt /*aLength*/) const
	{
	// Inverse logic - returns 0 if all positions free, nonzero otherwise
	asm("ldr r3, [r0, #8] ");
	asm("adds ip, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp ip, r3 ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("add ip, r2, r1 ");
	asm("add r3, r3, #12 ");
	asm("cmp ip, #32 ");
	asm("bhi 1f ");
	asm("mvn ip, #0 ");
	asm("ldr r0, [r3] ");
	asm("mvn ip, ip, lsr r2 ");
	asm("mov ip, ip, lsr r1 ");
	asm("eor r0, r0, ip ");
	asm("ands r0, r0, ip ");
	__JUMP(,lr);
	asm("1: ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3], #4 ");
	asm("eor r0, r1, r2 ");
	asm("ands r0, r0, r2 ");
	__JUMP(ne,lr);
	asm("mvn r2, #0 ");
	asm("subs ip, ip, #32 ");
	__JUMP(ls,lr);
	asm("cmp ip, #32 ");
	asm("mvncc r2, r2, lsr ip ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}
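
// Illustrative C++ sketch of NotFree (assumptions as above). Per word,
// (word ^ mask) & mask is nonzero exactly when some masked bit is 0, i.e.
// some position in the range is occupied:
//
//	TBool TBitMapAllocator::NotFree(TInt aStart, TInt aLength) const
//		{
//		if (TUint(aStart) + TUint(aLength) > TUint(iSize))
//			TBmaFault(__LINE__);
//		const TUint32* pW = iMap + (aStart >> 5);
//		TInt sb = aStart & 31;
//		TInt rem = aLength + sb;
//		TUint32 mask = 0xffffffffu >> sb;
//		if (rem <= 32)
//			mask &= ~(rem == 32 ? 0u : 0xffffffffu >> rem);
//		for (;;)
//			{
//			TUint32 r = (*pW++ ^ mask) & mask;
//			if (r || (rem -= 32) <= 0)
//				return r;						// nonzero = not all free
//			mask = rem >= 32 ? 0xffffffffu : ~(0xffffffffu >> rem);
//			}
//		}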


/**	Tests if a specific range of bit positions are all occupied
	Specified range must lie within the total range for this allocator.

	@param	aStart	First position to check
	@param	aLength	Number of consecutive positions to check, must be >0
	@return	FALSE if all positions occupied, TRUE if at least one is free.
 */
EXPORT_C __NAKED__ TBool TBitMapAllocator::NotAllocated(TInt /*aStart*/, TInt /*aLength*/) const
	{
	// Inverse logic - returns 0 if all positions allocated, nonzero otherwise
	asm("ldr r3, [r0, #8] ");
	asm("adds ip, r1, r2 ");
	asm("bcs 0f ");
	asm("cmp ip, r3 ");
	asm("bhi 0f ");
	asm("mov r3, r1, lsr #5 ");
	asm("and r1, r1, #0x1f ");
	asm("add r3, r0, r3, lsl #2 ");
	asm("add ip, r2, r1 ");
	asm("add r3, r3, #12 ");
	asm("cmp ip, #32 ");
	asm("bhi 1f ");
	asm("mvn ip, #0 ");
	asm("ldr r0, [r3] ");
	asm("mvn ip, ip, lsr r2 ");
	asm("ands r0, r0, ip, lsr r1 ");
	__JUMP(,lr);
	asm("1: ");
	asm("mvn r2, #0 ");
	asm("mov r2, r2, lsr r1 ");
	asm("2: ");
	asm("ldr r1, [r3], #4 ");
	asm("ands r0, r1, r2 ");
	__JUMP(ne,lr);
	asm("mvn r2, #0 ");
	asm("subs ip, ip, #32 ");
	__JUMP(ls,lr);
	asm("cmp ip, #32 ");
	asm("mvncc r2, r2, lsr ip ");
	asm("b 2b ");

	asm("0: ");
	ASM_FAULT();
	}
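
// Illustrative C++ sketch of NotAllocated (assumptions as above). Per word,
// word & mask is nonzero exactly when some masked bit is still 1, i.e. some
// position in the range is free:
//
//	TBool TBitMapAllocator::NotAllocated(TInt aStart, TInt aLength) const
//		{
//		if (TUint(aStart) + TUint(aLength) > TUint(iSize))
//			TBmaFault(__LINE__);
//		const TUint32* pW = iMap + (aStart >> 5);
//		TInt sb = aStart & 31;
//		TInt rem = aLength + sb;
//		TUint32 mask = 0xffffffffu >> sb;
//		if (rem <= 32)
//			mask &= ~(rem == 32 ? 0u : 0xffffffffu >> rem);
//		for (;;)
//			{
//			TUint32 r = *pW++ & mask;
//			if (r || (rem -= 32) <= 0)
//				return r;						// nonzero = not all allocated
//			mask = rem >= 32 ? 0xffffffffu : ~(0xffffffffu >> rem);
//			}
//		}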


/**	Allocate up to a specified number of available bit positions
	The allocated positions are not required to bear any relationship to
	each other.
	If the number of free positions is less than the number requested,
	allocate all currently free positions.

	@param	aLength	Maximum number of positions to allocate.
	@param	aList	Pointer to memory area where allocated bit numbers should be stored.
	@return	The number of positions allocated
 */
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocList(TInt /*aLength*/, TInt* /*aList*/)
	{
	asm("ldmia r0, {r3,ip} ");				// r3=iAvail, ip=first check word
	asm("stmfd sp!, {r4-r5,lr} ");
	asm("cmp r1, r3 ");
	asm("movgt r1, r3 ");					// if aLength>iAvail, aLength=iAvail
	asm("movs r5, r1 ");					// r5 counts allocations
	asm("beq 0f ");							// if length 0, exit
	asm("sub r3, r3, r1 ");					// reduce available count
	asm("sub r4, ip, r0 ");
	asm("sub r4, r4, #12 ");				// r4=offset of first check word from iMap[0];
	asm("str r3, [r0] ");
	asm("mov r4, r4, lsl #3 ");				// r4=bit number of MSB of first check word
	asm("1: ");
	asm("ldr lr, [ip], #4 ");				// lr=next word
	asm("cmp lr, #0 ");
	asm("addeq r4, r4, #32 ");				// if word=0, increment bit number by 32 and check next word
	asm("beq 1b ");
	asm("mov r3, #1 ");
	asm("sub r4, r4, #1 ");
	asm("2: ");
	asm("mov r3, r3, ror #1 ");				// shift mask right one
	asm("add r4, r4, #1 ");					// and increment bit number
	asm("tst lr, r3 ");						// check next bit
	asm("beq 2b ");
	asm("str r4, [r2], #4 ");				// bit=1, so store bit number in list
	asm("subs r5, r5, #1 ");				// check if we are finished
	asm("beq 4f ");							// branch if we are
	asm("bics lr, lr, r3 ");				// clear bit and see if word now empty
	asm("bne 2b ");							// if word not empty, get next bit
	asm("str lr, [ip, #-4] ");				// word empty - clear word
	asm("add r4, r4, #32 ");				// word empty - step bit number on to next word
	asm("bic r4, r4, #31 ");
	asm("b 1b ");							// and go to check next word
	asm("4: ");
	asm("bics lr, lr, r3 ");				// clear bit
	asm("str lr, [ip, #-4] ");				// we are finished - store modified word
	asm("subne ip, ip, #4 ");				// if word not empty, first check=last read word
	asm("str ip, [r0, #4] ");				// update first check word
	asm("0: ");
	asm("mov r0, r1 ");						// return number of positions allocated
	asm("ldmfd sp!, {r4-r5,pc} ");
	}
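
// Illustrative C++ sketch of AllocList (assumptions as above; the assembly
// scans bit-by-bit within a word rather than recounting leading zeros, but
// the observable behaviour is the same):
//
//	TInt TBitMapAllocator::AllocList(TInt aLength, TInt* aList)
//		{
//		if (aLength > iAvail)
//			aLength = iAvail;					// allocate what there is
//		iAvail -= aLength;
//		TUint32* pW = iCheckFirst;
//		for (TInt i = 0; i < aLength; ++i)
//			{
//			while (*pW == 0)
//				++pW;							// find a word with a free bit
//			TUint32 w = *pW;
//			TInt b = 0;
//			while (!(w & 0x80000000u))
//				{ w <<= 1; ++b; }				// leading-zero count
//			*pW &= ~(0x80000000u >> b);			// mark allocated
//			aList[i] = ((pW - iMap) << 5) + b;	// record position
//			}
//		iCheckFirst = pW;
//		return aLength;
//		}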


/**	Find a set of consecutive bit positions with specified alignment, with
	support for chaining multiple allocators.
	Note that this function does not mark the positions as allocated.

	@param	aLength		number of consecutive bit positions to allocate
	@param	aAlign		logarithm to base 2 of the alignment required
	@param	aBase		the alignment of the first bit of this allocator - only significant modulo 2^aAlign
	@param	aBestFit	TRUE for best fit allocation strategy, FALSE for first fit
	@param	aCarry		carry in/carry out
	@param	aRunLength	Holds best run length found so far.  This will be set to KMaxTInt when no
						suitable run length has been found.  In best fit mode aCarry should also be
						checked as aRunLength will not be set if aCarry is the only suitable run length
						found.
	@param	aOffset		The bit position to start the search from, set to 0 to search all bit positions.
						aOffset will be aligned so all bits before an aligned aOffset will be
						ignored.  This can only be non-zero if aCarry is zero as any carry in bits will be
						ignored if aOffset is non-zero.

	@return	Start position if a suitable run was found
	@return	KErrNotFound if no suitable run was found
	@return	KErrOverflow, if all positions free and best fit mode, or if all positions free
			in first fit mode and length requested > number of positions available.

	@see	TBitMapAllocator::AllocConsecutive(TInt aLength, TBool aBestFit)
	@see	TBitMapAllocator::AllocAligned(TInt aLength, TInt aAlign, TInt aBase, TBool aBestFit)
	@see	..\bma.cpp for more details
 */
EXPORT_C __NAKED__ TInt TBitMapAllocator::AllocAligned(TInt /*aLength*/, TInt /*aAlign*/, TInt /*aBase*/,
												TBool /*aBestFit*/, TInt& /*aCarry*/, TInt& /*aRunLength*/,
												TUint /*aOffset*/) const
	{
	// r0=this, r1=aLength, r2=aAlign, r3=aBase, [sp+0]=aBestFit, [sp+4]=&aCarry, [sp+8]=&aRunLength
	// [sp+12] = aOffset.
	asm("ldr r12, [sp, #0] ");				// r12=aBestFit
	asm("cmp r1, #0 ");
	asm("ble aa_inv ");						// __ASSERT_ALWAYS(aLength>0, TBMA_FAULT())
	asm("cmp r2, #31 ");
	asm("bhs aa_inv ");						// __ASSERT_ALWAYS(TUint(aAlign)<31, TBMA_FAULT())
	asm("stmfd sp!, {r4-r11,lr} ");
	asm("movs r8, r12 ");					// r8=aBestFit
	asm("ldr r11, [sp, #40] ");				// r11=&aCarry
	asm("mvnne r8, #0x80000000 ");			// if (aBestFit) r8=7fffffff else r8=0
	asm("ldmia r0!, {r4-r6} ");				// r4=iAvail, r5=iCheckFirst, r6=iSize, r0->iMap[0]
	asm("ldr r12, [sp, #48] ");				// r12 = aOffset;
	asm("cmp r6, r12 ");
	asm("bls aa_inv ");						// __ASSERT_ALWAYS(aOffset < (TUint)iSize, TBMA_FAULT())
	asm("ldr r9, [r11] ");					// r9=aCarry
	asm("cmp r9, #0 ");
	asm("cmpne r12, #0 ");
	asm("bne aa_inv ");						// __ASSERT_ALWAYS(!aCarry || !aOffset, TBMA_FAULT())
	asm("mov r12, #1 ");
	asm("mov r12, r12, lsl r2 ");			// r12=alignsize = 1<<aAlign
	asm("sub r2, r12, #1 ");				// r2=alignmask = alignsize-1
	asm("cmp r4, r6 ");						// check for iAvail==iSize
	asm("beq aa_all_free ");				// branch if so
	asm("rsbs r9, r9, #0 ");				// r9=run start=-aCarry
	asm("movne r5, r0 ");					// if carry, pW=iMap
	asm("sub r4, r5, r0 ");					// r4=first check address - &iMap[0]
	asm("add r12, r6, #31 ");
	asm("mov r4, r4, lsl #3 ");				// r4=bit number of first bit to check
	asm("bic r12, r12, #31 ");				// r12=size rounded up to multiple of 32
	asm("mvn r7, #0 ");						// saved bit number (p)
	asm("add r10, r0, r12, lsr #3 ");		// r10=end address of bitmap
	asm("str r7, [sp, #-4]! ");				// push saved bit number (p) onto stack
	asm("movs r11, r9 ");
	asm("mvnne r11, #0 ");					// if (aCarry) r11=~0 else r11=0

	// registers:	r0=this->iMap, r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
	//				r6=iSize, r7=, r8=saved run length, r9=run start pos
	//				r10=end address of bitmap, r11=state
	asm("ldr r7, [sp, #52] ");				// r7 = aOffset;
	asm("cmp r7, #0 ");						// if (aOffset)
	asm("beq aa_word ");
	asm("add r7, r7, r3 ");					// r7 = aOffset + aBase
	asm("add r7, r7, r2 ");					// r7 = aOffset + aBase + alignmask
	asm("bic r7, r7, r2 ");					// r7 = (aOffset + aBase + alignmask) & ~alignmask
	asm("sub r7, r7, r3 ");					// r7 -= aBase
	asm("mov r12, r7, lsr #5 ");			// r12 = aOffset >> 5 (number of pointer increments required)
	asm("add r0, r0, r12, lsl #2 ");		// r0 = offsetWord = iMap + (aOffset >> 5)	(pointer add so shift=2)
	asm("cmp r0, r5 ");						// if (offsetWord >= pW)
	asm("movpl r5, r0 ");					// r5 = pW = offsetWord
	asm("andpl r4, r7, #0xffffffe0 ");		// r4 = n = aOffset & 0xffffffe0
	asm("andpl r7, r7, #31 ");				// r7 = aOffset & 31
	asm("mov r0, #0xffffffff ");			// r0 = 0xffffffff
	asm("mov r7, r0, lsr r7 ");				// r7 = offsetMask = 0xffffffff >> (aOffset & 31)

	// registers:	r0=bit to check (b), r1=aLength, r2=alignmask, r3=aBase, r4=current bit number, r5=word pointer
	//				r6=iSize, r7=offsetMask, r8=saved run length, r9=run start pos
	//				r10=end address of bitmap, r11=state, r12=word
	asm("aa_word: ");						// while (pW < pE)
	asm("cmp r5, r10 ");					// reached end?
	asm("ldrlo r12, [r5], #4 ");			// if not, r12=next word (=*pW++)
	asm("bhs aa_end_loop ");				// if end, branch out

	asm("cmp r7, #0 ");						// if (offsetMask)
	asm("andne r12, r12, r7 ");				// r12 = word &= offsetMask
	asm("movne r7, #0 ");					// offsetMask = 0;

	asm("eors r12, r12, r11 ");				// r12=w^s, test if any bit with the required state is present
	asm("addeq r4, r4, #32 ");				// if not, increment bit # by 32
	asm("beq aa_word ");					// and do next word
	asm("mov r0, #0x80000000 ");			// bit to check (b)

	asm("aa_bit: ");						// if ((word ^ s) & b)
	asm("tst r12, r0 ");					// does bit have required state?
	asm("bne aa_bit_found ");
	asm("aa_end_for: ");
	asm("add r4, r4, #1 ");					// increment bit number
	asm("movs r0, r0, lsr #1 ");			// next bit
	asm("bne aa_bit ");						// if all bits not done, do next
	asm("b aa_word ");						// else do next word

	asm("aa_bit_found: ");
	asm("mvns r12, r12 ");					// Invert r12 to invert search bit
	asm("mvns r14, r11 ");					// if (s)
	asm("cmpeq r4, r6 ");					// && n==iSize
	asm("beq aa_end_loop ");				// ... finished
	asm("mvns r11, r11 ");					// else s=~s
	asm("movne r9, r4 ");					// if (s) q=n (1 found so save position)
	asm("bne aa_end_for ");

	asm("sub r14, r4, r9 ");				// r14 = run length = n - q
	asm("stmdb sp!, {r0,r12} ");			// store b (r0) and word (r12) on stack
	asm("add r12, r9, r3 ");				// r12 = q + aBase
	asm("add r12, r12, r2 ");				// r12 = q + aBase + alignmask
	asm("bic r12, r12, r2 ");				// r12 = (q + aBase + alignmask) & ~alignmask
	asm("sub r12, r12, r3 ");				// r12 = alignedStartPos = r12 - aBase
	asm("sub r0, r12, r9 ");				// r0 = lost = alignedStartPos - q
	asm("sub r0, r14, r0 ");				// r0 = run length - lost
	asm("cmp r0, r1 ");						// if (run length - lost >= aLength)
	asm("ldmltia sp!, {r0,r12} ");			// if aligned length too short: r0 = b and r12 = word from stack
	asm("blt aa_end_for ");					// (run length - lost) too short (must be signed comparison)

// if (rl-lost>=aLength)

	asm("cmp r1, r14 ");					// check for exact run length match (if (run length == aLength))
	asm("cmpne r8, #0 ");					// check for best fit (r8 only ever set if (aBestFit))
	asm("beq aa_found_it ");				// exact match or not in best fit mode

// 		if (r1<minrl)
	asm("cmp r12, #0 ");
	asm("movmi r12, #0 ");					// r12 = (alignedStartPos >= 0)? alignedStartPos : 0
	asm("cmp r14, r8 ");					// Compare run length with current minimum
	asm("movlo r8, r14 ");					// if shorter, replace
	asm("strlo r12, [sp, #8] ");			// save alignedStartPos (p = (alignedStartPos >= 0)? alignedStartPos : 0)
	asm("ldmia sp!, {r0,r12} ");			// r0 = b and r12 = word from stack
	asm("b aa_end_for ");					// next bit
// 		end {if (r1<minrl)}

// 		if (!aBestFit || run length == aLength)
	// registers:	r12 = alignedStartPos, r14 = run length
	asm("aa_found_it: ");
	asm("ldr r1, [sp, #52] ");				// r1=&aCarry
	asm("ldr r7, [sp, #56] ");				// r7=&aRunLength
	asm("subs r0, r12, #0 ");				// r0 = alignedStartPos; test alignedStartPos >= 0
	asm("movmi r0, #0 ");					// if alignedStartPos < 0 r0=0
	asm("str r14, [r7] ");					// aRunLength = run length
	asm("mov r14, #0 ");
	asm("strge r14, [r1] ");				// if (alignedStartPos >= 0), aCarry=0
	asm("ldmfd sp!, {r1-r11,pc} ");			// return
// 		end {if (!aBestFit || run length == aLength)}

// end {if (rl-lost>=aLength)}

	asm("aa_end_loop: ");
	asm("ldr r10, [sp, #48] ");				// r10=&aRunLength

//		registers:	r2 = alignmask, r3 = aBase, r4=current bit number(n),
//					r9=run start pos(q), r10=&aRunLength, r11 = state(s), r14 = run length(rl)
	asm("cmp r8, r1 ");						// compare min rl with aLength
	asm("beq aa_end_loop2 ");				// if exact match, skip

// if (minrl != aLength)
	asm("ldr r12, [sp, #44] ");				// r12=&aCarry
	asm("mov r14, #0 ");					// r14 = run length = 0
	asm("cmp r11, #0 ");
	asm("beq aa_end_loop3 ");				// if (!s) no final run
	asm("sub r14, r4, r9 ");				// r14 = run length = n-q
	asm("cmp r8, #0 ");						// if (!aBestFit) (r8 only and always set when best fit mode)
	asm("bne aa_end_loop3 ");				// if best fit, don't count final run

//		if (!aBestFit)
	asm("add r0, r9, r3 ");					// r0 = q + aBase
	asm("add r0, r0, r2 ");					// r0 = q + aBase + alignmask
	asm("bic r0, r0, r2 ");					// r0 = (q + aBase + alignmask) & ~alignmask
	asm("sub r0, r0, r3 ");					// r0 = alignedStartPos = r0 - aBase
	asm("sub r2, r0, r9 ");					// r2 = lost = alignedStartPos - q
	asm("sub r2, r14, r2 ");				// r2 = run length - lost
	asm("cmp r2, r1 ");						// if (run length - lost >= aLength)
	asm("blt aa_end_loop3 ");

//			if (run length - lost >= aLength)
	asm("mov r8, r14 ");					// r8 = run length (ready to be stored in return)
	asm("mov r14, #0 ");					// r14 = 0 (aCarry on return)
	asm("str r0, [sp, #0] ");				// Save alignedStartPos on stack ready for return

//			end {if (run length - lost >= aLength)}
//		end {if (!aBestFit)}

	asm("aa_end_loop3: ");
	asm("str r14, [r12] ");					// Save aCarry = run length = r14
// end {if (minrl != aLength)}

	asm("aa_end_loop2: ");
	asm("str r8, [r10] ");					// aRunLength = minrl
	asm("ldmfd sp!, {r0,r4-r11,pc} ");		// return saved pos

// r1 = aLength, r2 = alignmask, r3 = aBase, r4 = iAvail, r6 = iSize, r9 = aCarry, r11 = &aCarry
	asm("aa_all_free: ");
	asm("ldr r12, [sp, #48] ");				// r12 = aOffset;
	asm("cmp r12, #0 ");					// if (aOffset)
	asm("addne r12, r12, r3 ");				// r12 = aOffset + aBase
	asm("addne r12, r12, r2 ");				// r12 = aOffset + aBase + alignmask
	asm("bicne r12, r12, r2 ");				// r12 = (aOffset + aBase + alignmask)&~alignmask
	asm("subne r12, r12, r3 ");				// r12 = ((aOffset + aBase + alignmask)&~alignmask) - aBase
	asm("subs r10, r6, r12 ");				// r10 = runLength = iSize - aligned aOffset
	asm("movmi r10, #0 ");					// if aligned aOffset > iSize, runLength = 0

	asm("movs r0, r8 ");					// best fit? if not, r0=0
	asm("bne aa_all_free2 ");				// skip if best fit mode
	asm("sub r6, r12, r9 ");				// r6=aOffset-aCarry
	asm("add r6, r6, r3 ");					// r6=aOffset-aCarry+aBase
	asm("add r6, r6, r2 ");					// r6=aOffset-aCarry+aBase+alignmask
	asm("bic r6, r6, r2 ");					// r6=(aOffset-aCarry+aBase+alignmask)&~alignmask
	asm("sub r6, r6, r3 ");					// r6 = alignedStartPos
	asm("sub r3, r12, r9 ");				// r3 = aOffset - aCarry
	asm("sub r3, r6, r3 ");					// r3 = lost = alignedStartPos - (aOffset - aCarry)
	asm("add r2, r10, r9 ");				// r2 = aRunLength + aCarry
	asm("sub r2, r2, r3 ");					// r2 -= lost
	asm("cmp r2, r1 ");						// if (aRunLength + aCarry - lost >= aLength)
	asm("blt aa_all_free2 ");
	asm("cmp r6, #0 ");
	asm("ldr r5, [sp, #44] ");				// r5 = &aRunLength
	asm("str r10, [r5] ");					// Save aRunLength (aRunLength = runLength)
	asm("movge r9, #0 ");					// if (alignedStartPos >= 0) aCarry = 0;
	asm("str r9, [r11] ");					// Save aCarry
	asm("movge r0, r6 ");					// r0 = (alignedStartPos >= 0)? alignedStartPos : 0
	asm("ldmfd sp!, {r4-r11,pc} ");			// return r0

	asm("aa_all_free2: ");
	asm("ldr r12, [sp, #48] ");				// r12 = aOffset;
	asm("cmp r12, #0 ");					// if (aOffset)
	asm("movne r9, r10 ");					// r9 = aCarry = runLength
	asm("addeq r9, r9, r4 ");				// r9 = aCarry + iAvail
	asm("str r9, [r11] ");					// Save aCarry
	asm("ldr r5, [sp, #44] ");				// r5 = &aRunLength
	asm("mov r0, #%a0" : : "i" ((TInt)KMaxTInt));
	asm("str r0, [r5] ");					// aRunLength = KMaxTInt
	asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
	asm("ldmfd sp!, {r4-r11,pc} ");			// return KErrOverflow

	asm("aa_inv: ");
	ASM_FAULT();
	}
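
// The scan above, in outline (an illustrative sketch, not the authoritative
// logic; see the C++ version in e32\klib\bma.cpp). The bitmap is walked as
// alternating runs of allocated (0) and free (1) bits, tracking the start q
// of the current free run; when a free run ends at bit n its usable length
// after alignment is (n - q) - lost, where
//
//	alignedStart = ((q + aBase + alignmask) & ~alignmask) - aBase;
//	lost         = alignedStart - q;		// bits skipped to reach alignment
//
// First fit accepts the first run with (n - q) - lost >= aLength and returns
// alignedStart; best fit records the shortest such run and returns it once
// the whole map has been scanned. A trailing free run is not terminated by
// the end of the map: its length is passed out through aCarry so that a
// chained allocator can continue the run, which is why best-fit callers must
// check aCarry as well as aRunLength.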
#endif