os/kernelhwsrv/kernel/eka/nkernsmp/arm/ncutilf.cia
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\nkernsmp\arm\ncutilf.cia
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
#include <e32cia.h>
sl@0
    19
#include <arm.h>
sl@0
    20
#include <arm_gic.h>
sl@0
    21
#include <arm_tmr.h>
sl@0
    22
sl@0
    23
sl@0
    24
sl@0
    25
__NAKED__ void Arm::GetUserSpAndLr(TAny*) 
sl@0
    26
	{
sl@0
    27
	asm("stmia	r0, {r13, r14}^ ");
sl@0
    28
	asm("mov	r0, r0"); // NOP needed between stm^ and banked register access
sl@0
    29
	__JUMP(,	lr);
sl@0
    30
	}
sl@0
    31
sl@0
    32
__NAKED__ void Arm::SetUserSpAndLr(TAny*) 
sl@0
    33
	{
sl@0
    34
	asm("ldmia	r0, {r13, r14}^ ");
sl@0
    35
	asm("mov	r0, r0"); // NOP needed between ldm^ and banked register access
sl@0
    36
	__JUMP(,	lr);
sl@0
    37
	}
sl@0
    38
sl@0
    39
__NAKED__ TUint32 Arm::Dacr()
sl@0
    40
	{
sl@0
    41
	asm("mrc	p15, 0, r0, c3, c0, 0 ");
sl@0
    42
	__JUMP(,	lr);
sl@0
    43
	}
sl@0
    44
sl@0
    45
__NAKED__ void Arm::SetDacr(TUint32)
sl@0
    46
	{
sl@0
    47
	asm("mcr	p15, 0, r0, c3, c0, 0 ");
sl@0
    48
	__INST_SYNC_BARRIER_Z__(r1);
sl@0
    49
	__JUMP(,	lr);
sl@0
    50
	}
sl@0
    51
sl@0
    52
__NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
sl@0
    53
	{
sl@0
    54
	asm("mrc	p15, 0, r2, c3, c0, 0 ");
sl@0
    55
	asm("bic	r2, r2, r0 ");
sl@0
    56
	asm("orr	r2, r2, r1 ");
sl@0
    57
	asm("mcr	p15, 0, r2, c3, c0, 0 ");
sl@0
    58
	__INST_SYNC_BARRIER_Z__(r3);
sl@0
    59
	asm("mov	r0, r2 ");
sl@0
    60
	__JUMP(,	lr);
sl@0
    61
	}
sl@0
    62
sl@0
    63
__NAKED__ void Arm::SetCar(TUint32)
sl@0
    64
	{
sl@0
    65
	SET_CAR(,	r0);
sl@0
    66
	__JUMP(,	lr);
sl@0
    67
	}
sl@0
    68
sl@0
    69
sl@0
    70
sl@0
    71
/** Get the CPU's coprocessor access register value
sl@0
    72
sl@0
    73
@return The value of the CAR, 0 if CPU doesn't have CAR
sl@0
    74
sl@0
    75
@publishedPartner
sl@0
    76
@released
sl@0
    77
 */
sl@0
    78
EXPORT_C __NAKED__ TUint32 Arm::Car()
sl@0
    79
	{
sl@0
    80
	GET_CAR(,	r0);
sl@0
    81
	__JUMP(,	lr);
sl@0
    82
	}
sl@0
    83
sl@0
    84
sl@0
    85
sl@0
    86
/** Modify the CPU's coprocessor access register value
sl@0
    87
	Does nothing if CPU does not have CAR.
sl@0
    88
sl@0
    89
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
    90
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
    91
@return The original value of the CAR, 0 if CPU doesn't have CAR
sl@0
    92
sl@0
    93
@publishedPartner
sl@0
    94
@released
sl@0
    95
 */
sl@0
    96
EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
    97
	{
sl@0
    98
	GET_CAR(,	r2);
sl@0
    99
	asm("bic	r0, r2, r0 ");
sl@0
   100
	asm("orr	r0, r0, r1 ");
sl@0
   101
	SET_CAR(,	r0);
sl@0
   102
	asm("mov	r0, r2 ");
sl@0
   103
	__JUMP(,	lr);
sl@0
   104
	}
sl@0
   105
sl@0
   106
sl@0
   107
#ifdef __CPU_HAS_VFP
sl@0
   108
__NAKED__ void Arm::SetFpExc(TUint32)
sl@0
   109
	{
sl@0
   110
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
sl@0
   111
// If we are about to enable VFP, disable dynamic branch prediction
sl@0
   112
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
sl@0
   113
	asm("mrs	r3, cpsr ");
sl@0
   114
	__ASM_CLI();
sl@0
   115
	asm("mrc	p15, 0, r1, c1, c0, 1 ");
sl@0
   116
	asm("tst	r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
sl@0
   117
	asm("bic	r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
sl@0
   118
	asm("and	r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
sl@0
   119
	asm("orreq	r1, r1, r2, lsl #1 ");		// if VFP is being disabled set DB = RS
sl@0
   120
	asm("mcr	p15, 0, r1, c1, c0, 1 ");
sl@0
   121
	asm("mcr	p15, 0, r2, c7, c5, 6 ");	// flush BTAC
sl@0
   122
	VFP_FMXR(,	VFP_XREG_FPEXC,0);
sl@0
   123
	__INST_SYNC_BARRIER_Z__(r12);
sl@0
   124
	asm("msr	cpsr, r3 ");
sl@0
   125
	__JUMP(,	lr);
sl@0
   126
#else
sl@0
   127
	VFP_FMXR(,	VFP_XREG_FPEXC,0);
sl@0
   128
	__JUMP(,	lr);
sl@0
   129
#endif
sl@0
   130
	}
sl@0
   131
#endif
sl@0
   132
sl@0
   133
sl@0
   134
sl@0
   135
/** Get the value of the VFP FPEXC register
sl@0
   136
sl@0
   137
@return The value of FPEXC, 0 if there is no VFP
sl@0
   138
sl@0
   139
@publishedPartner
sl@0
   140
@released
sl@0
   141
 */
sl@0
   142
EXPORT_C __NAKED__ TUint32 Arm::FpExc()
sl@0
   143
	{
sl@0
   144
#ifdef __CPU_HAS_VFP
sl@0
   145
	VFP_FMRX(,	0,VFP_XREG_FPEXC);
sl@0
   146
#else
sl@0
   147
	asm("mov	r0, #0 ");
sl@0
   148
#endif
sl@0
   149
	__JUMP(,	lr);
sl@0
   150
	}
sl@0
   151
sl@0
   152
sl@0
   153
sl@0
   154
/** Modify the VFP FPEXC register
sl@0
   155
	Does nothing if there is no VFP
sl@0
   156
sl@0
   157
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
   158
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
   159
@return The original value of FPEXC, 0 if no VFP present
sl@0
   160
sl@0
   161
@publishedPartner
sl@0
   162
@released
sl@0
   163
 */
sl@0
   164
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
   165
	{
sl@0
   166
#ifdef __CPU_HAS_VFP
sl@0
   167
	VFP_FMRX(,	12,VFP_XREG_FPEXC);
sl@0
   168
	asm("bic	r0, r12, r0 ");
sl@0
   169
	asm("orr	r0, r0, r1 ");
sl@0
   170
sl@0
   171
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
sl@0
   172
// If we are about to enable VFP, disable dynamic branch prediction
sl@0
   173
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
sl@0
   174
	asm("mrs	r3, cpsr ");
sl@0
   175
	__ASM_CLI();
sl@0
   176
	asm("mrc	p15, 0, r1, c1, c0, 1 ");
sl@0
   177
	asm("tst	r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
sl@0
   178
	asm("bic	r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
sl@0
   179
	asm("and	r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
sl@0
   180
	asm("orreq	r1, r1, r2, lsl #1 ");		// if VFP is being disabled set DB = RS
sl@0
   181
	asm("mcr	p15, 0, r1, c1, c0, 1 ");
sl@0
   182
	asm("mcr	p15, 0, r2, c7, c5, 6 ");	// flush BTAC
sl@0
   183
	VFP_FMXR(,	VFP_XREG_FPEXC,0);
sl@0
   184
	__INST_SYNC_BARRIER_Z__(r12);
sl@0
   185
	asm("msr	cpsr, r3 ");
sl@0
   186
#else
sl@0
   187
	VFP_FMXR(,	VFP_XREG_FPEXC,0);
sl@0
   188
#endif	// erratum 351912
sl@0
   189
sl@0
   190
	asm("mov	r0, r12 ");
sl@0
   191
#else	// no vfp
sl@0
   192
	asm("mov	r0, #0 ");
sl@0
   193
#endif
sl@0
   194
	__JUMP(,	lr);
sl@0
   195
	}
sl@0
   196
sl@0
   197
/** Get the value of the VFP FPSCR register
sl@0
   198
sl@0
   199
@return The value of FPSCR, 0 if there is no VFP
sl@0
   200
sl@0
   201
@publishedPartner
sl@0
   202
@released
sl@0
   203
 */
sl@0
   204
EXPORT_C __NAKED__ TUint32 Arm::FpScr()
sl@0
   205
	{
sl@0
   206
#ifdef __CPU_HAS_VFP
sl@0
   207
	VFP_FMRX(,	0,VFP_XREG_FPSCR);
sl@0
   208
#else
sl@0
   209
	asm("mov	r0, #0 ");
sl@0
   210
#endif
sl@0
   211
	__JUMP(,	lr);
sl@0
   212
	}
sl@0
   213
sl@0
   214
sl@0
   215
sl@0
   216
/** Modify the VFP FPSCR register
sl@0
   217
	Does nothing if there is no VFP
sl@0
   218
sl@0
   219
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
   220
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
   221
@return The original value of FPSCR, 0 if no VFP present
sl@0
   222
sl@0
   223
@publishedPartner
sl@0
   224
@released
sl@0
   225
 */
sl@0
   226
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
   227
	{
sl@0
   228
#ifdef __CPU_HAS_VFP
sl@0
   229
	VFP_FMRX(,	2,VFP_XREG_FPSCR);
sl@0
   230
	asm("bic	r0, r2, r0 ");
sl@0
   231
	asm("orr	r0, r0, r1 ");
sl@0
   232
	VFP_FMXR(,	VFP_XREG_FPSCR,0);
sl@0
   233
	asm("mov	r0, r2 ");
sl@0
   234
#else
sl@0
   235
	asm("mov	r0, #0 ");
sl@0
   236
#endif
sl@0
   237
	__JUMP(,	lr);
sl@0
   238
	}
sl@0
   239
sl@0
   240
sl@0
   241
/** Detect whether NEON is present
sl@0
   242
sl@0
   243
@return ETrue if present, EFalse if not
sl@0
   244
sl@0
   245
@internalTechnology
sl@0
   246
@released
sl@0
   247
 */
sl@0
   248
#if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
sl@0
   249
__NAKED__ TBool Arm::NeonPresent()
sl@0
   250
	{
sl@0
   251
	asm("mov	r0, #0 ");										// Not present
sl@0
   252
	VFP_FMRX(,	1,VFP_XREG_FPEXC);								// Save VFP state
sl@0
   253
	asm("orr	r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
sl@0
   254
	VFP_FMXR(,	VFP_XREG_FPEXC,1);								// Enable VFP
sl@0
   255
sl@0
   256
	VFP_FMRX(,	2,VFP_XREG_MVFR0);								// Read MVFR0
sl@0
   257
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
sl@0
   258
	asm("beq	0f ");											// Skip ahead if not
sl@0
   259
	GET_CAR(,	r2);
sl@0
   260
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS));	// Check to see if ASIMD is disabled
sl@0
   261
	asm("bne	0f ");											// Skip ahead if so
sl@0
   262
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if the upper 16 registers are disabled
sl@0
   263
	asm("moveq	r0, #1" );										// If not then eport NEON present
sl@0
   264
sl@0
   265
	asm("0: ");
sl@0
   266
	VFP_FMXR(,VFP_XREG_FPEXC,1);								// Restore VFP state
sl@0
   267
	__JUMP(,	lr);
sl@0
   268
	}
sl@0
   269
#endif
sl@0
   270
sl@0
   271
sl@0
   272
#ifdef __CPU_HAS_MMU
sl@0
   273
__NAKED__ TBool Arm::MmuActive()
sl@0
   274
	{
sl@0
   275
	asm("mrc	p15, 0, r0, c1, c0, 0 ");
sl@0
   276
	asm("and	r0, r0, #1 ");
sl@0
   277
	__JUMP(,	lr);
sl@0
   278
	}
sl@0
   279
sl@0
   280
// Returns the content of Translate Table Base Register 0.
sl@0
   281
// To get physical address of the level 1 table, on some platforms this must be orred with 0xffff8000 (to get rid of table walk cache attributes)
sl@0
   282
__NAKED__ TUint32 Arm::MmuTTBR0()
sl@0
   283
	{
sl@0
   284
	asm("mrc	p15, 0, r0, c2, c0, 0 ");
sl@0
   285
	__JUMP(,	lr);
sl@0
   286
	}
sl@0
   287
#endif
sl@0
   288
sl@0
   289
sl@0
   290
sl@0
   291
/** Get the current value of the system timestamp
sl@0
   292
sl@0
   293
@publishedPartner
sl@0
   294
@prototype
sl@0
   295
*/
sl@0
   296
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
sl@0
   297
	{
sl@0
   298
	asm("ldr	r3, __TheScheduler ");
sl@0
   299
	asm("mrs	r12, cpsr ");				// r12 = saved interrupt mask
sl@0
   300
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TScheduler,i_LocalTimerAddr));	// r2 points to local timer
sl@0
   301
	__ASM_CLI();							// disable all interrupts
sl@0
   302
	GET_RWNO_TID(,r3);						// r3 -> TSubScheduler
sl@0
   303
	asm("ldr	r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));		// r1 = current timer counter
sl@0
   304
	asm("ldr	r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));	// r0 = last value written to timer counter
sl@0
   305
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));		// r2 = scaling factor
sl@0
   306
	asm("sub	r0, r0, r1 ");				// elapsed timer ticks since last timestamp sync
sl@0
   307
	asm("umull	r1, r2, r0, r2 ");			// r2:r1 = elapsed ticks * scaling factor
sl@0
   308
	asm("ldr	r0, [r3, #%a0]!" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));	// r0 = last timestamp sync point, low word
sl@0
   309
	asm("ldr	r3, [r3, #4] ");			// r3 = last timestamp sync point, high word
sl@0
   310
	asm("adds	r1, r1, #0x00800000 ");		// add 2^23 (rounding)
sl@0
   311
	asm("adcs	r2, r2, #0 ");
sl@0
   312
	asm("mov	r1, r1, lsr #24 ");			// divide by 2^24
sl@0
   313
	asm("orr	r1, r1, r2, lsl #8 ");		// r1 = elapsed time since last timestamp sync
sl@0
   314
	asm("msr	cpsr, r12 ");				// restore interrupts
sl@0
   315
	asm("adds	r0, r0, r1 ");				// r1:r0 = last timestamp sync point + elapsed time since last timestamp sync
sl@0
   316
	asm("adcs	r1, r3, #0 ");
sl@0
   317
	__JUMP(,lr);
sl@0
   318
	asm("__TheScheduler: ");
sl@0
   319
	asm(".word	%a0" : : "i" ((TInt)&TheScheduler));
sl@0
   320
	}
sl@0
   321
sl@0
   322
sl@0
   323
extern "C" __NAKED__ TLinAddr get_sp_svc()
sl@0
   324
	{
sl@0
   325
	asm("mrs	r1, cpsr ");
sl@0
   326
	__ASM_CLI_MODE(MODE_SVC);
sl@0
   327
	asm("mov	r0, sp ");
sl@0
   328
	asm("msr	cpsr, r1 ");
sl@0
   329
	__JUMP(,	lr);
sl@0
   330
	}
sl@0
   331
sl@0
   332
extern "C" __NAKED__ TLinAddr get_lr_svc()
sl@0
   333
	{
sl@0
   334
	asm("mrs	r1, cpsr ");
sl@0
   335
	__ASM_CLI_MODE(MODE_SVC);
sl@0
   336
	asm("mov	r0, lr ");
sl@0
   337
	asm("msr	cpsr, r1 ");
sl@0
   338
	__JUMP(,	lr);
sl@0
   339
	}
sl@0
   340
sl@0
   341
sl@0
   342
/** Get the return address from an ISR
sl@0
   343
sl@0
   344
Call only from an ISR
sl@0
   345
sl@0
   346
@internalTechnology
sl@0
   347
*/
sl@0
   348
EXPORT_C __NAKED__ TLinAddr Arm::IrqReturnAddress()
sl@0
   349
	{
sl@0
   350
	asm("mrs	r1, cpsr ");
sl@0
   351
	__ASM_CLI();
sl@0
   352
	asm("and	r0, r1, #0x1f ");
sl@0
   353
	asm("cmp	r0, #0x11 ");				// mode_fiq ?
sl@0
   354
	asm("beq	1f ");
sl@0
   355
	__ASM_CLI_MODE(MODE_SVC);
sl@0
   356
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15));
sl@0
   357
	asm("msr	cpsr, r1 ");
sl@0
   358
	__JUMP(,	lr);
sl@0
   359
sl@0
   360
	asm("1:		");
sl@0
   361
	GET_RWNO_TID(,r3);
sl@0
   362
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_FiqStackTop));	// if so, r2->top of FIQ stack
sl@0
   363
	asm("ldr	r0, [r2, #-4] ");			// get return address
sl@0
   364
	asm("msr	cpsr, r1 ");
sl@0
   365
	__JUMP(,	lr);
sl@0
   366
	}
sl@0
   367
sl@0
   368
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
sl@0
   369
#define	__ASM_CALL(func)						\
sl@0
   370
	asm("str	lr, [sp, #-4]! ");				\
sl@0
   371
	asm("bl "	CSM_CFUNC(func));				\
sl@0
   372
	asm("ldr	lr, [sp], #4 ");
sl@0
   373
sl@0
   374
#define	SPIN_LOCK_ENTRY_CHECK()			__ASM_CALL(spin_lock_entry_check)
sl@0
   375
#define	SPIN_LOCK_MARK_ACQ()			__ASM_CALL(spin_lock_mark_acq)
sl@0
   376
#define	SPIN_UNLOCK_ENTRY_CHECK()		__ASM_CALL(spin_unlock_entry_check)
sl@0
   377
sl@0
   378
#define	RWSPIN_RLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_rlock_entry_check)
sl@0
   379
#define	RWSPIN_RLOCK_MARK_ACQ()			__ASM_CALL(rwspin_rlock_mark_acq)
sl@0
   380
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_runlock_entry_check)
sl@0
   381
sl@0
   382
#define	RWSPIN_WLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_wlock_entry_check)
sl@0
   383
#define	RWSPIN_WLOCK_MARK_ACQ()			__ASM_CALL(rwspin_wlock_mark_acq)
sl@0
   384
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_wunlock_entry_check)
sl@0
   385
sl@0
   386
#else
sl@0
   387
#define	SPIN_LOCK_ENTRY_CHECK()
sl@0
   388
#define	SPIN_LOCK_MARK_ACQ()
sl@0
   389
#define	SPIN_UNLOCK_ENTRY_CHECK()
sl@0
   390
sl@0
   391
#define	RWSPIN_RLOCK_ENTRY_CHECK()
sl@0
   392
#define	RWSPIN_RLOCK_MARK_ACQ()
sl@0
   393
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()
sl@0
   394
sl@0
   395
#define	RWSPIN_WLOCK_ENTRY_CHECK()
sl@0
   396
#define	RWSPIN_WLOCK_MARK_ACQ()
sl@0
   397
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()
sl@0
   398
sl@0
   399
#endif
sl@0
   400
sl@0
   401
sl@0
   402
/******************************************************************************
sl@0
   403
 * Spin locks
sl@0
   404
 *
sl@0
   405
 * [this+0]		in count (byte)
sl@0
   406
 * [this+1]		out count (byte)
sl@0
   407
 * [this+6]		order (byte)
sl@0
   408
 * [this+7]		holding CPU (byte)
sl@0
   409
 ******************************************************************************/
sl@0
   410
sl@0
   411
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
sl@0
   412
extern "C" __NAKED__ void spin_lock_entry_check()
sl@0
   413
	{
sl@0
   414
	/* R0 points to lock */
sl@0
   415
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
   416
	asm("mrs r12, cpsr ");
sl@0
   417
	__ASM_CLI();
sl@0
   418
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   419
	asm("cmp r1, #0 ");
sl@0
   420
	asm("beq slec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   421
	asm("ldrh r2, [r0, #6] ");				/* R2[8:15]=holding CPU, R2[0:7]=order */
sl@0
   422
	asm("tst r2, #0xE0 ");
sl@0
   423
	asm("bne slec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
   424
sl@0
   425
	/* check interrupts disabled, if interrupts/preemption is not disabled 
sl@0
   426
	there is a risk of same core deadlock occuring, hence this check and
sl@0
   427
	run-time assert to ensure code stays safe */
sl@0
   428
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   429
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
   430
	asm("beq slec_1 ");						/* Yes - OK */
sl@0
   431
	__ASM_CRASH();							/* No - die */
sl@0
   432
sl@0
   433
	asm("slec_preemption: ");
sl@0
   434
	asm("and r3, r2, #0xFF ");
sl@0
   435
	asm("cmp r3, #0xFF ");					/* check for EOrderNone */
sl@0
   436
	asm("beq slec_1 ");						/* EOrderNone - don't check interrupts or preemption */
sl@0
   437
	asm("and r3, r12, #0x1F ");
sl@0
   438
	asm("cmp r3, #0x13 ");					/* Make sure we're in mode_svc */
sl@0
   439
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
   440
	asm("bne slec_preemption_die ");		/* If not, die */
sl@0
   441
	asm("cmp r3, #0 ");
sl@0
   442
	asm("bne slec_1 ");						/* Preemption disabled - OK */
sl@0
   443
	asm("slec_preemption_die: ");
sl@0
   444
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
   445
sl@0
   446
	asm("slec_1: ");
sl@0
   447
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
   448
	asm("cmp r3, r2, lsr #8 ");				/* Test if held by current CPU */
sl@0
   449
	asm("bne slec_2 ");						/* Not already held by this CPU - OK */
sl@0
   450
	__ASM_CRASH();							/* Already held by this CPU - die */
sl@0
   451
sl@0
   452
	asm("slec_2: ");
sl@0
   453
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   454
	asm("ldr r1, [r1, #4] ");				/* r3=low word of iSpinLockOrderCheck, r1=high word */
sl@0
   455
	asm("cmp r3, #0 ");
sl@0
   456
	asm("addeq r2, r2, #0x20000000 ");		/* if low word zero, add 32 to LS1 index ... */
sl@0
   457
	asm("moveq r3, r1 ");					/* ... and r3=high word ... */
sl@0
   458
	asm("subs r1, r3, #1 ");				/* R1 = R3 with all bits up to and including LS1 flipped */
sl@0
   459
	asm("beq slec_ok ");					/* If all bits zero, no locks held so OK */
sl@0
   460
	asm("eor r3, r3, r1 ");					/* Clear all bits above LS1 */
sl@0
   461
	CLZ(1,3);								/* R1 = 31 - bit number of LS1 */
sl@0
   462
	asm("rsb r1, r1, #31 ");				/* R1 = bit number of LS1 */
sl@0
   463
	asm("add r1, r1, r2, lsr #24 ");		/* add 32 if we were looking at high word */
sl@0
   464
	asm("mov r2, r2, lsl #24 ");			/* this lock's order value into R2 high byte */
sl@0
   465
	asm("cmp r1, r2, asr #24 ");			/* compare current lowest order lock to sign-extended order value */
sl@0
   466
	asm("bgt slec_ok ");					/* if this lock's order < current lowest, OK */
sl@0
   467
	__ASM_CRASH();							/* otherwise die */
sl@0
   468
sl@0
   469
	asm("slec_ok: ");
sl@0
   470
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   471
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
   472
	__JUMP(,lr);
sl@0
   473
	}
sl@0
   474
sl@0
   475
extern "C" __NAKED__ void spin_lock_mark_acq()
sl@0
   476
	{
sl@0
   477
	/* R0 points to lock */
sl@0
   478
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
   479
	asm("mrs r12, cpsr ");
sl@0
   480
	__ASM_CLI();
sl@0
   481
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   482
	asm("cmp r1, #0 ");
sl@0
   483
	asm("beq slma_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   484
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
   485
	asm("ldrb r2, [r0, #6] ");				/* R2 = lock order value */
sl@0
   486
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   487
	asm("strb r3, [r0, #7] ");				/* set byte 7 to holding CPU number */
sl@0
   488
	asm("cmp r2, #0x40 ");
sl@0
   489
	asm("bhs slma_ok ");					/* if EOrderNone, done */
sl@0
   490
	asm("cmp r2, #0x20 ");
sl@0
   491
	asm("addhs r1, r1, #4 ");
sl@0
   492
	asm("and r2, r2, #0x1f ");
sl@0
   493
	asm("mov r3, #1 ");
sl@0
   494
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to set */
sl@0
   495
	asm("ldr r2, [r1] ");
sl@0
   496
	asm("orr r2, r2, r3 ");
sl@0
   497
	asm("str r2, [r1] ");					/* set bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
   498
sl@0
   499
	asm("slma_ok: ");
sl@0
   500
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   501
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
   502
	__JUMP(,lr);
sl@0
   503
	}
sl@0
   504
sl@0
   505
extern "C" __NAKED__ void spin_unlock_entry_check()
sl@0
   506
	{
sl@0
   507
	/* R0 points to lock */
sl@0
   508
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
   509
	asm("mrs r12, cpsr ");
sl@0
   510
	__ASM_CLI();
sl@0
   511
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   512
	asm("cmp r1, #0 ");
sl@0
   513
	asm("beq suec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   514
	asm("ldrh r2, [r0, #6] ");				/* R2[8:15]=holding CPU, R2[0:7]=order */
sl@0
   515
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
   516
	asm("eor r2, r2, r3, lsl #8 ");			/* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */
sl@0
   517
	asm("tst r2, #0xE0 ");
sl@0
   518
	asm("bne suec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
   519
sl@0
   520
	/* check interrupts disabled */
sl@0
   521
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   522
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
   523
	asm("beq suec_1 ");						/* Yes - OK */
sl@0
   524
	__ASM_CRASH();							/* No - die */
sl@0
   525
sl@0
   526
	asm("suec_preemption: ");
sl@0
   527
	asm("and r3, r2, #0xFF ");
sl@0
   528
	asm("cmp r3, #0xFF ");					/* check for EOrderNone */
sl@0
   529
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
   530
	asm("beq suec_1 ");						/* EOrderNone - don't check interrupts or preemption */
sl@0
   531
	asm("cmp r3, #0 ");
sl@0
   532
	asm("bne suec_1 ");						/* Preemption disabled - OK */
sl@0
   533
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
   534
sl@0
   535
	asm("suec_1: ");
sl@0
   536
	asm("tst r2, #0xFF00 ");				/* Check if holding CPU ^ current CPU number == 0 */
sl@0
   537
	asm("beq suec_2 ");						/* Held by this CPU - OK */
sl@0
   538
	__ASM_CRASH();							/* Not held by this CPU - die */
sl@0
   539
sl@0
   540
	asm("suec_2: ");
sl@0
   541
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   542
	asm("mov r3, #0xFF ");
sl@0
   543
	asm("strb r3, [r0, #7] ");				/* reset holding CPU */
sl@0
   544
	asm("cmp r2, #0x40 ");
sl@0
   545
	asm("bhs suec_ok ");					/* if EOrderNone, done */
sl@0
   546
	asm("cmp r2, #0x20 ");
sl@0
   547
	asm("addhs r1, r1, #4 ");
sl@0
   548
	asm("and r2, r2, #0x1F ");
sl@0
   549
	asm("mov r3, #1 ");
sl@0
   550
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to clear */
sl@0
   551
	asm("ldr r2, [r1] ");
sl@0
   552
	asm("tst r2, r3 ");						/* test bit originally set */
sl@0
   553
	asm("bic r2, r2, r3 ");
sl@0
   554
	asm("str r2, [r1] ");					/* clear bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
   555
	asm("bne suec_ok ");					/* if originally set, OK */
sl@0
   556
	__ASM_CRASH();							/* if not, die - something must have got corrupted */
sl@0
   557
sl@0
   558
	asm("suec_ok: ");
sl@0
   559
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   560
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
   561
	__JUMP(,lr);
sl@0
   562
	}
sl@0
   563
#endif
sl@0
   564
sl@0
   565
sl@0
   566
/******************************************************************************
sl@0
   567
 * Plain old spin lock
sl@0
   568
 *
sl@0
   569
 * Fundamental algorithm:
sl@0
   570
 *	lock()		{ old_in = in++; while(out!=old_in) __chill(); }
sl@0
   571
 *	unlock()	{ ++out; }
sl@0
   572
 *
sl@0
   573
 * [this+0]		out count (byte)
sl@0
   574
 * [this+1]		in count (byte)
sl@0
   575
 *
sl@0
   576
 ******************************************************************************/
sl@0
   577
__NAKED__ EXPORT_C void TSpinLock::LockIrq()
sl@0
   578
	{
sl@0
   579
	__ASM_CLI();							/* Disable interrupts */
sl@0
   580
	SPIN_LOCK_ENTRY_CHECK()
sl@0
   581
	asm("1: ");
sl@0
   582
	LDREXH(1,0);
sl@0
   583
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
sl@0
   584
	asm("add r1, r1, #0x100 ");
sl@0
   585
	STREXH(3,1,0);
sl@0
   586
	asm("cmp r3, #0 ");
sl@0
   587
	asm("bne 1b ");
sl@0
   588
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
sl@0
   589
	asm("3: ");
sl@0
   590
	asm("cmp r2, r1 ");						/* out = original in ? */
sl@0
   591
	asm("bne 2f ");							/* no - must wait */
sl@0
   592
	SPIN_LOCK_MARK_ACQ()
sl@0
   593
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
sl@0
   594
	__JUMP(,lr);
sl@0
   595
sl@0
   596
	asm("2: ");
sl@0
   597
	ARM_WFE;
sl@0
   598
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
sl@0
   599
	asm("b 3b ");
sl@0
   600
	}
sl@0
   601
sl@0
   602
__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
sl@0
   603
	{
sl@0
   604
	SPIN_UNLOCK_ENTRY_CHECK()
sl@0
   605
	__DATA_MEMORY_BARRIER_Z__(r1);			/* Ensure accesses don't move outside locked section */
sl@0
   606
	asm("ldrb r2, [r0, #0] ");
sl@0
   607
	asm("add r2, r2, #1 ");
sl@0
   608
	asm("strb r2, [r0, #0] ");				/* ++out */
sl@0
   609
	__DATA_SYNC_BARRIER__(r1);				/* Ensure write to out completes before SEV */
sl@0
   610
	ARM_SEV;								/* Wake up any waiting processors */
sl@0
   611
	__ASM_STI();							/* Enable interrupts */
sl@0
   612
	__JUMP(,lr);
sl@0
   613
	}
sl@0
   614
sl@0
   615
__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
sl@0
   616
	{
sl@0
   617
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
sl@0
   618
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
sl@0
   619
	asm("ldrh r1, [r0, #0] ");
sl@0
   620
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
sl@0
   621
	asm("sub r1, r1, r1, lsr #8 ");			/* r1 low byte = (out - in) mod 256 */
sl@0
   622
	asm("and r1, r1, #0xFF ");
sl@0
   623
	asm("cmp r1, #0xFF ");					/* if out - in = -1, no-one else waiting */
sl@0
   624
	asm("addeq r3, r3, #1 ");
sl@0
   625
	asm("cmpeq r3, #1024 ");				/* if no-one waiting for lock, check for pending interrupt */
sl@0
   626
	asm("bne 1f ");							/* branch if someone else waiting */
sl@0
   627
	asm("mov r0, #0 ");						/* else return FALSE */
sl@0
   628
	__JUMP(,lr);
sl@0
   629
sl@0
   630
	asm("1: ");
sl@0
   631
	asm("str lr, [sp, #-4]! ");
sl@0
   632
	asm("bl " CSM_ZN9TSpinLock9UnlockIrqEv);
sl@0
   633
	asm("bl " CSM_ZN9TSpinLock7LockIrqEv);
sl@0
   634
	asm("mov r0, #1 ");
sl@0
   635
	asm("ldr pc, [sp], #4 ");
sl@0
   636
	}
sl@0
   637
sl@0
   638
sl@0
   639
__NAKED__ EXPORT_C void TSpinLock::LockOnly()
sl@0
   640
	{
sl@0
   641
	SPIN_LOCK_ENTRY_CHECK()
sl@0
   642
	asm("1: ");
sl@0
   643
	LDREXH(1,0);
sl@0
   644
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
sl@0
   645
	asm("add r1, r1, #0x100 ");
sl@0
   646
	STREXH(3,1,0);
sl@0
   647
	asm("cmp r3, #0 ");
sl@0
   648
	asm("bne 1b ");
sl@0
   649
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
sl@0
   650
	asm("3: ");
sl@0
   651
	asm("cmp r2, r1 ");						/* out = original in ? */
sl@0
   652
	asm("bne 2f ");							/* no - must wait */
sl@0
   653
	SPIN_LOCK_MARK_ACQ()
sl@0
   654
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
sl@0
   655
	__JUMP(,lr);
sl@0
   656
sl@0
   657
	asm("2: ");
sl@0
   658
	ARM_WFE;
sl@0
   659
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
sl@0
   660
	asm("b 3b ");
sl@0
   661
	}
sl@0
   662
sl@0
   663
__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
sl@0
   664
	{
sl@0
   665
	SPIN_UNLOCK_ENTRY_CHECK()
sl@0
   666
	__DATA_MEMORY_BARRIER_Z__(r1);			/* Ensure accesses don't move outside locked section */
sl@0
   667
	asm("ldrb r2, [r0, #0] ");
sl@0
   668
	asm("add r2, r2, #1 ");
sl@0
   669
	asm("strb r2, [r0, #0] ");				/* ++out */
sl@0
   670
	__DATA_SYNC_BARRIER__(r1);				/* Ensure write to out completes before SEV */
sl@0
   671
	ARM_SEV;								/* Wake up any waiting processors */
sl@0
   672
	__JUMP(,lr);
sl@0
   673
	}
sl@0
   674
sl@0
   675
__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
sl@0
   676
	{
sl@0
   677
	asm("ldrh r1, [r0, #0] ");
sl@0
   678
	asm("sub r1, r1, r1, lsr #8 ");			/* r1 low byte = (out - in) mod 256 */
sl@0
   679
	asm("and r1, r1, #0xFF ");
sl@0
   680
	asm("cmp r1, #0xFF ");					/* if out - in = -1, no-one else waiting */
sl@0
   681
	asm("bne 1f ");							/* branch if someone else waiting */
sl@0
   682
	asm("mov r0, #0 ");						/* else return FALSE */
sl@0
   683
	__JUMP(,lr);
sl@0
   684
sl@0
   685
	asm("1: ");
sl@0
   686
	asm("str lr, [sp, #-4]! ");
sl@0
   687
	asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv);
sl@0
   688
	asm("bl " CSM_ZN9TSpinLock8LockOnlyEv);
sl@0
   689
	asm("mov r0, #1 ");
sl@0
   690
	asm("ldr pc, [sp], #4 ");
sl@0
   691
	}
sl@0
   692
sl@0
   693
sl@0
   694
__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
sl@0
   695
	{
sl@0
   696
	asm("mrs r12, cpsr ");
sl@0
   697
	__ASM_CLI();							/* Disable interrupts */
sl@0
   698
	SPIN_LOCK_ENTRY_CHECK()
sl@0
   699
	asm("1: ");
sl@0
   700
	LDREXH(1,0);
sl@0
   701
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
sl@0
   702
	asm("add r1, r1, #0x100 ");
sl@0
   703
	STREXH(3,1,0);
sl@0
   704
	asm("cmp r3, #0 ");
sl@0
   705
	asm("bne 1b ");
sl@0
   706
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
sl@0
   707
	asm("3: ");
sl@0
   708
	asm("cmp r2, r1 ");						/* out = original in ? */
sl@0
   709
	asm("bne 2f ");							/* no - must wait */
sl@0
   710
	SPIN_LOCK_MARK_ACQ()
sl@0
   711
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
sl@0
   712
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
sl@0
   713
	__JUMP(,lr);
sl@0
   714
sl@0
   715
	asm("2: ");
sl@0
   716
	ARM_WFE;
sl@0
   717
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
sl@0
   718
	asm("b 3b ");
sl@0
   719
	}
sl@0
   720
sl@0
   721
__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
sl@0
   722
	{
sl@0
   723
	SPIN_UNLOCK_ENTRY_CHECK()
sl@0
   724
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
sl@0
   725
	asm("ldrb r2, [r0, #0] ");
sl@0
   726
	asm("mrs r12, cpsr ");
sl@0
   727
	asm("add r2, r2, #1 ");
sl@0
   728
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   729
	asm("strb r2, [r0, #0] ");				/* ++out */
sl@0
   730
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out completes before SEV */
sl@0
   731
	ARM_SEV;								/* Wake up any waiting processors */
sl@0
   732
	asm("orr r1, r1, r12 ");
sl@0
   733
	asm("msr cpsr, r1 ");					/* restore interrupts */
sl@0
   734
	__JUMP(,lr);
sl@0
   735
	}
sl@0
   736
sl@0
   737
__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
sl@0
   738
	{
sl@0
   739
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
sl@0
   740
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
sl@0
   741
	asm("ldrh r2, [r0, #0] ");
sl@0
   742
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
sl@0
   743
	asm("sub r2, r2, r2, lsr #8 ");			/* r2 low byte = (out - in) mod 256 */
sl@0
   744
	asm("and r2, r2, #0xFF ");
sl@0
   745
	asm("cmp r2, #0xFF ");					/* if out - in = -1, no-one else waiting */
sl@0
   746
	asm("addeq r3, r3, #1 ");
sl@0
   747
	asm("cmpeq r3, #1024 ");				/* if no-one waiting for lock, check for pending interrupt */
sl@0
   748
	asm("bne 1f ");							/* branch if someone else waiting */
sl@0
   749
	asm("mov r0, #0 ");						/* else return FALSE */
sl@0
   750
	__JUMP(,lr);
sl@0
   751
sl@0
   752
	asm("1: ");
sl@0
   753
	asm("str lr, [sp, #-4]! ");
sl@0
   754
	asm("bl " CSM_ZN9TSpinLock16UnlockIrqRestoreEi);
sl@0
   755
	asm("bl " CSM_ZN9TSpinLock7LockIrqEv);
sl@0
   756
	asm("mov r0, #1 ");
sl@0
   757
	asm("ldr pc, [sp], #4 ");
sl@0
   758
	}
sl@0
   759
sl@0
   760
sl@0
   761
__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
sl@0
   762
	{
sl@0
   763
	asm("ldrh r2, [r0, #0] ");
sl@0
   764
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
sl@0
   765
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
sl@0
   766
	asm("sub r2, r2, r2, lsr #8 ");			/* r2 low byte = (out - in) mod 256 */
sl@0
   767
	asm("and r2, r2, #0xFF ");
sl@0
   768
	asm("cmp r2, #0xFF ");					/* if out - in = -1, no-one else waiting */
sl@0
   769
	asm("cmpeq r3, #0 ");					/* if no-one else waiting, check if reschedule or IDFCs pending */
sl@0
   770
	asm("bne 1f ");							/* if so or someone else waiting, branch to release lock */
sl@0
   771
	asm("mov r0, #0 ");						/* else return FALSE */
sl@0
   772
	__JUMP(,lr);
sl@0
   773
sl@0
   774
	asm("1: ");
sl@0
   775
	asm("stmfd sp!, {r0,lr} ");
sl@0
   776
	asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv);
sl@0
   777
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
sl@0
   778
	asm("ldr r0, [sp], #4 ");
sl@0
   779
	asm("bl " CSM_ZN9TSpinLock8LockOnlyEv);
sl@0
   780
	asm("mov r0, #1 ");
sl@0
   781
	asm("ldr pc, [sp], #4 ");
sl@0
   782
	}
sl@0
   783
sl@0
   784
sl@0
   785
/******************************************************************************
sl@0
   786
 * Read/Write Spin lock
sl@0
   787
 *
sl@0
   788
 * Structure ( (in.r,in.w) , (out.r,out.w) )
sl@0
   789
 * Fundamental algorithm:
sl@0
   790
 *	lockr()		{ old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
sl@0
   791
 *	unlockr()	{ ++out.r; }
sl@0
   792
 *	lockw()		{ old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
sl@0
   793
 *	unlockw()	{ ++out.w; }
sl@0
   794
 *
sl@0
   795
 * [this+0]		in.w
sl@0
   796
 * [this+1]		in.r
sl@0
   797
 * [this+2]		out.w
sl@0
   798
 * [this+3]		out.r
sl@0
   799
 * [this+4]		Bit mask of CPUs which hold read locks
sl@0
   800
 * [this+6]		order value
sl@0
   801
 * [this+7]		CPU number which holds write lock, 0xFF if none
sl@0
   802
 *
sl@0
   803
 ******************************************************************************/
sl@0
   804
sl@0
   805
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
sl@0
   806
extern "C" __NAKED__ void rwspin_rlock_entry_check()
sl@0
   807
	{
sl@0
   808
	/* R0 points to lock */
sl@0
   809
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
   810
	asm("mrs r12, cpsr ");
sl@0
   811
	__ASM_CLI();							/* Disable interrupts */
sl@0
   812
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   813
	asm("cmp r1, #0 ");
sl@0
   814
	asm("beq rwrlec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   815
	asm("ldr r2, [r0, #4] ");				/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
sl@0
   816
	asm("tst r2, #0x00E00000 ");
sl@0
   817
	asm("bne rwrlec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
   818
sl@0
   819
	/* check interrupts disabled */
sl@0
   820
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   821
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
   822
	asm("beq rwrlec_1 ");					/* Yes - OK */
sl@0
   823
	__ASM_CRASH();							/* No - die */
sl@0
   824
sl@0
   825
	asm("rwrlec_preemption: ");
sl@0
   826
	asm("and r3, r2, #0x00FF0000 ");
sl@0
   827
	asm("cmp r3, #0x00FF0000 ");			/* check for EOrderNone */
sl@0
   828
	asm("beq rwrlec_1 ");					/* EOrderNone - don't check interrupts or preemption */
sl@0
   829
	asm("and r3, r12, #0x1F ");
sl@0
   830
	asm("cmp r3, #0x13 ");					/* Make sure we're in mode_svc */
sl@0
   831
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
   832
	asm("bne rwrlec_preemption_die ");		/* If not, die */
sl@0
   833
	asm("cmp r3, #0 ");
sl@0
   834
	asm("bne rwrlec_1 ");					/* Preemption disabled - OK */
sl@0
   835
	asm("rwrlec_preemption_die: ");
sl@0
   836
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
   837
sl@0
   838
	asm("rwrlec_1: ");
sl@0
   839
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
   840
	asm("eor r3, r2, r3, lsl #24 ");
sl@0
   841
	asm("cmp r3, #0x01000000 ");			/* Held by current CPU for write ? */
sl@0
   842
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
sl@0
   843
	asm("bhs rwrlec_2 ");					/* No - OK */
sl@0
   844
	__ASM_CRASH();							/* Already held by this CPU for write - die */
sl@0
   845
sl@0
   846
	asm("rwrlec_2: ");
sl@0
   847
	asm("tst r2, r3 ");						/* Held by current CPU for read ? */
sl@0
   848
	asm("beq rwrlec_3 ");					/* No - OK */
sl@0
   849
	__ASM_CRASH();							/* Already held by this CPU for read - die */
sl@0
   850
sl@0
   851
	asm("rwrlec_3: ");
sl@0
   852
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   853
	asm("mov r2, r2, lsr #16 ");
sl@0
   854
	asm("and r2, r2, #0xFF ");				/* r2 = lock order */
sl@0
   855
	asm("ldr r1, [r1, #4] ");				/* r3=low word of iSpinLockOrderCheck, r1=high word */
sl@0
   856
	asm("cmp r3, #0 ");
sl@0
   857
	asm("addeq r2, r2, #0x20000000 ");		/* if low word zero, add 32 to LS1 index ... */
sl@0
   858
	asm("moveq r3, r1 ");					/* ... and r3=high word ... */
sl@0
   859
	asm("subs r1, r3, #1 ");				/* R1 = R3 with all bits up to and including LS1 flipped */
sl@0
   860
	asm("beq rwrlec_ok ");					/* If all bits zero, no locks held so OK */
sl@0
   861
	asm("eor r3, r3, r1 ");					/* Clear all bits above LS1 */
sl@0
   862
	CLZ(1,3);								/* R1 = 31 - bit number of LS1 */
sl@0
   863
	asm("rsb r1, r1, #31 ");				/* R1 = bit number of LS1 */
sl@0
   864
	asm("add r1, r1, r2, lsr #24 ");		/* add 32 if we were looking at high word */
sl@0
   865
	asm("mov r2, r2, lsl #24 ");			/* this lock's order value into R2 high byte */
sl@0
   866
	asm("cmp r1, r2, asr #24 ");			/* compare current lowest order lock to sign-extended order value */
sl@0
   867
	asm("bgt rwrlec_ok ");					/* if this lock's order < current lowest, OK */
sl@0
   868
	__ASM_CRASH();							/* otherwise die */
sl@0
   869
sl@0
   870
	asm("rwrlec_ok: ");
sl@0
   871
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   872
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
   873
	__JUMP(,lr);
sl@0
   874
	}
sl@0
   875
sl@0
   876
extern "C" __NAKED__ void rwspin_rlock_mark_acq()
sl@0
   877
	{
sl@0
   878
	/* R0 points to lock */
sl@0
   879
	asm("stmfd sp!, {r1-r4,r12} ");
sl@0
   880
	asm("mrs r12, cpsr ");
sl@0
   881
	__ASM_CLI();							/* Disable interrupts */
sl@0
   882
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   883
	asm("cmp r1, #0 ");
sl@0
   884
	asm("beq rwrlma_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   885
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
sl@0
   886
	asm("add r0, r0, #4 ");
sl@0
   887
	asm("1: ");
sl@0
   888
	LDREXB(2,0);							/* rcpu mask */
sl@0
   889
	asm("orr r2, r2, r3 ");					/* set bit corresponding to current CPU */
sl@0
   890
	STREXB(4,2,0);
sl@0
   891
	asm("cmp r4, #0 ");
sl@0
   892
	asm("bne 1b ");
sl@0
   893
	asm("ldrb r2, [r0, #2] ");				/* R2 = lock order value */
sl@0
   894
	asm("sub r0, r0, #4 ");
sl@0
   895
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   896
	asm("cmp r2, #0x40 ");
sl@0
   897
	asm("bhs rwrlma_ok ");					/* if EOrderNone, done */
sl@0
   898
	asm("cmp r2, #0x20 ");
sl@0
   899
	asm("addhs r1, r1, #4 ");
sl@0
   900
	asm("and r2, r2, #0x1f ");
sl@0
   901
	asm("mov r3, #1 ");
sl@0
   902
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to set */
sl@0
   903
	asm("ldr r2, [r1] ");
sl@0
   904
	asm("orr r2, r2, r3 ");
sl@0
   905
	asm("str r2, [r1] ");					/* set bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
   906
sl@0
   907
	asm("rwrlma_ok: ");
sl@0
   908
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   909
	asm("ldmfd sp!, {r1-r4,r12} ");
sl@0
   910
	__JUMP(,lr);
sl@0
   911
	}
sl@0
   912
sl@0
   913
extern "C" __NAKED__ void rwspin_runlock_entry_check()
sl@0
   914
	{
sl@0
   915
	/* R0 points to lock */
sl@0
   916
	asm("stmfd sp!, {r1-r4,r12} ");
sl@0
   917
	asm("mrs r12, cpsr ");
sl@0
   918
	__ASM_CLI();							/* Disable interrupts */
sl@0
   919
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   920
	asm("cmp r1, #0 ");
sl@0
   921
	asm("beq rwruec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   922
	asm("ldr r2, [r0, #4] ");				/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
sl@0
   923
	asm("tst r2, #0x00E00000 ");
sl@0
   924
	asm("bne rwruec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
   925
sl@0
   926
	/* check interrupts disabled */
sl@0
   927
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   928
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
   929
	asm("beq rwruec_1 ");					/* Yes - OK */
sl@0
   930
	__ASM_CRASH();							/* No - die */
sl@0
   931
sl@0
   932
	asm("rwruec_preemption: ");
sl@0
   933
	asm("and r3, r2, #0x00FF0000 ");
sl@0
   934
	asm("cmp r3, #0x00FF0000 ");			/* check for EOrderNone */
sl@0
   935
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
   936
	asm("beq rwruec_1 ");					/* EOrderNone - don't check interrupts or preemption */
sl@0
   937
	asm("cmp r3, #0 ");
sl@0
   938
	asm("bne rwruec_1 ");					/* Preemption disabled - OK */
sl@0
   939
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
   940
sl@0
   941
	asm("rwruec_1: ");
sl@0
   942
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
sl@0
   943
	asm("tst r2, r3 ");						/* Check if current CPU holds read lock */
sl@0
   944
	asm("bne rwruec_2 ");					/* Read lock held by this CPU - OK */
sl@0
   945
	__ASM_CRASH();							/* Not held by this CPU - die */
sl@0
   946
sl@0
   947
	asm("rwruec_2: ");
sl@0
   948
	asm("add r0, r0, #4 ");
sl@0
   949
	asm("1: ");
sl@0
   950
	LDREX(2,0);								/* rcpu mask */
sl@0
   951
	asm("bic r2, r2, r3 ");					/* clear bit corresponding to current CPU */
sl@0
   952
	STREX(4,2,0);
sl@0
   953
	asm("cmp r4, #0 ");
sl@0
   954
	asm("bne 1b ");
sl@0
   955
	asm("sub r0, r0, #4 ");
sl@0
   956
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
   957
	asm("tst r2, #0x00C00000 ");
sl@0
   958
	asm("bne rwruec_ok ");					/* if EOrderNone, done */
sl@0
   959
	asm("tst r2, #0x00200000 ");
sl@0
   960
	asm("addne r1, r1, #4 ");
sl@0
   961
	asm("mov r2, r2, lsr #16 ");
sl@0
   962
	asm("and r2, r2, #0x1F ");
sl@0
   963
	asm("mov r3, #1 ");
sl@0
   964
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to clear */
sl@0
   965
	asm("ldr r2, [r1] ");
sl@0
   966
	asm("tst r2, r3 ");						/* test bit originally set */
sl@0
   967
	asm("bic r2, r2, r3 ");
sl@0
   968
	asm("str r2, [r1] ");					/* clear bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
   969
	asm("bne rwruec_ok ");					/* if originally set, OK */
sl@0
   970
	__ASM_CRASH();							/* if not, die - something must have got corrupted */
sl@0
   971
sl@0
   972
	asm("rwruec_ok: ");
sl@0
   973
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
   974
	asm("ldmfd sp!, {r1-r4,r12} ");
sl@0
   975
	__JUMP(,lr);
sl@0
   976
	}
sl@0
   977
sl@0
   978
sl@0
   979
extern "C" __NAKED__ void rwspin_wlock_entry_check()
sl@0
   980
	{
sl@0
   981
	/* R0 points to lock */
sl@0
   982
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
   983
	asm("mrs r12, cpsr ");
sl@0
   984
	__ASM_CLI();							/* Disable interrupts */
sl@0
   985
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
   986
	asm("cmp r1, #0 ");
sl@0
   987
	asm("beq rwwlec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
   988
	asm("ldr r2, [r0, #4] ");				/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
sl@0
   989
	asm("tst r2, #0x00E00000 ");
sl@0
   990
	asm("bne rwwlec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
   991
sl@0
   992
	/* check interrupts disabled */
sl@0
   993
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
   994
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
   995
	asm("beq rwwlec_1 ");					/* Yes - OK */
sl@0
   996
	__ASM_CRASH();							/* No - die */
sl@0
   997
sl@0
   998
	asm("rwwlec_preemption: ");
sl@0
   999
	asm("and r3, r2, #0x00FF0000 ");
sl@0
  1000
	asm("cmp r3, #0x00FF0000 ");			/* check for EOrderNone */
sl@0
  1001
	asm("beq rwwlec_1 ");					/* EOrderNone - don't check interrupts or preemption */
sl@0
  1002
	asm("and r3, r12, #0x1F ");
sl@0
  1003
	asm("cmp r3, #0x13 ");					/* Make sure we're in mode_svc */
sl@0
  1004
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
  1005
	asm("bne rwwlec_preemption_die ");		/* If not, die */
sl@0
  1006
	asm("cmp r3, #0 ");
sl@0
  1007
	asm("bne rwwlec_1 ");					/* Preemption disabled - OK */
sl@0
  1008
	asm("rwwlec_preemption_die: ");
sl@0
  1009
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
  1010
sl@0
  1011
	asm("rwwlec_1: ");
sl@0
  1012
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
sl@0
  1013
	asm("tst r2, r3 ");						/* Test if held by current CPU for read */
sl@0
  1014
	asm("beq rwwlec_2 ");					/* No - OK */
sl@0
  1015
	__ASM_CRASH();							/* Yes - die */
sl@0
  1016
sl@0
  1017
	asm("rwwlec_2: ");
sl@0
  1018
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
  1019
	asm("cmp r3, r2, lsr #24 ");			/* Test if held by current CPU for write */
sl@0
  1020
	asm("bne rwwlec_3 ");					/* No - OK */
sl@0
  1021
	__ASM_CRASH();							/* Yes - die */
sl@0
  1022
sl@0
  1023
	asm("rwwlec_3: ");
sl@0
  1024
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
  1025
	asm("mov r2, r2, lsr #16 ");
sl@0
  1026
	asm("and r2, r2, #0xFF ");				/* r2 = lock order */
sl@0
  1027
	asm("ldr r1, [r1, #4] ");				/* r3=low word of iSpinLockOrderCheck, r1=high word */
sl@0
  1028
	asm("cmp r3, #0 ");
sl@0
  1029
	asm("addeq r2, r2, #0x20000000 ");		/* if low word zero, add 32 to LS1 index ... */
sl@0
  1030
	asm("moveq r3, r1 ");					/* ... and r3=high word ... */
sl@0
  1031
	asm("subs r1, r3, #1 ");				/* R1 = R3 with all bits up to and including LS1 flipped */
sl@0
  1032
	asm("beq rwwlec_ok ");					/* If all bits zero, no locks held so OK */
sl@0
  1033
	asm("eor r3, r3, r1 ");					/* Clear all bits above LS1 */
sl@0
  1034
	CLZ(1,3);								/* R1 = 31 - bit number of LS1 */
sl@0
  1035
	asm("rsb r1, r1, #31 ");				/* R1 = bit number of LS1 */
sl@0
  1036
	asm("add r1, r1, r2, lsr #24 ");		/* add 32 if we were looking at high word */
sl@0
  1037
	asm("mov r2, r2, lsl #24 ");			/* this lock's order value into R2 high byte */
sl@0
  1038
	asm("cmp r1, r2, asr #24 ");			/* compare current lowest order lock to sign-extended order value */
sl@0
  1039
	asm("bgt rwwlec_ok ");					/* if this lock's order < current lowest, OK */
sl@0
  1040
	__ASM_CRASH();							/* otherwise die */
sl@0
  1041
sl@0
  1042
	asm("rwwlec_ok: ");
sl@0
  1043
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
  1044
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
  1045
	__JUMP(,lr);
sl@0
  1046
	}
sl@0
  1047
sl@0
  1048
extern "C" __NAKED__ void rwspin_wlock_mark_acq()
sl@0
  1049
	{
sl@0
  1050
	/* R0 points to lock */
sl@0
  1051
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
  1052
	asm("mrs r12, cpsr ");
sl@0
  1053
	__ASM_CLI();							/* Disable interrupts */
sl@0
  1054
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
  1055
	asm("cmp r1, #0 ");
sl@0
  1056
	asm("beq rwwlma_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
  1057
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
  1058
	asm("ldrb r2, [r0, #6] ");				/* R2 = lock order value */
sl@0
  1059
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
  1060
	asm("strb r3, [r0, #7] ");				/* set byte 7 to holding CPU number */
sl@0
  1061
	asm("cmp r2, #0x40 ");
sl@0
  1062
	asm("bhs rwwlma_ok ");					/* if EOrderNone, done */
sl@0
  1063
	asm("cmp r2, #0x20 ");
sl@0
  1064
	asm("addhs r1, r1, #4 ");
sl@0
  1065
	asm("and r2, r2, #0x1f ");
sl@0
  1066
	asm("mov r3, #1 ");
sl@0
  1067
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to set */
sl@0
  1068
	asm("ldr r2, [r1] ");
sl@0
  1069
	asm("orr r2, r2, r3 ");
sl@0
  1070
	asm("str r2, [r1] ");					/* set bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
  1071
sl@0
  1072
	asm("rwwlma_ok: ");
sl@0
  1073
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
  1074
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
  1075
	__JUMP(,lr);
sl@0
  1076
	}
sl@0
  1077
sl@0
  1078
extern "C" __NAKED__ void rwspin_wunlock_entry_check()
sl@0
  1079
	{
sl@0
  1080
	/* R0 points to lock */
sl@0
  1081
	asm("stmfd sp!, {r1,r2,r3,r12} ");
sl@0
  1082
	asm("mrs r12, cpsr ");
sl@0
  1083
	__ASM_CLI();							/* Disable interrupts */
sl@0
  1084
	GET_RWNO_TID(, r1);						/* R1->SubScheduler */
sl@0
  1085
	asm("cmp r1, #0 ");
sl@0
  1086
	asm("beq rwwuec_ok ");					/* Skip checks if subscheduler not yet initialised */
sl@0
  1087
	asm("ldrh r2, [r0, #6] ");				/* R2[8:15]=holding CPU, R2[0:7]=order */
sl@0
  1088
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
sl@0
  1089
	asm("eor r2, r2, r3, lsl #8 ");			/* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */
sl@0
  1090
	asm("tst r2, #0xE0 ");
sl@0
  1091
	asm("bne rwwuec_preemption ");			/* This lock requires preemption to be disabled */
sl@0
  1092
sl@0
  1093
	/* check interrupts disabled */
sl@0
  1094
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
sl@0
  1095
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));		/* Check interrupts masked */
sl@0
  1096
	asm("beq rwwuec_1 ");					/* Yes - OK */
sl@0
  1097
	__ASM_CRASH();							/* No - die */
sl@0
  1098
sl@0
  1099
	asm("rwwuec_preemption: ");
sl@0
  1100
	asm("and r3, r2, #0xFF ");
sl@0
  1101
	asm("cmp r3, #0xFF ");					/* check for EOrderNone */
sl@0
  1102
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
sl@0
  1103
	asm("beq rwwuec_1 ");					/* EOrderNone - don't check interrupts or preemption */
sl@0
  1104
	asm("cmp r3, #0 ");
sl@0
  1105
	asm("bne rwwuec_1 ");					/* Preemption disabled - OK */
sl@0
  1106
	__ASM_CRASH();							/* Preemption enabled - die */
sl@0
  1107
sl@0
  1108
	asm("rwwuec_1: ");
sl@0
  1109
	asm("tst r2, #0xFF00 ");				/* Check if holding CPU ^ current CPU number == 0 */
sl@0
  1110
	asm("beq rwwuec_2 ");					/* Held by this CPU - OK */
sl@0
  1111
	__ASM_CRASH();							/* Not held by this CPU - die */
sl@0
  1112
sl@0
  1113
	asm("rwwuec_2: ");
sl@0
  1114
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
sl@0
  1115
	asm("mov r3, #0xFF ");
sl@0
  1116
	asm("strb r3, [r0, #7] ");				/* reset holding CPU */
sl@0
  1117
	asm("cmp r2, #0x40 ");
sl@0
  1118
	asm("bhs rwwuec_ok ");					/* if EOrderNone, done */
sl@0
  1119
	asm("cmp r2, #0x20 ");
sl@0
  1120
	asm("addhs r1, r1, #4 ");
sl@0
  1121
	asm("and r2, r2, #0x1F ");
sl@0
  1122
	asm("mov r3, #1 ");
sl@0
  1123
	asm("mov r3, r3, lsl r2 ");				/* r3 = bit to clear */
sl@0
  1124
	asm("ldr r2, [r1] ");
sl@0
  1125
	asm("tst r2, r3 ");						/* test bit originally set */
sl@0
  1126
	asm("bic r2, r2, r3 ");
sl@0
  1127
	asm("str r2, [r1] ");					/* clear bit in iSpinLockOrderCheck corresponding to lock order */
sl@0
  1128
	asm("bne rwwuec_ok ");					/* if originally set, OK */
sl@0
  1129
	__ASM_CRASH();							/* if not, die - something must have got corrupted */
sl@0
  1130
sl@0
  1131
	asm("rwwuec_ok: ");
sl@0
  1132
	asm("msr cpsr, r12 ");					/* restore interrupts */
sl@0
  1133
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
sl@0
  1134
	__JUMP(,lr);
sl@0
  1135
	}
sl@0
  1136
#endif
sl@0
  1137
sl@0
  1138
sl@0
  1139
/*-----------------------------------------------------------------------------
sl@0
  1140
 - Read locks disabling IRQ
sl@0
  1141
 -----------------------------------------------------------------------------*/
sl@0
  1142
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
sl@0
  1143
	{
sl@0
  1144
	__ASM_CLI();							/* Disable interrupts */
sl@0
  1145
	RWSPIN_RLOCK_ENTRY_CHECK()
sl@0
  1146
	asm("1: ");
sl@0
  1147
	LDREX(1,0);
sl@0
  1148
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
sl@0
  1149
	asm("add r1, r1, #0x100 ");				/* increment in.r */
sl@0
  1150
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
sl@0
  1151
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
sl@0
  1152
	STREX(3,1,0);
sl@0
  1153
	asm("cmp r3, #0 ");
sl@0
  1154
	asm("bne 1b ");
sl@0
  1155
	asm("3: ");
sl@0
  1156
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
sl@0
  1157
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
sl@0
  1158
	asm("bne 2f ");							/* no - must wait */
sl@0
  1159
	RWSPIN_RLOCK_MARK_ACQ()
sl@0
  1160
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
sl@0
  1161
	__JUMP(,lr);
sl@0
  1162
sl@0
  1163
	asm("2: ");
sl@0
  1164
	ARM_WFE;
sl@0
  1165
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
sl@0
  1166
	asm("b 3b ");
sl@0
  1167
	}
sl@0
  1168
sl@0
  1169
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
sl@0
  1170
	{
sl@0
  1171
	RWSPIN_RUNLOCK_ENTRY_CHECK()
sl@0
  1172
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
sl@0
  1173
	asm("1: ");
sl@0
  1174
	LDREX(2,0);
sl@0
  1175
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
sl@0
  1176
	STREX(3,2,0);
sl@0
  1177
	asm("cmp r3, #0 ");
sl@0
  1178
	asm("bne 1b ");
sl@0
  1179
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
sl@0
  1180
	ARM_SEV;								/* Wake up any waiting processors */
sl@0
  1181
	__ASM_STI();							/* Enable interrupts */
sl@0
  1182
	__JUMP(,lr);
sl@0
  1183
	}
sl@0
  1184
sl@0
  1185
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
sl@0
  1186
	{
sl@0
  1187
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
sl@0
  1188
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
sl@0
  1189
	asm("ldr r2, [r0, #0] ");
sl@0
  1190
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
sl@0
  1191
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
sl@0
  1192
	asm("tst r2, #0xFF ");
sl@0
  1193
	asm("addeq r3, r3, #1 ");
sl@0
  1194
	asm("cmpeq r3, #1024 ");				/* if no writers waiting for lock, check for pending interrupt */
sl@0
  1195
	asm("bne 1f ");							/* branch if writers waiting or pending interrupt */
sl@0
  1196
	asm("mov r0, #0 ");						/* else return FALSE */
sl@0
  1197
	__JUMP(,lr);
sl@0
  1198
sl@0
  1199
	asm("1: ");
sl@0
  1200
	asm("str lr, [sp, #-4]! ");
sl@0
  1201
	asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqREv);
sl@0
  1202
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv);
sl@0
  1203
	asm("mov r0, #1 ");
sl@0
  1204
	asm("ldr pc, [sp], #4 ");
sl@0
  1205
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
	{
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}
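
/*
 Illustrative note (not compiled): the write-acquire path takes a ticket in
 in.w and remembers the whole in halfword (in.w and in.r together). It may
 proceed only once the whole out halfword equals that snapshot, i.e. once every
 reader and writer queued before it has unlocked, which is what makes a writer
 exclusive against both classes. A sketch under the same assumptions as the
 read-lock model above (ModelLockW is an invented name):

	#include <atomic>
	#include <cstdint>

	inline void ModelLockW(std::atomic<uint32_t>& aLockWord)
		{
		uint32_t old = aLockWord.load(std::memory_order_relaxed);
		uint32_t ticket, fresh;
		do	{
			ticket = old & 0xFFFFu;			// remember (in.w, in.r)
			fresh = old + 1u;				// increment in.w
			if ((fresh & 0xFFu) == 0)
				fresh -= 0x100u;			// in.w wrapped - undo the carry into in.r
			} while (!aLockWord.compare_exchange_weak(old, fresh, std::memory_order_relaxed));
		while ((aLockWord.load(std::memory_order_relaxed) >> 16) != ticket)
			{
			// spin - the assembler parks in WFE here and is woken by SEV
			}
		std::atomic_thread_fence(std::memory_order_acquire);	// models the DMB on acquisition
		}
 */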

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__ASM_STI();							/* Enable interrupts */
	__JUMP(,lr);
	}
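
/*
 Illustrative note (not compiled): unlike the read unlock, the write unlock
 needs no LDREX/STREX. While a writer holds the lock no other CPU can retire a
 ticket, so out.w and out.r cannot change underneath it, and a byte store to
 offset 2 leaves the concurrently-changing in bytes untouched; that is why
 plain ldrb/strb are sufficient here. A byte-level sketch, assuming the
 little-endian layout implied by the offset (ModelUnlockW is an invented name):

	#include <atomic>
	#include <cstdint>

	inline void ModelUnlockW(volatile uint8_t* aLockBytes)
		{
		std::atomic_thread_fence(std::memory_order_release);		// models the DMB on entry
		aLockBytes[2] = static_cast<uint8_t>(aLockBytes[2] + 1u);	// increment out.w
		// the assembler then issues DSB + SEV to wake cores waiting in WFE
		}
 */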

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("add r3, r3, #1 ");
	asm("cmp r3, #1024 ");					/* if no-one waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqWEv);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
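
/*
 Illustrative note (not compiled): for the write-side flash the "anyone
 waiting?" test is stricter. With this writer's own ticket notionally retired
 (out.w incremented), the out halfword must equal the in halfword; anything
 else means another reader or writer has queued behind us. That test, combined
 with the same GIC pending-interrupt check as FlashIrqR, decides whether to
 drop and re-take the lock (ModelShouldFlashW is an invented name):

	#include <cstdint>

	inline bool ModelShouldFlashW(uint32_t aLockWord, uint32_t aHighestPending)
		{
		uint32_t retired = aLockWord + 0x00010000u;		// as if our own out.w were already incremented
		if ((retired & 0x00FF0000u) == 0)
			retired -= 0x01000000u;						// out.w wrapped - undo the carry into out.r
		bool someoneWaiting = (retired >> 16) != (retired & 0xFFFFu);	// (out.w+1,out.r) != (in.w,in.r)
		bool irqPending = (aHighestPending + 1u) != 1024u;
		return someoneWaiting || irqPending;
		}
 */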



/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
	{
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");				/* increment in.r */
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
	{
	asm("ldr r2, [r0, #0] ");
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("bne 1f ");							/* branch if writers waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv);
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
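
/*
 Illustrative note (not compiled): the -Only variants above are the same ticket
 algorithms as the IRQ-disabling versions, minus the CLI/STI and the GIC
 pending-interrupt test, for callers that manage interrupt state themselves.
 The flash functions exist so that long operations under the lock can
 periodically let queued writers (or readers) in. A hypothetical usage sketch -
 ScanTable, TableEntry and Inspect are invented names:

	struct TableEntry { TUint32 iValue; };			// hypothetical payload
	inline void Inspect(const TableEntry&) { }		// hypothetical per-entry work

	void ScanTable(TRWSpinLock& aLock, TableEntry* aTable, TInt aCount)
		{
		aLock.LockOnlyR();
		for (TInt i = 0; i < aCount; ++i)
			{
			Inspect(aTable[i]);
			if (aLock.FlashOnlyR())
				{
				// the lock was dropped and re-taken: anything read from the
				// table before this point may be stale and may need re-checking
				}
			}
		aLock.UnlockOnlyR();
		}
 */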


/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
	{
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
	{
	asm("ldr r2, [r0, #0] ");
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv);
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}



/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");				/* increment in.r */
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
	asm("b 3b ");
	}
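
/*
 Illustrative note (not compiled): the IrqSave variants capture the caller's
 CPSR I and F bits (KAllInterruptsMask) before disabling interrupts and return
 them; the matching UnlockIrqRestore functions OR that value back into a CPSR
 whose interrupt bits have been cleared, so the caller's interrupt state is
 reinstated exactly rather than interrupts being unconditionally re-enabled.
 That makes the lock usable from code that may already be running with
 interrupts off. Intended pairing, with invented names for the caller:

	void SampleStats(TRWSpinLock& aLock, const volatile TUint32& aCounter, TUint32& aOut)
		{
		TInt irq = aLock.LockIrqSaveR();	// returns the caller's CPSR I/F bits
		aOut = aCounter;					// read the protected data
		aLock.UnlockIrqRestoreR(irq);		// puts the I/F bits back as they were
		}
 */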

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("mrs r12, cpsr ");
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	ARM_SEV;								/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");					/* restore interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");				/* if no writers waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if writers waiting or pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreREi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("mrs r12, cpsr ");
	asm("add r2, r2, #1 ");
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");					/* restore interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("add r3, r3, #1 ");
	asm("cmp r3, #1024 ");					/* if no-one else waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreWEi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("cmpeq r3, #0 ");					/* if no writers waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");							/* branch if so or if writers waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
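
/*
 Illustrative note (not compiled): FlashPreemptR is the flash variant for
 preemptible (thread) context. Instead of the GIC it consults the per-CPU
 iRescheduleNeededFlag, and when a writer is queued or a reschedule/IDFC is
 pending it drops the read lock, calls NKern::PreemptionPoint() so the
 scheduler can act, then re-takes the lock. Logically it behaves like this
 sketch; WriterWaiting and ReschedulePending are invented helpers standing in
 for the inline checks in the assembler, which also preserves r0 across the calls:

	TBool WriterWaiting(const TRWSpinLock& aLock);	// invented: the out.w != in.w test
	TBool ReschedulePending();						// invented: the iRescheduleNeededFlag test

	TBool ModelFlashPreemptR(TRWSpinLock& aLock)
		{
		if (!WriterWaiting(aLock) && !ReschedulePending())
			return EFalse;				// nothing to yield to
		aLock.UnlockOnlyR();			// give writers / the scheduler a window
		NKern::PreemptionPoint();		// may reschedule this thread
		aLock.LockOnlyR();				// re-take the read lock
		return ETrue;
		}
 */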


/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("cmp r3, #0 ");						/* no-one else waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");							/* if so, branch to release lock */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}