// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncthrd.cia
//
//

#define __INCLUDE_NTHREADBASE_DEFINES__

#include <arm.h>
#include <e32cia.h>

#undef	iDfcState
#define	iDfcState		i8816.iHState16

extern "C" void send_accumulated_resched_ipis();

/******************************************************************************
 * Thread
 ******************************************************************************/
extern "C" __NAKED__ void __StartThread()
	{
	// On entry:
	//		R0->TSubScheduler, R1=0, R2=1, R3->current thread
	//		R12=resched IPIs
	// Interrupts disabled

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack, iCPSR));
	asm("tst	r0, #0x0f ");
	asm("bne	2f ");
	USER_MEMORY_GUARD_OFF(,r0,r0);
	asm("2:		");
#endif
	asm("ldmia	sp, {r0-r14}^ ");			// load initial values for R0-R12, R13_usr, R14_usr
	asm("nop	");							// don't access banked register immediately after
	asm("add	sp, sp, #64 ");				// point to saved PC, CPSR (skip iExcCode)
	asm("adr	lr, 1f ");					// set lr_svc in case thread returns
	RFEIAW(13);								// restore PC and CPSR - jump to thread entry point

	asm("1:		");
	asm("b "	CSM_ZN5NKern4ExitEv);		// if control returns, call NKern::Exit()
	}


extern "C" __NAKED__ TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/)
	{
	asm("adr	r1, 9f ");
	asm("mov	r3, r0 ");
	asm("mvn	r0, #0 ");
	asm("1:		");
	asm("ldr	r2, [r1], #4 ");
	asm("add	r0, r0, #1 ");
	asm("cmp	r2, r3 ");
	asm("beq	2f ");
	asm("cmp	r2, #0 ");
	asm("bne	1b ");
	asm("mvn	r0, #0 ");
	asm("2:		");
	__JUMP(,	lr);

	asm("9:		");
	asm(".word " CSM_CFUNC(__StartThread));
	asm(".word	nkern_unlock_resched_return ");
	asm(".word	nkern_preemption_point_resched_return ");
	asm(".word	nkern_wfar_resched_return ");
	asm(".word	irq_resched_return ");
	asm(".word	exec_wfar_resched_return ");
	asm(".word	0 ");
	}
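
/*
 * Editor's sketch (not part of the original source): the loop above scans the
 * zero-terminated address table at label 9 and returns the zero-based index of
 * the entry equal to aReschedReturn, or -1 if none matches. In C-style
 * pseudocode, with `KContextTable` as a hypothetical name for that table:
 *
 *	TInt get_kernel_context_type(TLinAddr aReschedReturn)
 *		{
 *		for (TInt i = 0; KContextTable[i] != 0; ++i)
 *			{
 *			if (KContextTable[i] == aReschedReturn)
 *				return i;	// 0=__StartThread, 1=unlock, 2=preemption point, ...
 *			}
 *		return -1;			// not a recognised kernel reschedule context
 *		}
 */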


/**	Mark the beginning of an event handler tied to a thread or thread group

	Return the number of the CPU on which the event handler should run
*/
__NAKED__ TInt NSchedulable::BeginTiedEvent()
	{
	asm("add r1, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(0,1);						// r0 = original value of iEventState
	asm("add r2, r0, #%a0" : : "i" ((TInt)EEventCountInc));
	STREX(3,2,1);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r3);
	asm("tst r0, #%a0" : : "i" ((TInt)EEventParent));
	asm("ldrne r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("beq bte0 ");				// EEventParent not set so don't look at group
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("beq bte_bad ");
	asm("cmp r2, r1 ");
	asm("beq bte2 ");				// parent not yet updated, use iNewParent
	asm("bte1: ");
	LDREX(0,2);						// r0 = original value of iEventState
	asm("add r3, r0, #%a0" : : "i" ((TInt)EEventCountInc));
	STREX(12,3,2);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("bte0: ");
	asm("and r0, r0, #%a0" : : "i" ((TInt)EEventCpuMask));
	__JUMP(,lr);					// return event CPU

	asm("bte2: ");
	__DATA_MEMORY_BARRIER__(r3);	// make sure iNewParent is read after iParent
	asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("bne bte1 ");				// iNewParent set so OK
	__DATA_MEMORY_BARRIER__(r3);	// make sure iParent is read after iNewParent
	asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("cmp r2, r1 ");
	asm("bne bte1 ");				// iParent now set so OK, otherwise something is wrong

	asm("bte_bad: ");
	__ASM_CRASH();
	}
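
/*
 * Editor's sketch (not part of the original source): BeginTiedEvent()
 * atomically increments the event count in iEventState and, if the
 * EEventParent flag is set, the parent group's count too, chasing
 * iParent/iNewParent while the thread is migrating between groups. Roughly,
 * with the LDREX/STREX loops expressed through a hypothetical atomic_add()
 * that returns the old value:
 *
 *	TInt NSchedulable::BeginTiedEvent()
 *		{
 *		TUint32 s = atomic_add(&iEventState, EEventCountInc);
 *		if (s & EEventParent)
 *			{
 *			NSchedulable* p = iParent;	// equals 'this' mid-migration,
 *			if (p == this)				// in which case iNewParent is used
 *				p = iNewParent;			// (a null parent pointer faults)
 *			s = atomic_add(&p->iEventState, EEventCountInc);
 *			}
 *		return s & EEventCpuMask;		// CPU the event handler should run on
 *		}
 */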


/**	Mark the end of an event handler tied to a thread or thread group

*/
__NAKED__ void NSchedulable::EndTiedEvent()
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("ldr r1, [r0, #%a0]!" : : "i" _FOFF(NSchedulable, iEventState));
	asm("tst r1, #%a0" : : "i" ((TInt)EEventParent));
	asm("bne etep0 ");				// branch out if EEventParent set

	// r0->NSchedulable::iEventState
	asm("ete1: ");
	LDREX(1,0);
	asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc));	// decrement event count
	asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc));		// check if now zero
	asm("biccc r1, r1, #0xFF ");	// if so, mask event CPU ...
	asm("andcc r2, r1, #0x1F00 ");	// ... and r2 = thread CPU << 8 ...
	asm("orrcc r1, r1, r2, lsr #8 ");	// ... and event CPU = thread CPU
	STREX(12,1,0);
	asm("teq r12, #0 ");			// test for success, leave carry alone
	asm("bne ete1 ");				// retry if STREX failed
	asm("bcs ete2 ");				// if not last tied event, finish
	asm("tst r1, #%a0" : : "i" ((TInt)EDeferredReady));
	asm("addne r0, r0, #%a0" : : "i" (_FOFF(NSchedulable,i_IDfcMem) - _FOFF(NSchedulable,iEventState)));
	asm("bne " CSM_ZN4TDfc3AddEv );	// if deferred ready, add IDFC to action it
	asm("ete2: ");					// ready not deferred so finish
	__JUMP(,lr);

	asm("etep0: ");
	__DATA_MEMORY_BARRIER__(r12);	// make sure iParent is read after seeing parent flag set
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("beq ete_bad ");			// no parent - shouldn't happen
	asm("cmp r3, r0 ");				// parent == this ?
	asm("beq etep1 ");				// if so, parent not yet updated so use iNewParent

	asm("etep2: ");
	asm("stmfd sp!, {r0,lr} ");		// save this and return address
	asm("mov r0, r3 ");				// operate on parent
	asm("bl ete1 ");				// update parent state
	asm("ldmfd sp!, {r0,lr} ");
	asm("1: ");
	LDREX(1,0);
	asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc));	// decrement event count
	STREX(12,1,0);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);

	asm("etep1: ");
	__DATA_MEMORY_BARRIER__(r12);	// make sure iNewParent is read after iParent
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("bne etep2 ");				// iNewParent set so OK
	__DATA_MEMORY_BARRIER__(r12);	// make sure iParent is read after iNewParent
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("cmp r3, r0 ");
	asm("bne etep2 ");				// iParent now set so OK, otherwise something is wrong

	asm("ete_bad: ");
	__ASM_CRASH();
	}
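
/*
 * Editor's sketch (not part of the original source): EndTiedEvent()
 * atomically decrements the event count; when the last tied event completes,
 * the event CPU field is rewritten from the thread CPU field so that future
 * events follow the thread, and a deferred ready is actioned via the IDFC in
 * i_IDfcMem. Approximate logic for the no-parent path, treating the
 * LDREX/STREX loop as one atomic update:
 *
 *	// executed atomically on iEventState
 *	TUint32 s = iEventState - EEventCountInc;		// decrement event count
 *	TBool last = (s < (TUint32)EEventCountInc);		// count now zero?
 *	if (last)
 *		s = (s & ~0xFFu) | ((s & 0x1F00) >> 8);		// event CPU = thread CPU
 *	iEventState = s;
 *	// then, if this was the last event and a ready was deferred:
 *	if (last && (s & EDeferredReady))
 *		((TDfc*)&i_IDfcMem)->Add();					// action the deferred ready
 */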


/**	Check for concurrent tied events when a thread/group becomes ready

	This is only ever called on a lone thread or a group, not on a thread
	which is part of a group.

	Update the thread CPU field in iEventState
	If thread CPU != event CPU and event count nonzero, atomically
	set the ready deferred flag and return TRUE, else return FALSE.
	If event count zero, set event CPU = thread CPU atomically.

	@param aCpu the CPU on which the thread/group is to become ready
	@return	TRUE if the ready must be deferred.
*/
__NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt /*aCpu*/)
	{
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(2,0);						// r2 = original iEventState
	asm("bic r3, r2, #0x1F00 ");	// r3 = original iEventState with thread CPU zeroed out
	asm("orr r3, r3, r1, lsl #8 ");	// set thread CPU field = aCpu
	asm("cmp r3, #%a0" : : "i" ((TInt)EEventCountInc));
	asm("bhs 2f ");					// branch if event count nonzero
	asm("bic r3, r3, #0xFF ");		// else mask event CPU ...
	asm("orr r3, r3, r1 ");			// ... and set event CPU = thread CPU = aCpu
	asm("3: ");
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("eor r0, r2, r3 ");			// r0 = old event state ^ new event state
	asm("and r0, r0, #%a0" : : "i" ((TInt)EDeferredReady));
	__JUMP(,lr);					// return TRUE if we just set EDeferredReady

	// event count is nonzero
	asm("2: ");
	asm("eor r12, r3, r3, lsr #8 ");	// r12 bottom 5 bits = thread CPU ^ event CPU
	asm("tst r12, #0x1F ");				// thread CPU == event CPU?
	asm("orrne r3, r3, #%a0" : : "i" ((TInt)EDeferredReady));	// if not, set EDeferredReady
	asm("b 3b ");
	}
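
/*
 * Editor's sketch (not part of the original source): the interlock above is a
 * single atomic read-modify-write of iEventState (the loop body between LDREX
 * and STREX). In C-style pseudocode:
 *
 *	// executed atomically on iEventState
 *	TUint32 old = iEventState;
 *	TUint32 s = (old & ~0x1F00u) | (aCpu << 8);		// thread CPU = aCpu
 *	if (s >= (TUint32)EEventCountInc)				// tied events outstanding?
 *		{
 *		if (((s ^ (s >> 8)) & 0x1F) != 0)			// thread CPU != event CPU?
 *			s |= EDeferredReady;					// then defer the ready
 *		}
 *	else
 *		s = (s & ~0xFFu) | aCpu;					// no events: event CPU = thread CPU
 *	iEventState = s;
 *	return (old ^ s) & EDeferredReady;				// TRUE iff EDeferredReady just set
 */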


/**	Check for concurrent tied events when a thread leaves a group

	If event count zero, atomically	set the event and thread CPUs to the
	current CPU, clear the parent flag and return TRUE, else return FALSE.

	@return	TRUE if the parent flag has been cleared
	@pre	Preemption disabled
*/
__NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
	{
	GET_RWNO_TID(, r1);					// R1->SubScheduler
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("orr r1, r1, r1, lsl #8 ");		// event CPU = thread CPU = this CPU, EDeferredReady, EEventParent clear
	asm("1: ");
	LDREX(2,0);
	asm("cmp r2, #%a0" : : "i" ((TInt)EEventCountInc));		// check if event count zero
	asm("bhs 0f ");						// if not, finish and return FALSE
	STREX(3,1,0);						// else update CPUs and clear parent flag
								// NOTE: Deferred ready flag must have been clear since thread is running
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);				// return TRUE (assumes this!=0)
	asm("0:");
	asm("mov r0, #0 ");
	__JUMP(,lr);				// return FALSE
	}


/**	Check for concurrent tied events when a thread joins a group

	If event count zero, atomically	set the parent flag and return TRUE,
	else return FALSE.

	@return	TRUE if the parent flag has been set
	@pre	Preemption disabled
*/
__NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
	{
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(1,0);
	asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc));		// check if event count zero
	asm("bhs 0f ");						// if not, finish and return FALSE
	asm("orr r2, r1, #%a0" : : "i" ((TInt)EEventParent));	// else set parent flag
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);				// return TRUE (assumes this!=0)
	asm("0:");
	asm("mov r0, #0 ");
	__JUMP(,lr);				// return FALSE
	}


#ifdef __FAST_SEM_MACHINE_CODED__
/******************************************************************************
 * Fast semaphore
 ******************************************************************************/

/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore and
	removes the calling thread from the ready-list if the semaphore becomes
	unsignalled. Only the thread that owns a fast semaphore can wait on it.
	
	Note that this function does not block, it merely updates the NThread state;
	rescheduling will only occur when the kernel is unlocked. Generally threads
	would use NKern::FSWait() which manipulates the kernel lock for you.

	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.
	@pre Kernel must be locked.
	
	@post Kernel is locked.
	
	@see NFastSemaphore::Signal()
	@see NKern::FSWait()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Wait()
	{
	ASM_DEBUG1(FSWait,r0);

	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4-r7} ");
	asm("ldr	r6, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r3, r0 ");
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("add	r7, r6, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0f ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		2,0);					// count
	asm("mov	r5, r6, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r2, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);

	asm("cmp	r2, #0 ");				// original count zero ?
	asm("bne	2f ");					// if not, no need to wait
	asm("mov	r2, #1 ");
	asm("strb	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// else we need to reschedule
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,	lr);

	asm("2:		");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}
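
/*
 * Editor's note (not part of the original source): when the semaphore is
 * unsignalled, the code above packs the waiting thread's address into iCount.
 * NThreadBase objects are word aligned, so (thread>>2)|0x80000000 is a
 * negative value from which Signal() can recover the pointer as count<<2.
 * The count update is, approximately:
 *
 *	// executed atomically on iCount; 'me' is TSubScheduler::iCurrentThread
 *	if (iCount > 0)
 *		--iCount;				// signalled: consume one signal, no wait
 *	else
 *		iCount = (TInt)(((TLinAddr)me >> 2) | 0x80000000u);	// park the waiter
 *	// if the original count was zero, iRescheduleNeededFlag is then set so
 *	// that the thread actually blocks when the kernel is next unlocked
 */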


/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore
	and waits for a signal if the semaphore becomes unsignalled. Only the
	thread that owns a fast	semaphore can wait on it.

	@param aSem The semaphore to wait on.
	
	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.
	
	@see NFastSemaphore::Wait()
*/
EXPORT_C __NAKED__ void NKern::FSWait(NFastSemaphore* /*aSem*/)
	{
	ASM_DEBUG1(NKFSWait,r0);

	__ASM_CLI();							// all interrupts off
	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4,r5,r11,lr} ");
	asm("ldr	r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("mov	r3, r0 ");
	asm("add	r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("b		nkfswait1 ");
	}


/** Waits for a signal on the current thread's I/O semaphore.

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled
 */
EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
	{
	ASM_DEBUG0(WFAR);

	__ASM_CLI();							// all interrupts off
	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4,r5,r11,lr} ");
	asm("ldr	r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("add	r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("add	r3, r11, #%a0" : : "i" _FOFF(NThreadBase, iRequestSemaphore));

	asm("nkfswait1: ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0f ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		2,3);					// count
	asm("mov	r5, r11, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r2, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,3);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);

	asm("cmp	r2, #0 ");				// original count zero ?
	asm("beq	2f ");					// if so we must wait
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("bne	0f ");
	__ASM_STI();
	__POPRET("r4,r5,r11,");

	asm("0:		");
	__ASM_CRASH();

	asm("2:		");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));			// else lock the kernel
	__ASM_STI();
	asm("strb	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// and set the reschedule flag
	asm("stmfd	sp!, {r0,r4-r10} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );	// reschedule
	asm(".global nkern_wfar_resched_return ");
	asm("nkern_wfar_resched_return: ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	__ASM_STI();
	__POPRET("r0,r4-r11,");

	asm(".global wait_for_any_request ");
	asm("wait_for_any_request: ");
	asm("add	r3, r9, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("add	r7, r9, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0b ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		0,3);					// count
	asm("mov	r5, r9, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r0, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,3);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef __RECORD_STATE__
	asm("str	r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iNThreadBaseSpare6));
#endif

	asm("cmp	r0, #0 ");				// original count zero ?
	asm("beq	exec_wfar_wait ");		// yes - must wait
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("ldreq	r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));	// check for callbacks
	asm("beq	exec_wfar_finish ");
	asm("b		0b ");
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore by
	one and releases any waiting thread if the semaphore becomes signalled.
	
	Note that a reschedule will not occur before this function returns, this will
	only take place when the kernel is unlocked. Generally threads
	would use NKern::FSSignal() which manipulates the kernel lock for you.
	
	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.
	
	@see NFastSemaphore::Wait()
	@see NKern::FSSignal()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Signal()
	{
	ASM_DEBUG1(FSSignal,r0);

	asm("mov	r1, #1 ");
	asm("fssignal1: ");
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1:		");
	LDREX(		2,0);				// count
	asm("cmp	r2, #0 ");
	asm("sublt	r3, r1, #1 ");		// if count<0, replace with aCount-1
	asm("addges	r3, r2, r1 ");		// if count>=0, add aCount
	asm("bvs	0f ");				// if overflow, leave alone
	STREX(		12,3,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r2, #0 ");
	asm("movlt	r1, r2, lsl #2 ");	// if original count<0 r1 = original count<<2 = thread
	asm("blt	fs_signal_wake ");
	asm("0:		");
	__JUMP(,	lr);				// else finished

	asm("fs_signal_wake: ");
	asm("stmfd	sp!, {r4-r6,lr} ");
	asm("mov	r4, r0 ");
	asm("mov	r5, r1 ");
	asm("mov	r0, r1 ");
	asm("bl		AcqSLock__12NSchedulable ");
	asm("add	r0, r5, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("mov	r1, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore));
	asm("mov	r2, r4 ");
	asm("mov	r3, #0 ");
	asm("bl		UnBlockT__16NThreadWaitStateUiPvi ");
	asm("mov	r0, r5 ");
	asm("ldmfd	sp!, {r4-r6,lr} ");
	asm("b		RelSLock__12NSchedulable ");
	}
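
/*
 * Editor's sketch (not part of the original source): Signal(), SignalN() and
 * NKern::FSSignal() all funnel through this count update. A negative count is
 * a packed pointer to the single waiting thread (see the note after Wait()
 * above), so signalling replaces it with aCount-1 and wakes that thread:
 *
 *	// executed atomically on iCount (the LDREX/STREX loop above)
 *	TInt old = iCount;
 *	if (old < 0)
 *		iCount = aCount - 1;		// one signal is consumed by the waiter
 *	else if (old + aCount does not overflow)
 *		iCount = old + aCount;
 *	// afterwards, outside the atomic section:
 *	if (old < 0)
 *		{
 *		NThreadBase* t = (NThreadBase*)(old << 2);	// recover the waiter
 *		// fs_signal_wake: AcqSLock(), UnBlockT(EWaitFastSemaphore, this, 0),
 *		// RelSLock() on that thread
 *		}
 */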


/** Signals a fast semaphore multiple times.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.

	@internalComponent	
 */
EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
	{
	ASM_DEBUG2(FSSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("bgt	fssignal1 ");
	__JUMP(,	lr);
	}


/** Signals the request semaphore of a nanothread several times.

	This function is intended to be used by the EPOC layer and personality
	layers.  Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal.  If NULL, the current thread is signalled.
	@param aCount Number of times the request semaphore must be signalled.
	
	@pre aCount >= 0

	@see Kern::RequestComplete()
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, TInt /*aCount*/)
	{
	ASM_DEBUG2(NKThreadRequestSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("ble	0f ");
	asm("cmp	r0, #0 ");
	asm("addne	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("bne	nkfssignal1 ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("b		nkfssignal2 ");

	asm("0:		");
	__JUMP(eq,	lr);
	__ASM_CRASH();
	}


/** Signals the request semaphore of a nanothread.

	This function is intended to be used by the EPOC layer and personality
	layers.  Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal. Must be non-NULL.

	@see Kern::RequestComplete()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
	{
	ASM_DEBUG1(NKThreadRequestSignal,r0);
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));

	/* fall through to FSSignal() ... */
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore
	by one and releases any	waiting thread if the semaphore becomes signalled.
	
	@param aSem The semaphore to signal.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::FSSignal(NFastSemaphore* /*aSem*/)
	{
	ASM_DEBUG1(NKFSSignal,r0);

	asm("mov	r1, #1 ");
	asm("nkfssignal1: ");
	__ASM_CLI();
	asm("nkfssignal2: ");
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1:		");
	LDREX(		2,0);				// count
	asm("cmp	r2, #0 ");
	asm("sublt	r3, r1, #1 ");		// if count<0, replace with aCount-1
	asm("addges	r3, r2, r1 ");		// if count>=0, add aCount
	asm("bvs	0f ");				// if overflow, leave alone
	STREX(		12,3,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r2, #0 ");
	asm("blt	2f ");
	asm("0:		");
	__ASM_STI();
	__JUMP(,	lr);				// else finished

	asm("2:		");
	GET_RWNO_TID(,r3);
	asm("mov	r1, r2, lsl #2 ");	// if original count<0 r1 = original count<<2 = thread
	asm("ldr	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("stmfd	sp!, {r4,lr} ");
	asm("add	r12, r12, #1 ");			// lock the kernel
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	asm("bl		fs_signal_wake ");			// wake up the thread
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");
	}


/** Signals a fast semaphore multiple times.

    Increments the signal count of a
	fast semaphore by aCount and releases any waiting thread if the semaphore
	becomes signalled.
	
	@param aSem The semaphore to signal.
	@param aCount The number of times to signal the semaphore.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::FSSignalN(NFastSemaphore* /*aSem*/, TInt /*aCount*/)
	{
	ASM_DEBUG2(NKFSSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("bgt	nkfssignal1 ");
	__JUMP(,	lr);
	}


/** Cancels a wait on a fast semaphore.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.

	@internalComponent	
 */
__NAKED__ void NFastSemaphore::WaitCancel()
	{
	asm("mov	r1, #1 ");
	/* Fall through ... */
	}

/* Fall through ... */
#endif
/* Fall through ... */

/**	Increment a fast semaphore count

	Do memory barrier
	If iCount >= 0, increment by aCount and return 0
	If iCount < 0, set count equal to aCount-1 and return (original count << 2)

	Release semantics
*/
__NAKED__ NThreadBase* NFastSemaphore::Inc(TInt /*aCount*/)
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREX(2,0);					// count
	asm("cmp r2, #0 ");
	asm("sublt r3, r1, #1 ");	// if count<0, replace with aCount-1
	asm("addges r3, r2, r1 ");	// if count>=0, add aCount
	asm("bvs 0f ");				// if overflow leave alone
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("0: ");
	asm("cmp r2, #0 ");
	asm("movlt r0, r2, lsl #2 ");	// if original count<0, return count<<2
	asm("movge r0, #0 ");			// else return 0
	__JUMP(,lr);
	}
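
/*
 * Editor's sketch (not part of the original source): Inc() is the release-side
 * primitive underlying the Signal() variants, but returns the previously
 * waiting thread (if any) instead of waking it. With a hypothetical TryStore()
 * standing in for STREX:
 *
 *	NThreadBase* NFastSemaphore::Inc(TInt aCount)
 *		{
 *		// release barrier first (__DATA_MEMORY_BARRIER_Z__ above)
 *		TInt old, n;
 *		do	{
 *			old = iCount;								// LDREX
 *			n = (old < 0) ? aCount - 1 : old + aCount;	// (overflow check omitted)
 *			} while (!TryStore(&iCount, n));			// STREX failed - retry
 *		return (old < 0) ? (NThreadBase*)(old << 2) : NULL;	// previous waiter
 *		}
 */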


/**	Decrement a fast semaphore count

	If count > 0, decrement
	If count = 0, set equal to (thread>>2)|0x80000000
	Return original count
	Full barrier semantics
*/
__NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREX(2,0);					// count
	asm("subs r3, r2, #1 ");
	asm("movlt r3, #0x80000000 ");
	asm("orrlt r3, r3, r1, lsr #2 ");	// if --count<0, r3=(thread>>2)|0x80000000
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("mov r0, r2 ");			// return original count
	__JUMP(,lr);
	}

/**	Reset a fast semaphore count

	Do memory barrier
	If iCount >= 0, set iCount=0 and return 0
	If iCount < 0, set iCount=0 and return (original count << 2)

	Release semantics
*/
__NAKED__ NThreadBase* NFastSemaphore::DoReset()
	{
	__DATA_MEMORY_BARRIER_Z__(r3);
	asm("1: ");
	LDREX(2,0);					// count
	STREX(12,3,0);				// zero count
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("cmp r2, #0 ");
	asm("movlt r0, r2, lsl #2 ");	// if original count<0, return count<<2
	asm("movge r0, #0 ");			// else return 0
	__JUMP(,lr);
	}


#ifdef __NTHREAD_WAITSTATE_MACHINE_CODED__
/******************************************************************************
 * Thread wait state
 ******************************************************************************/

__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("and	r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
	asm("and	r1, r1, #0xff ");
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, r1, lsl #8 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r4, #0 ");
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}

__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/, TUint32 /*aTimeout*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("and	r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
	asm("and	r1, r1, #0xff ");
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, r1, lsl #8 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("ldr	r12, [sp, #8] ");
	asm("cmp	r4, #0 ");
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}
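
/*
 * Editor's note (not part of the original source): both SetUpWait() overloads
 * update the first two words of NThreadWaitState as one 64-bit unit via
 * LDREXD/STREXD. From the register usage above, the layout appears to be:
 *
 *	// hypothetical view of the doubleword updated atomically
 *	struct WS64
 *		{
 *		TUint32	iState;		// (aType & 0xff) << 8, ORed with EWtStWaitPending
 *							// and (aFlags & EWtStObstructed)
 *		TAny*	iWaitObj;	// aWaitObj, passed in r3 by the caller
 *		};
 *
 * A non-zero old state word means a wait was already in progress, which is
 * treated as fatal (__ASM_CRASH). The timed overload also stores aTimeout
 * (fetched from the stack) in iTimer.iTriggerTime.
 */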

__NAKED__ void NThreadWaitState::CancelWait()
	{
	asm("mov	r12, r0 ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		0,12);
	STREXD(		1,2,12);
	asm("cmp	r1, #0 ");
	asm("bne	1b ");
	asm("tst	r0, #%a0" : : "i" ((TInt)(EWtStDead|EWtStWaitActive)));
	asm("bne	0f ");
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}

__NAKED__ TInt NThreadWaitState::DoWait()
	{
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iTriggerTime));
	asm("1:		");
	LDREXD(		2,0);
	asm("cmp	r1, #0 ");
	asm("orrne	r2, r2, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("tst	r2, #%a0" : : "i" ((TInt)EWtStDead));
	asm("bne	0f ");
	asm("tst	r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("beq	9f ");
	asm("bic	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitActive));
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r1, #0 ");
	asm("bne	2f ");
	asm("mov	r0, r2, lsr #8 ");
	__JUMP(,	lr);

	asm("2:		");
	asm("stmfd	sp!, {r2-r4,lr} ");
	asm("mov	r4, r0 ");
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadWaitState,iTimer));
	asm("mov	r2, #1 ");
	asm("bl	"	CSM_ZN6NTimer7OneShotEii );
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
	asm("cmp	r0, #0 ");
	asm("bne	8f ");
	asm("add	r1, r1, #1 ");
	asm("str	r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
	asm("ldmfd	sp!, {r2-r4,lr} ");
	asm("mov	r0, r2, lsr #8 ");
	__JUMP(,	lr);

	asm("0:		");
	asm("mvn	r0, #%a0" : : "i" (~KErrDied));
	__JUMP(,	lr);
	asm("9:		");
	asm("mvn	r0, #%a0" : : "i" (~KErrGeneral));
	__JUMP(,	lr);
	asm("8:		");
	__ASM_CRASH();
	}

__NAKED__ TInt NThreadWaitState::UnBlockT(TUint /*aType*/, TAny* /*aWaitObj*/, TInt /*aReturnValue*/)
	{
	asm("stmfd	sp!, {r4-r6,lr} ");
	asm("mov	r6, r2 ");					// r6 = aWaitObj
	asm("mov	r2, #0 ");
	__DATA_MEMORY_BARRIER__(r2);
	asm("1:		");
	LDREXD(		4,0);						// r5:r4 = oldws64
	asm("cmp	r5, r6 ");					// does iWaitObj match?
	asm("bne	2f ");						// no
	asm("eor	r12, r4, r1, lsl #8 ");		// does wait type match?
	asm("cmp	r12, #%a0" : : "i" ((TInt)EWtStDead));
	asm("bhs	2f ");						// no
	STREXD(		12,2,0);					// yes - wait matches - try to write return value
	asm("cmp	r12, #0 ");					// success?
	asm("bne	1b ");						// no - retry
	asm("mov	r6, r0 ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("blne	CancelTimerT__16NThreadWaitState ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStWaitActive));
	asm("beq	0f ");
	asm("ldr	r1, [r6, #%a0]" : : "i" (_FOFF(NThreadBase,iPauseCount)-_FOFF(NThreadBase,iWaitState)));
	asm("sub	r0, r6, #%a0" : : "i" _FOFF(NThreadBase,iWaitState));	// r0 = Thread()
	asm("movs	r1, r1, lsl #16 ");				// check if iPauseCount=iSuspendCount=0
	asm("bleq	ReadyT__12NSchedulableUi ");	// if so, make thread ready
	asm("0:		");
	asm("mov	r0, #0 ");
	__POPRET("	r4-r6,");					// return KErrNone

	asm("2:		");
	STREXD(		12,4,0);					// no matching wait - write back to check atomicity
	asm("cmp	r12, #0 ");					// success?
	asm("bne	1b ");						// no - retry
	asm("mvn	r0, #%a0" : : "i" (~KErrGeneral));
	__POPRET("	r4-r6,");					// no matching wait - return KErrGeneral
	}
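
/*
 * Editor's sketch (not part of the original source): UnBlockT() claims the
 * wait only if it matches, parking the return value in the second word of the
 * wait state where ReleaseT() later retrieves it. Using the WS64 view from
 * the note above, with the 64-bit compare-and-clear done in one LDREXD/STREXD
 * transaction:
 *
 *	// executed atomically on (iState, iWaitObj)
 *	if (iWaitObj != aWaitObj || (iState ^ (aType << 8)) >= (TUint32)EWtStDead)
 *		return KErrGeneral;				// no matching wait
 *	TUint32 old = iState;
 *	iState = 0;
 *	iWaitObj = (TAny*)aReturnValue;		// parked for ReleaseT()
 *	// then, outside the atomic section:
 *	if (old & EWtStTimeout)
 *		CancelTimerT();
 *	if ((old & EWtStWaitActive) && iPauseCount == 0 && iSuspendCount == 0)
 *		Thread()->ReadyT(0);			// make the thread ready again
 *	return KErrNone;
 */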

__NAKED__ TUint32 NThreadWaitState::ReleaseT(TAny*& /*aWaitObj*/, TInt /*aReturnValue*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("mov	r3, r2 ");
	asm("mov	r2, #0 ");
	__DATA_MEMORY_BARRIER__(r2);
	asm("1:		");
	LDREXD(		4,0);
	asm("and	r2, r4, #%a0" : : "i" ((TInt)EWtStDead));
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("str	r5, [r1] ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("bne	2f ");
	asm("mov	r0, r4 ");
	asm("ldmfd	sp!, {r4-r5} ");
	__JUMP(,	lr);

	asm("2:		");
	asm("mov	r5, lr ");
	asm("bl		CancelTimerT__16NThreadWaitState ");
	asm("mov	r0, r4 ");
	asm("mov	lr, r5 ");
	asm("ldmfd	sp!, {r4-r5} ");
	__JUMP(,	lr);
	}
#endif


#ifdef __FAST_MUTEX_MACHINE_CODED__
/******************************************************************************
 * Fast mutex
 ******************************************************************************/

/** Releases a previously acquired fast mutex.
	
	Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
	for you.
	
	@pre The calling thread holds the mutex.
	@pre Kernel must be locked.
	
	@post Kernel is locked.
	
	@see NFastMutex::Wait()
	@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NFastMutex::Signal()
	{
	ASM_DEBUG1(FMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
	asm("stmfd	sp!, {r0,lr} ");
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmsignal ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r0,lr} ");
#endif
	GET_RWNO_TID(,r3);
	asm("mov	r12, #0 ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	__ASM_CLI();
	asm("1:		");
	LDREX(		2,0);				// r2=aMutex->iHoldingThread
	asm("cmp	r2, r1 ");			// anyone else waiting?
	asm("mov	r2, #0 ");
	asm("bne	2f ");				// branch out if someone else waiting
	STREX(		12,2,0);			// else try to clear the holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	__JUMP(,lr);					// mutex released without contention

#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmsignal: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexSignal)));
#endif

	// there is contention
	asm("2:		");
	asm("orr	r12, r0, #1 ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	asm("b		DoSignalL__10NFastMutex ");
	}
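
/*
 * Editor's sketch (not part of the original source): the uncontended release
 * path above is a single conditional store to iHoldingThread. Bit 0 of
 * iHoldingThread is the contention flag (see CheckFastMutexDefer() below), so
 * the holder field only still equals the current thread when nobody is
 * waiting. Approximately:
 *
 *	// interrupts disabled; executed atomically via LDREX/STREX
 *	if (iHoldingThread == me)			// me = TSubScheduler::iCurrentThread
 *		{
 *		iHoldingThread = 0;				// release without contention
 *		me->iHeldFastMutex = 0;
 *		return;
 *		}
 *	// contended: flag a release in progress and take the slow path
 *	me->iHeldFastMutex = (NFastMutex*)((TLinAddr)this | 1);
 *	DoSignalL();
 */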


/** Acquires the System Lock.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	@post System lock is held.

	@see NKern::UnlockSystem()
	@see NKern::FMWait()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled

*/
EXPORT_C __NAKED__ void NKern::LockSystem()
	{
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMWait() ... */
	}

/** Acquires a fast mutex.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	@param aMutex The fast mutex to acquire.
	
	@post The calling thread holds the mutex.
	
	@see NFastMutex::Wait()
	@see NKern::FMSignal()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled

*/
EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex* /*aMutex*/)
	{
	ASM_DEBUG1(NKFMWait,r0);

	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("1:		");
	LDREX(		2,0);				// r2=aMutex->iHoldingThread
	asm("cmp	r2, #0 ");			//
	asm("bne	2f ");				// branch out if mutex held
	STREX(		12,1,0);			// else try to set us as holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__DATA_MEMORY_BARRIER__(r12);
	__ASM_STI();
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmwait ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("b		OutX__6BTraceUlUlUlUl ");
#endif
	__JUMP(,lr);					// mutex acquired without contention

	// there is contention
	asm("2:		");
	asm("mov	r2, #1 ");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("stmfd	sp!, {r4,lr} ");
	asm("bl		DoWaitL__10NFastMutex ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");

	asm("__SystemLock: ");
	asm(".word	%a0" : : "i" ((TInt)&TheScheduler.iLock));
#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmwait: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexWait)));
#endif
	}
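
/*
 * Editor's sketch (not part of the original source): the acquire fast path in
 * FMWait() is a standard lock-free try-lock on iHoldingThread. Approximately:
 *
 *	// interrupts disabled; executed atomically via LDREX/STREX
 *	if (iHoldingThread == 0)
 *		{
 *		iHoldingThread = me;			// me = TSubScheduler::iCurrentThread
 *		me->iHeldFastMutex = this;
 *		return;							// acquired without contention
 *		}
 *	// contended: record the mutex as held-pending, lock the kernel,
 *	// block in DoWaitL(), then NKern::Unlock() reschedules as required
 */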


/** Releases the System Lock.

	@pre System lock must be held.

	@see NKern::LockSystem()
	@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NKern::UnlockSystem()
	{
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMSignal() ... */
	}

/** Releases a previously acquired fast mutex.
	
	@param aMutex The fast mutex to release.
	
	@pre The calling thread holds the mutex.
	
	@see NFastMutex::Signal()
	@see NKern::FMWait()
*/
EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex* /*aMutex*/)
	{
	ASM_DEBUG1(NKFMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
	asm("stmfd	sp!, {r0,lr} ");
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmsignal ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r0,lr} ");
#endif
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("mov	r12, #0 ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("1:		");
	LDREX(		12,0);				// r12=aMutex->iHoldingThread
	asm("mov	r2, #0 ");
	asm("cmp	r12, r1 ");			// anyone else waiting?
	asm("bne	2f ");				// branch out if someone else waiting
	STREX(		12,2,0);			// else try to clear the holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	__JUMP(,lr);					// mutex released without contention

	// there is contention
	asm("2:		");
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r12, #1 ");
	asm("orr	r4, r0, #1 ");
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("str	r4, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	asm("bl		DoSignalL__10NFastMutex ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");
	}


/** Temporarily releases the System Lock if there is contention.

    If there
	is another thread attempting to acquire the System lock, the calling
	thread releases the mutex and then acquires it again.
	
	This is more efficient than the equivalent code:
	
	@code
	NKern::UnlockSystem();
	NKern::LockSystem();
	@endcode

	Note that this can only allow higher priority threads to use the System
	lock, as lower priority threads cannot cause contention on a fast mutex.

	@return	TRUE if the system lock was relinquished, FALSE if not.

	@pre	System lock must be held.

	@post	System lock is held.

	@see NKern::LockSystem()
	@see NKern::UnlockSystem()
*/
EXPORT_C __NAKED__ TBool NKern::FlashSystem()
	{
//	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMFlash() ... */
	}

/** Temporarily releases a fast mutex if there is contention.

    If there is another thread attempting to acquire the mutex, the calling
	thread releases the mutex and then acquires it again.
	
	This is more efficient than the equivalent code:
	
	@code
	NKern::FMSignal();
	NKern::FMWait();
	@endcode

	@return	TRUE if the mutex was relinquished, FALSE if not.

	@pre	The mutex must be held.

	@post	The mutex is held.
*/
EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex* /*aM*/)
	{
	ASM_DEBUG1(NKFMFlash,r0);
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldrb	r2, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iMutexPri));
	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iBasePri));
	asm("cmp	r2, r12 ");
	asm("bhs	1f ");							// a thread of greater or equal priority is waiting
	__ASM_STI();
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmflash ");
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r4,lr} ");
#endif
	asm("mov	r0, #0 ");
	__JUMP(,lr);								// return FALSE

#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmflash: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexFlash)));
#endif

	asm("1:		");
	asm("mov	r12, #1 ");
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r4, r0 ");
	asm("bl		Signal__10NFastMutex ");
	asm("bl		PreemptionPoint__5NKern ");
	asm("mov	r0, r4 ");
	asm("bl		Wait__10NFastMutex ");
	asm("bl		Unlock__5NKern ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("mov	r0, #1 ");
	__JUMP(,lr);								// return TRUE
	}
#endif
sl@0
  1327
sl@0
  1328
sl@0
  1329
sl@0
  1330
/** Check whether a thread holds a fast mutex.
sl@0
  1331
	If so set the mutex contention flag and return TRUE, else return FALSE.
sl@0
  1332
sl@0
  1333
	Called with kernel lock held
sl@0
  1334
sl@0
  1335
	@internalComponent
sl@0
  1336
 */
sl@0
  1337
__NAKED__ TBool NThreadBase::CheckFastMutexDefer()
sl@0
  1338
	{
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("bics r2, r1, #3 ");		// r2 = pointer to mutex if any, r1 bit 0 = flag
	asm("bne 1f ");
	asm("mov r0, #0 ");				// no mutex - return FALSE
	__JUMP(,lr);

	// iHeldFastMutex points to a mutex
	asm("1: ");
	asm("tst r1, #1 ");				// test flag
	asm("beq 2f ");					// branch if not being released

	// mutex being released
	asm("3: ");
	LDREX(3,2);						// r3 = m->iHoldingThread
	asm("sub r3, r3, r0 ");			// m->iHoldingThread - this
	asm("cmp r3, #1 ");
	asm("bhi 4f ");					// if m->iHoldingThread != this or this+1, skip
	asm("orr r3, r0, #1 ");			// if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
	STREX(12,3,2);
	asm("teq r12, #0 ");
	asm("bne 3b ");
	asm("mov r0, #1 ");				// return TRUE
	__JUMP(,lr);

	asm("4: ");
	asm("mov r3, #0 ");				// already released, so set iHeldFastMutex=0
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("0: ");
	asm("mov r0, #0 ");				// no mutex - return FALSE
	__JUMP(,lr);

	// mutex being acquired or has been acquired
	// if it has been acquired set the contention flag and return TRUE, else return FALSE
	asm("2: ");
	LDREX(3,2);						// r3 = m->iHoldingThread
	asm("sub r3, r3, r0 ");			// m->iHoldingThread - this
	asm("cmp r3, #1 ");
	asm("bhi 0b ");					// if m->iHoldingThread != this or this+1, finish and return FALSE
	asm("orr r3, r0, #1 ");			// if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
	STREX(12,3,2);
	asm("teq r12, #0 ");
	asm("bne 2b ");
	asm("mov r0, #1 ");				// return TRUE
	__JUMP(,lr);
	}
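
/* A minimal C++ sketch of the logic above (illustrative only, not built);
   std::atomic stands in for LDREX/STREX on iHoldingThread and the names are
   invented. Because threads are word-aligned, this|1 == this+1, which is how
   the contention flag is encoded in the holding-thread pointer:

	#include <atomic>
	#include <cstdint>

	bool CheckFastMutexDeferSketch(uintptr_t aThis, uintptr_t& aHeldFastMutex,
								   std::atomic<uintptr_t>& aHoldingThread)
		{
		if (!(aHeldFastMutex & ~uintptr_t(3)))
			return false;							// no mutex - return FALSE
		bool beingReleased = aHeldFastMutex & 1;
		uintptr_t h = aHoldingThread.load(std::memory_order_relaxed);
		do	{
			if (h - aThis > 1)						// neither this nor this+1
				{
				if (beingReleased)
					aHeldFastMutex = 0;				// release already completed
				return false;
				}
			} while (!aHoldingThread.compare_exchange_weak(h, aThis | 1,
										std::memory_order_acq_rel));
		return true;								// contention flag now set
		}
*/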


/******************************************************************************
 * IDFC/DFC
 ******************************************************************************/

/**	Transition the state of an IDFC or DFC when Add() is called

	0000->008n, 00Cn->00En, all other states unchanged
	Return original state.

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::AddStateChange()
	{
	GET_RWNO_TID(, r1);				// r1->SubScheduler
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREXH(0,3);
	asm("cmp r0, #0 ");				// original state 0000 ?
	asm("orreq r2, r1, #0x0080 ");	// yes -> 008n
	asm("movne r2, r0 ");			// no -> R2=original state ...
	asm("eorne r12, r0, #0x00C0 ");	// ... and R12=original state^00C0 ...
	asm("cmpne r12, #0x0020 ");		// ... and check if result < 0020 (i.e. original==00C0..00DF)
	asm("addlo r2, r2, #0x0020 ");	// 00Cn->00En otherwise leave R2 alone
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	__JUMP(,lr);
	}
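
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic<uint16_t> stands in for the LDREXH/STREXH loop and
   the name is invented:

	#include <atomic>
	#include <cstdint>

	uint16_t AddStateChangeSketch(std::atomic<uint16_t>& aState, uint16_t aCpuNum)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig == 0)
				next = 0x0080 | aCpuNum;			// 0000 -> 008n
			else if ((orig & 0xFFE0) == 0x00C0)
				next = orig + 0x0020;				// 00Cn -> 00En
			else
				next = orig;						// all other states unchanged
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;								// return the original state
		}
*/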

/**	Transition the state of an IDFC just before running it.

	002g->00Cn, 008n->00Cn, 00An->00Cn, XXYY->XX00, XX00->0000
	other initial states invalid
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::RunIDFCStateChange()
	{
	GET_RWNO_TID(, r1);				// r1->SubScheduler
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
	__DATA_MEMORY_BARRIER_Z__(r12);
#ifdef _DEBUG
	asm("str r4, [sp, #-4]! ");
	asm("ldr r4, __IdleGeneration ");
	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
#endif
	asm("1: ");
	LDREXH(0,3);
	asm("eor r2, r0, #0x0080 ");
	asm("cmp r2, #0x0040 ");
	asm("bhs 2f ");					// branch out unless 008n or 00An
#ifdef _DEBUG
	asm("and r2, r0, #0x001F ");
	asm("cmp r2, r1 ");
	asm("bne 0f ");					// if n!=current CPU number, die
#endif
	asm("orr r2, r1, #0x00C0 ");	// 008n->00Cn, 00An->00Cn
	asm("3: ");
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldr r4, [sp], #4 ");
#endif
	__JUMP(,lr);

	asm("2: ");
	asm("bic r2, r0, #1 ");
	asm("cmp r2, #0x0020 ");
	asm("orreq r2, r1, #0x00C0 ");	// 002g->00Cn
#ifdef _DEBUG
	asm("bne 4f ");
	asm("cmp r0, r4 ");
	asm("bne 0f ");					// wrong idle state
	asm("4: ");
#endif
	asm("beq 3b ");
	asm("cmp r0, #0x0100 ");		// C=1 if XXYY or XX00, C=0 if bad state
	asm("bic r2, r0, #0x00FF ");	// XXYY->XX00, C unchanged
	asm("tst r0, #0x00FF ");		// C unchanged
	asm("moveq r2, #0 ");			// XX00->0000, C unchanged
	asm("bcs 3b ");					// branch to STREX if valid state

	asm("0: ");
	__ASM_CRASH();					// bad state

	asm("__IdleGeneration: ");
	asm(".word %a0 " : : "i" ((TInt)&TheScheduler.iIdleGeneration));
	}
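
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH, the name is invented,
   and the _DEBUG consistency checks and bad-state crash are omitted:

	#include <atomic>
	#include <cstdint>

	uint16_t RunIDFCStateChangeSketch(std::atomic<uint16_t>& aState, uint16_t aCpuNum)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if ((orig & 0xFFC0) == 0x0080)		// 008n or 00An
				next = 0x00C0 | aCpuNum;		// -> 00Cn
			else if ((orig & 0xFFFE) == 0x0020)	// 002g
				next = 0x00C0 | aCpuNum;		// -> 00Cn
			else if (orig & 0x00FF)				// XXYY
				next = orig & 0xFF00;			// -> XX00
			else								// XX00
				next = 0;						// -> 0000
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/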

/**	Transition the state of an IDFC just after running it.

	First swap aS->iCurrentIDFC with 0
	If original value != this, return 0xFFFFFFFF and don't touch *this
	Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
	other initial states invalid
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
	{
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler,iCurrentIDFC));
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREX(2,1);
	asm("subs r2, r2, r0 ");		// aS->iCurrentIDFC == this?
	asm("bne 9f ");					// no - bail out immediately
	STREX(12,2,1);					// yes - set aS->iCurrentIDFC=0
	asm("cmp r12, #0 ");
	asm("bne 1b ");

	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("str r4, [sp, #-4]! ");
	GET_RWNO_TID(, r4);				// r4->SubScheduler
	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r4 = current CPU number
#endif
	asm("2: ");
	LDREXH(0,3);					// r0 = original DFC state
	asm("mov r2, #0 ");				// r2 = 0 to begin with
#ifdef _DEBUG
	asm("tst r0, #0x00FF ");
	asm("beq 5f ");
	asm("eor r12, r0, r4 ");		// original state ^ CPU number, should be xxC0, xxE0 or xx60
	asm("and r12, r12, #0x00E0 ");
	asm("cmp r12, #0x00E0 ");
	asm("cmpne r12, #0x00C0 ");
	asm("cmpne r12, #0x0060 ");
	asm("beq 5f ");
	__ASM_CRASH();					// bad state
	asm("5: ");
#endif
	asm("bic r12, r0, #0x001F ");
	asm("cmp r12, #0x00E0 ");
	asm("bhi 4f ");					// branch out if XXYY or XX00
	asm("subeq r2, r0, #0x0060 ");	// 00En->008n
	asm("cmp r12, #0x0060 ");
	asm("moveq r2, r0 ");			// 006n->006n, else R2=0
	asm("3: ");
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 2b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldr r4, [sp], #4 ");
#endif
	__JUMP(,lr);

	asm("4: ");
	asm("tst r0, #0x00FF ");
	asm("bicne r2, r0, #0x00FF ");	// XXYY->XX00, XX00->0000
	asm("b 3b ");

	asm("9: ");
	asm("mvn r0, #0 ");				// return 0xFFFFFFFF
	__JUMP(,lr);
	}
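
/* A minimal C++ sketch of the two steps above (illustrative only, not
   built); std::atomic stands in for LDREX/STREX and LDREXH/STREXH, the
   names are invented, and the _DEBUG checks are omitted:

	#include <atomic>
	#include <cstdint>

	uint32_t EndIDFCStateChangeSketch(uintptr_t aThis,
									  std::atomic<uintptr_t>& aCurrentIDFC,
									  std::atomic<uint16_t>& aState)
		{
		uintptr_t expected = aThis;
		if (!aCurrentIDFC.compare_exchange_strong(expected, 0,
										std::memory_order_acq_rel))
			return 0xFFFFFFFFu;					// aS->iCurrentIDFC != this
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig >= 0x0100)					// XXYY or XX00
				next = (orig & 0x00FF) ? (orig & 0xFF00) : 0;
			else if ((orig & 0x00E0) == 0x00E0)
				next = orig - 0x0060;			// 00En -> 008n
			else if ((orig & 0x00E0) == 0x0060)
				next = orig;					// 006n -> 006n
			else
				next = 0;						// 00Cn -> 0000
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/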

/**	Transition the state of an IDFC just after running it, in the case where
	it was requeued on the idle queue while it was running.

	006n->002g where g = TheScheduler.iIdleGeneration
	XX6n->XX00
	other initial states invalid
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::EndIDFCStateChange2()
	{
	asm("ldr r12, __IdleGeneration ");
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
#ifdef _DEBUG
	asm("str r4, [sp, #-4]! ");
	GET_RWNO_TID(, r4);				// r4->SubScheduler
	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r4 = current CPU number
#endif
	asm("ldrb r1, [r12] ");			// r1 = TheScheduler.iIdleGeneration
	asm("1: ");
	LDREXH(0,3);
#ifdef _DEBUG
	asm("eor r12, r0, r4 ");
	asm("and r12, r12, #0x00FF ");
	asm("cmp r12, #0x0060 ");		// should be 006n or XX6n
	asm("beq 2f ");
	__ASM_CRASH();					// if not, die
	asm("2: ");
#endif
	asm("tst r0, #0xFF00 ");		// XX6n or 006n ?
	asm("orreq r2, r1, #0x0020 ");	// 006n->002g
	asm("bicne r2, r0, #0x00FF ");	// XX6n->XX00
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldr r4, [sp], #4 ");
#endif
	__JUMP(,lr);
	}
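
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH, the name is invented,
   and the _DEBUG check is omitted:

	#include <atomic>
	#include <cstdint>

	uint16_t EndIDFCStateChange2Sketch(std::atomic<uint16_t>& aState, uint16_t aIdleGen)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig & 0xFF00)
				next = orig & 0xFF00;			// XX6n -> XX00
			else
				next = 0x0020 | aIdleGen;		// 006n -> 002g
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/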

/**	Transition the state of a DFC just before moving it from the IDFC queue to
	its final queue.

	002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
	other initial states invalid
	Return original state
*/
__NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
	{
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER_Z__(r12);
#ifdef _DEBUG
	asm("str r4, [sp, #-4]! ");
	asm("ldr r4, __IdleGeneration ");
	GET_RWNO_TID(, r1);				// r1->SubScheduler
	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
	asm("orr r1, r1, #0x0080 ");
#endif
	asm("1: ");
	LDREXH(0,3);
#ifdef _DEBUG
	asm("cmp r0, #0 ");
	asm("beq 0f ");					// 0000 -> die
	asm("ands r2, r0, #0x00FF ");
	asm("beq 3f ");					// XX00 -> OK
	asm("cmp r2, r4 ");				// 002g ?
	asm("beq 3f ");					// yes -> OK
	asm("cmp r2, r1 ");				// 008n ?
	asm("beq 3f ");					// yes -> OK
	asm("0: ");
	__ASM_CRASH();					// otherwise die
	asm("3: ");
#endif
	asm("bics r2, r0, #0x00FF ");	// XXYY->XX00
	asm("moveq r2, #0x0001 ");		// 002g,008n->0001
	asm("beq 2f ");
	asm("tst r0, #0x00FF ");
	asm("moveq r2, #0 ");			// XX00->0000
	asm("2: ");
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldr r4, [sp], #4 ");
#endif
	__JUMP(,lr);
	}
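
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH, the name is invented,
   and the _DEBUG checks are omitted:

	#include <atomic>
	#include <cstdint>

	uint16_t MoveToFinalQStateChangeSketch(std::atomic<uint16_t>& aState)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig & 0xFF00)
				next = (orig & 0x00FF) ? (orig & 0xFF00) : 0;	// XXYY->XX00, XX00->0000
			else
				next = 0x0001;						// 002g, 008n -> 0001
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/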

/**	Transition the state of an IDFC when transferring it to another CPU

	002g->00Am, 008n->00Am, XXYY->XX00, XX00->0000
	other initial states invalid
	Return original state

	Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
*/
__NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
	{
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER_Z__(r12);
#ifdef _DEBUG
	asm("stmfd sp!, {r4-r5} ");
	asm("ldr r4, __IdleGeneration ");
	GET_RWNO_TID(, r5);				// r5->SubScheduler
	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
	asm("ldr r5, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r5 = current CPU number
	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
	asm("orr r5, r5, #0x0080 ");
#endif
	asm("1: ");
	LDREXH(0,3);
#ifdef _DEBUG
	asm("cmp r0, #0 ");
	asm("beq 0f ");					// 0000 -> die
	asm("ands r2, r0, #0x00FF ");
	asm("beq 3f ");					// XX00 -> OK
	asm("cmp r2, r4 ");				// 002g ?
	asm("beq 3f ");					// yes -> OK
	asm("cmp r2, r5 ");				// 008n ?
	asm("beq 3f ");					// yes -> OK
	asm("0: ");
	__ASM_CRASH();					// otherwise die
	asm("3: ");
#endif
	asm("bics r2, r0, #0x00FF ");	// XXYY->XX00
	asm("orreq r2, r1, #0x00A0 ");	// 002g,008n->00Am (m = target CPU number in r1)
	asm("beq 2f ");
	asm("tst r0, #0x00FF ");
	asm("moveq r2, #0 ");			// XX00->0000
	asm("2: ");
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldmfd sp!, {r4-r5} ");
#endif
	__JUMP(,lr);
	}
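
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH, the name is invented,
   and the _DEBUG checks are omitted. aCpu is the target CPU number:

	#include <atomic>
	#include <cstdint>

	uint16_t TransferIDFCStateChangeSketch(std::atomic<uint16_t>& aState, uint16_t aCpu)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig & 0xFF00)
				next = (orig & 0x00FF) ? (orig & 0xFF00) : 0;	// XXYY->XX00, XX00->0000
			else
				next = 0x00A0 | aCpu;				// 002g, 008n -> 00Am
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/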

/**	Transition the state of an IDFC/DFC just before cancelling it.

	0000->0000, XX00->ZZ00, XXYY->ZZYY where ZZ = XX with the current CPU's bit set
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::CancelInitialStateChange()
	{
	GET_RWNO_TID(,r1);
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuMask));	// r1 = mask of current CPU number
	asm("1: ");
	LDREXH(0,3);
	asm("cmp r0, #0 ");
	asm("beq 2f ");					// if original state 0000 leave alone
	asm("orr r2, r0, r1, lsl #8 ");	// else set bit 8-15 corresponding to CPU number
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	asm("2: ");
	__DATA_MEMORY_BARRIER__(r12);
	__JUMP(,lr);
	}
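
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH and the name is invented.
   aCpuMask is the current CPU's bit mask, shifted into bits 8-15:

	#include <atomic>
	#include <cstdint>

	uint16_t CancelInitialStateChangeSketch(std::atomic<uint16_t>& aState, uint16_t aCpuMask)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		while (orig != 0 &&
			   !aState.compare_exchange_weak(orig, uint16_t(orig | (aCpuMask << 8)),
											 std::memory_order_acq_rel))
			{}
		return orig;								// 0000 is left unchanged
		}
*/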

/**	Transition the state of an IDFC/DFC at the end of a cancel operation

	XXYY->XX00, XX00->0000
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::CancelFinalStateChange()
	{
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER_Z__(r12);

	asm("1: ");
	LDREXH(0,3);
	asm("tst r0, #0x00FF ");
	asm("bicne r2, r0, #0x00FF ");	// XXYY->XX00
	asm("moveq r2, #0 ");			// XX00->0000
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	__JUMP(,lr);
	}
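
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH and the name is invented:

	#include <atomic>
	#include <cstdint>

	uint16_t CancelFinalStateChangeSketch(std::atomic<uint16_t>& aState)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			next = (orig & 0x00FF) ? (orig & 0xFF00) : 0;	// XXYY->XX00, XX00->0000
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/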

/**	Transition the state of an IDFC or DFC when QueueOnIdle() is called

	0000->002g where g = TheScheduler.iIdleGeneration,
	00Cn->006n, all other states unchanged
	Return original state.

	Enter and return with interrupts disabled and IdleSpinLock held.
*/
__NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
	{
	asm("ldr r12, __IdleGeneration ");
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	asm("ldrb r1, [r12] ");			// r1 = TheScheduler.iIdleGeneration
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREXH(0,3);
	asm("cmp r0, #0 ");				// original state 0000 ?
	asm("orreq r2, r1, #0x0020 ");	// yes -> 002g
	asm("movne r2, r0 ");			// no -> R2=original state ...
	asm("eorne r12, r0, #0x00C0 ");	// ... and R12=original state^00C0 ...
	asm("cmpne r12, #0x0020 ");		// ... and check if result < 0020 (i.e. original==00C0..00DF)
	asm("sublo r2, r2, #0x0060 ");	// 00Cn->006n otherwise leave R2 alone
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	__JUMP(,lr);
	}
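
/* A minimal C++ sketch of the transition above (illustrative only, not
   built); std::atomic stands in for LDREXH/STREXH and the name is invented:

	#include <atomic>
	#include <cstdint>

	uint16_t QueueOnIdleStateChangeSketch(std::atomic<uint16_t>& aState, uint16_t aIdleGen)
		{
		uint16_t orig = aState.load(std::memory_order_relaxed);
		uint16_t next;
		do	{
			if (orig == 0)
				next = 0x0020 | aIdleGen;			// 0000 -> 002g
			else if ((orig & 0xFFE0) == 0x00C0)
				next = orig - 0x0060;				// 00Cn -> 006n
			else
				next = orig;						// all other states unchanged
			} while (!aState.compare_exchange_weak(orig, next,
										std::memory_order_acq_rel));
		return orig;
		}
*/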


/**	Reset the state of an IDFC/DFC to 0000.

	In debug builds, crash if the state was already 0000.
*/
__NAKED__ void TDfc::ResetState()
	{
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	__DATA_MEMORY_BARRIER_Z__(r2);
#ifdef _DEBUG
	asm("1: ");
	LDREXH(0,3);
	asm("cmp r0, #0 ");
	asm("beq 0f ");				// if state already zero, die
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
#else
	asm("strh r2, [r3] ");		// __e32_atomic_store_rel16(&iDfcState, 0)
#endif
	__JUMP(,lr);
#ifdef _DEBUG
	asm("0: ");
	__ASM_CRASH();
#endif
	}
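
/* A minimal C++ sketch of the above (illustrative only, not built):

	#include <atomic>
	#include <cstdint>

	void ResetStateSketch(std::atomic<uint16_t>& aState)
		{
		// release-ordered store of 0; debug builds additionally assert
		// that the previous state was non-zero
		aState.store(0, std::memory_order_release);
		}
*/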