// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncthrd.cia
// 
//

#define __INCLUDE_NTHREADBASE_DEFINES__

#include <arm.h>
#include <e32cia.h>

#undef	iDfcState
#define	iDfcState		i8816.iHState16

extern "C" void send_accumulated_resched_ipis();

/******************************************************************************
 * Thread
 ******************************************************************************/
extern "C" __NAKED__ void __StartThread()
	{
	// On entry:
	//		R0->TSubScheduler, R1=0, R2=1, R3->current thread
	//		R12=resched IPIs
	// Interrupts disabled

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack, iCPSR));
	asm("tst	r0, #0x0f ");
	asm("bne	2f ");
	USER_MEMORY_GUARD_OFF(,r0,r0);
	asm("2:		");
#endif
	asm("ldmia	sp, {r0-r14}^ ");			// load initial values for R0-R12, R13_usr, R14_usr
	asm("nop	");							// don't access banked register immediately after
	asm("add	sp, sp, #64 ");				// point to saved PC, CPSR (skip iExcCode)
	asm("adr	lr, 1f ");					// set lr_svc in case thread returns
	RFEIAW(13);								// restore PC and CPSR - jump to thread entry point

	asm("1:		");
	asm("b "	CSM_ZN5NKern4ExitEv);		// if control returns, call NKern::Exit()
	}
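
/* For reference, the exception stack frame consumed above, sketched from the
   instructions in this function (not the authoritative SThreadExcStack
   definition):

	struct TFrameSketch					// hypothetical illustration only
		{
		TUint32	iReg[15];	// R0-R12, R13_usr, R14_usr - restored by the LDMIA ^
		TUint32	iExcCode;	// skipped: the ADD advances SP by 64 = 15*4 + 4
		TUint32	iR15;		// initial PC - consumed by RFEIAW
		TUint32	iCPSR;		// initial CPSR - consumed by RFEIAW
		};
*/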


extern "C" __NAKED__ TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/)
	{
	asm("adr	r1, 9f ");
	asm("mov	r3, r0 ");
	asm("mvn	r0, #0 ");
	asm("1:		");
	asm("ldr	r2, [r1], #4 ");
	asm("add	r0, r0, #1 ");
	asm("cmp	r2, r3 ");
	asm("beq	2f ");
	asm("cmp	r2, #0 ");
	asm("bne	1b ");
	asm("mvn	r0, #0 ");
	asm("2:		");
	__JUMP(,	lr);

	asm("9:		");
	asm(".word " CSM_CFUNC(__StartThread));
	asm(".word	nkern_unlock_resched_return ");
	asm(".word	nkern_preemption_point_resched_return ");
	asm(".word	nkern_wfar_resched_return ");
	asm(".word	irq_resched_return ");
	asm(".word	exec_wfar_resched_return ");
	asm(".word	0 ");
	}
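
/* Roughly equivalent C, as a sketch (the zero-terminated word table at local
   label 9 above; KernelContextTable is a hypothetical name for it):

	TInt get_kernel_context_type(TLinAddr aReschedReturn)
		{
		for (TInt i = 0; KernelContextTable[i]; ++i)
			if (KernelContextTable[i] == aReschedReturn)
				return i;				// index of the matching return point
		return -1;						// address not recognised
		}
*/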


/**	Mark the beginning of an event handler tied to a thread or thread group

	Return the number of the CPU on which the event handler should run
*/
__NAKED__ TInt NSchedulable::BeginTiedEvent()
	{
	asm("add r1, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(0,1);						// r0 = original value of iEventState
	asm("add r2, r0, #%a0" : : "i" ((TInt)EEventCountInc));
	STREX(3,2,1);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r3);
	asm("tst r0, #%a0" : : "i" ((TInt)EEventParent));
	asm("ldrne r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("beq bte0 ");				// EEventParent not set so don't look at group
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("beq bte_bad ");
	asm("cmp r2, r1 ");
	asm("beq bte2 ");				// parent not yet updated, use iNewParent
	asm("bte1: ");
	LDREX(0,2);						// r0 = original value of iEventState
	asm("add r3, r0, #%a0" : : "i" ((TInt)EEventCountInc));
	STREX(12,3,2);
	asm("cmp r12, #0 ");
	asm("bne bte1 ");				// retry the parent update if STREX failed (retrying from 1b would double-count this)
	__DATA_MEMORY_BARRIER__(r12);
	asm("bte0: ");
	asm("and r0, r0, #%a0" : : "i" ((TInt)EEventCpuMask));
	__JUMP(,lr);					// return event CPU

	asm("bte2: ");
	__DATA_MEMORY_BARRIER__(r3);	// make sure iNewParent is read after iParent
	asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("bne bte1 ");				// iNewParent set so OK
	__DATA_MEMORY_BARRIER__(r3);	// make sure iParent is read after iNewParent
	asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r2, #0 ");
	asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("cmp r2, r1 ");
	asm("bne bte1 ");				// iParent now set so OK, otherwise something is wrong

	asm("bte_bad: ");
	__ASM_CRASH();
	}
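
/* Equivalent logic, as a sketch. Per the constants used above, the event
   count occupies the bits at and above EEventCountInc, the thread CPU sits
   in bits 8-12 and the event CPU in bits 0-4 of iEventState. AtomicAdd and
   Parent() are hypothetical stand-ins for the LDREX/STREX loops and the
   iParent/iNewParent chasing:

	TInt NSchedulable::BeginTiedEvent()
		{
		TUint32 s = AtomicAdd(&iEventState, EEventCountInc);	// returns old value
		if (s & EEventParent)
			AtomicAdd(&Parent()->iEventState, EEventCountInc);	// count it on the group too
		return s & EEventCpuMask;		// CPU on which the handler should run
		}
*/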


/**	Mark the end of an event handler tied to a thread or thread group

*/
__NAKED__ void NSchedulable::EndTiedEvent()
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("ldr r1, [r0, #%a0]!" : : "i" _FOFF(NSchedulable, iEventState));
	asm("tst r1, #%a0" : : "i" ((TInt)EEventParent));
	asm("bne etep0 ");				// branch out if EEventParent set

	// r0->NSchedulable::iEventState
	asm("ete1: ");
	LDREX(1,0);
	asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc));	// decrement event count
	asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc));		// check if now zero
	asm("biccc r1, r1, #0xFF ");	// if so, mask event CPU ...
	asm("andcc r2, r1, #0x1F00 ");	// ... and r2 = thread CPU << 8 ...
	asm("orrcc r1, r1, r2, lsr #8 ");	// ... and event CPU = thread CPU
	STREX(12,1,0);
	asm("teq r12, #0 ");			// test for success, leave carry alone
	asm("bne ete1 ");				// retry if STREX failed
	asm("bcs ete2 ");				// if not last tied event, finish
	asm("tst r1, #%a0" : : "i" ((TInt)EDeferredReady));
	asm("addne r0, r0, #%a0" : : "i" (_FOFF(NSchedulable,i_IDfcMem) - _FOFF(NSchedulable,iEventState)));
	asm("bne " CSM_ZN4TDfc3AddEv );	// if deferred ready, add IDFC to action it
	asm("ete2: ");					// ready not deferred so finish
	__JUMP(,lr);

	asm("etep0: ");
	__DATA_MEMORY_BARRIER__(r12);	// make sure iParent is read after seeing parent flag set
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("beq ete_bad ");			// no parent - shouldn't happen
	asm("cmp r3, r0 ");				// parent == this ?
	asm("beq etep1 ");				// if so, parent not yet updated so use iNewParent

	asm("etep2: ");
	asm("stmfd sp!, {r0,lr} ");		// save this and return address
	asm("mov r0, r3 ");				// operate on parent
	asm("bl ete1 ");				// update parent state
	asm("ldmfd sp!, {r0,lr} ");
	asm("1: ");
	LDREX(1,0);
	asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc));	// decrement event count
	STREX(12,1,0);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);

	asm("etep1: ");
	__DATA_MEMORY_BARRIER__(r12);	// make sure iNewParent is read after iParent
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("bne etep2 ");				// iNewParent set so OK
	__DATA_MEMORY_BARRIER__(r12);	// make sure iParent is read after iNewParent
	asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
	asm("cmp r3, #0 ");
	asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("cmp r3, r0 ");
	asm("bne etep2 ");				// iParent now set so OK, otherwise something is wrong

	asm("ete_bad: ");
	__ASM_CRASH();
	}
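
/* The non-parent path above, as a sketch (AtomicDec stands in for the
   LDREX/STREX loop; ResetEventCpu and IDfc() are hypothetical stand-ins for
   the conditional bic/orr sequence and the tail call to TDfc::Add):

	void NSchedulable::EndTiedEvent()
		{
		TUint32 s = AtomicDec(&iEventState);	// subtracts EEventCountInc, returns new value
		if (s < (TUint32)EEventCountInc)		// count now zero - last tied event
			{
			ResetEventCpu();					// event CPU := thread CPU
			if (s & EDeferredReady)
				IDfc().Add();					// action the ready that was deferred
			}
		}
*/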


/**	Check for concurrent tied events when a thread/group becomes ready

	This is only ever called on a lone thread or a group, not on a thread
	which is part of a group.

	Update the thread CPU field in iEventState.
	If thread CPU != event CPU and event count nonzero, atomically
	set the ready deferred flag and return TRUE, else return FALSE.
	If event count zero, set event CPU = thread CPU atomically.

	@param aCpu the CPU on which the thread/group is to become ready
	@return	TRUE if the ready must be deferred.
*/
__NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt /*aCpu*/)
	{
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(2,0);						// r2 = original iEventState
	asm("bic r3, r2, #0x1F00 ");	// r3 = original iEventState with thread CPU zeroed out
	asm("orr r3, r3, r1, lsl #8 ");	// set thread CPU field = aCpu
	asm("cmp r3, #%a0" : : "i" ((TInt)EEventCountInc));
	asm("bhs 2f ");					// branch if event count nonzero
	asm("bic r3, r3, #0xFF ");		// else mask event CPU ...
	asm("orr r3, r3, r1 ");			// ... and set event CPU = thread CPU = aCpu
	asm("3: ");
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("eor r0, r2, r3 ");			// r0 = old event state ^ new event state
	asm("and r0, r0, #%a0" : : "i" ((TInt)EDeferredReady));
	__JUMP(,lr);					// return TRUE if we just set EDeferredReady

	// event count is nonzero
	asm("2: ");
	asm("eor r12, r3, r3, lsr #8 ");	// r12 bottom 5 bits = thread CPU ^ event CPU
	asm("tst r12, #0x1F ");				// thread CPU == event CPU?
	asm("orrne r3, r3, #%a0" : : "i" ((TInt)EDeferredReady));	// if not, set EDeferredReady
	asm("b 3b ");
	}
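
/* Equivalent logic, as a sketch (the STREX retry loop is elided):

	TBool NSchedulable::TiedEventReadyInterlock(TInt aCpu)
		{
		TUint32 s = iEventState;					// LDREX
		TUint32 ns = (s & ~0x1F00u) | (aCpu << 8);	// thread CPU := aCpu
		if (ns < (TUint32)EEventCountInc)			// no events outstanding
			ns = (ns & ~0xFFu) | aCpu;				// event CPU := thread CPU := aCpu
		else if ((ns ^ (ns >> 8)) & 0x1F)			// event CPU != thread CPU
			ns |= EDeferredReady;					// the ready must be deferred
		iEventState = ns;							// STREX (retry on failure)
		return (s ^ ns) & EDeferredReady;			// TRUE iff we just set the flag
		}
*/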


/**	Check for concurrent tied events when a thread leaves a group

	If event count zero, atomically set the event and thread CPUs to the
	current CPU, clear the parent flag and return TRUE, else return FALSE.

	@return	TRUE if the parent flag has been cleared
	@pre	Preemption disabled
*/
__NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
	{
	GET_RWNO_TID(, r1);					// R1->SubScheduler
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("orr r1, r1, r1, lsl #8 ");		// event CPU = thread CPU = this CPU, EDeferredReady, EEventParent clear
	asm("1: ");
	LDREX(2,0);
	asm("cmp r2, #%a0" : : "i" ((TInt)EEventCountInc));		// check if event count zero
	asm("bhs 0f ");						// if not, finish and return FALSE
	STREX(3,1,0);						// else update CPUs and clear parent flag
								// NOTE: Deferred ready flag must have been clear since thread is running
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);				// return TRUE (assumes this!=0)
	asm("0:");
	asm("mov r0, #0 ");
	__JUMP(,lr);				// return FALSE
	}


/**	Check for concurrent tied events when a thread joins a group

	If event count zero, atomically set the parent flag and return TRUE,
	else return FALSE.

	@return	TRUE if the parent flag has been set
	@pre	Preemption disabled
*/
__NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
	{
	asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
	asm("1: ");
	LDREX(1,0);
	asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc));		// check if event count zero
	asm("bhs 0f ");						// if not, finish and return FALSE
	asm("orr r2, r1, #%a0" : : "i" ((TInt)EEventParent));	// else set parent flag
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__JUMP(,lr);				// return TRUE (assumes this!=0)
	asm("0:");
	asm("mov r0, #0 ");
	__JUMP(,lr);				// return FALSE
	}
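
/* Both interlocks above are small conditional-store loops; for example, as a
   sketch (CompareAndSwap is a hypothetical stand-in for the LDREX/STREX pair):

	TBool NThreadBase::TiedEventJoinInterlock()
		{
		for (;;)
			{
			TUint32 s = iEventState;
			if (s >= (TUint32)EEventCountInc)
				return FALSE;			// tied events outstanding - fail
			if (CompareAndSwap(&iEventState, s, s | EEventParent))
				return TRUE;			// parent flag now set
			}
		}
*/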


#ifdef __FAST_SEM_MACHINE_CODED__
/******************************************************************************
 * Fast semaphore
 ******************************************************************************/

/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore and
	removes the calling thread from the ready-list if the semaphore becomes
	unsignalled. Only the thread that owns a fast semaphore can wait on it.
	
	Note that this function does not block; it merely updates the NThread state.
	Rescheduling will only occur when the kernel is unlocked. Generally threads
	would use NKern::FSWait(), which manipulates the kernel lock for you.

	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.
	@pre Kernel must be locked.
	
	@post Kernel is locked.
	
	@see NFastSemaphore::Signal()
	@see NKern::FSWait()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Wait()
	{
	ASM_DEBUG1(FSWait,r0);

	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4-r7} ");
	asm("ldr	r6, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r3, r0 ");
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("add	r7, r6, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0f ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		2,0);					// count
	asm("mov	r5, r6, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r2, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);

	asm("cmp	r2, #0 ");				// original count zero ?
	asm("bne	2f ");					// if not, don't need to wait
	asm("mov	r2, #1 ");
	asm("strb	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// else we need to reschedule
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,	lr);

	asm("2:		");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}
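
/* The iCount encoding used above and in the rest of this file, as a sketch:
   a non-negative iCount holds the number of outstanding signals; when a wait
   would take it below zero the word is replaced by the waiting thread
   pointer, pre-shifted so it cannot be confused with a count:

	// wait side:   iCount := (TLinAddr(thread) >> 2) | 0x80000000
	// signal side: thread := (NThreadBase*)(iCount << 2)
*/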


/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore
	and waits for a signal if the semaphore becomes unsignalled. Only the
	thread that owns a fast semaphore can wait on it.

	@param aSem The semaphore to wait on.
	
	@pre The calling thread must own the semaphore.
	@pre No fast mutex can be held.
	
	@see NFastSemaphore::Wait()
*/
EXPORT_C __NAKED__ void NKern::FSWait(NFastSemaphore* /*aSem*/)
	{
	ASM_DEBUG1(NKFSWait,r0);

	__ASM_CLI();							// all interrupts off
	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4,r5,r11,lr} ");
	asm("ldr	r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("mov	r3, r0 ");
	asm("add	r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("b		nkfswait1 ");
	}


/** Waits for a signal on the current thread's I/O semaphore.

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled
 */
EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
	{
	ASM_DEBUG0(WFAR);

	__ASM_CLI();							// all interrupts off
	GET_RWNO_TID(,r1);
	asm("stmfd	sp!, {r4,r5,r11,lr} ");
	asm("ldr	r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("add	r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("add	r3, r11, #%a0" : : "i" _FOFF(NThreadBase, iRequestSemaphore));

	asm("nkfswait1: ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0f ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		2,3);					// count
	asm("mov	r5, r11, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r2, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,3);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);

	asm("cmp	r2, #0 ");				// original count zero ?
	asm("beq	2f ");					// if so we must wait
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("bne	0f ");
	__ASM_STI();
	__POPRET("r4,r5,r11,");

	asm("0:		");
	__ASM_CRASH();

	asm("2:		");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));			// else lock the kernel
	__ASM_STI();
	asm("strb	r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// and set the reschedule flag
	asm("stmfd	sp!, {r0,r4-r10} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );	// reschedule
	asm(".global nkern_wfar_resched_return ");
	asm("nkern_wfar_resched_return: ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	__ASM_STI();
	__POPRET("r0,r4-r11,");

	asm(".global wait_for_any_request ");
	asm("wait_for_any_request: ");
	asm("add	r3, r9, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("mov	r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
	asm("add	r7, r9, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	asm("cmp	r4, #0 ");
	asm("bne	0b ");

	__DATA_MEMORY_BARRIER__(r12);
	asm("1:		");
	LDREX(		0,3);					// count
	asm("mov	r5, r9, lsr #2 ");		// thread>>2
	asm("orr	r5, r5, #0x80000000 ");
	asm("subs	r4, r0, #1 ");
	asm("movlt	r4, r5 ");				// if --count<0, r4=(thread>>2)|0x80000000
	STREX(		12,4,3);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef __RECORD_STATE__
	asm("str	r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iNThreadBaseSpare6));
#endif

	asm("cmp	r0, #0 ");				// original count zero ?
	asm("beq	exec_wfar_wait ");		// yes - must wait
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		4,7);
	STREXD(		12,2,7);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("tst	r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
	asm("ldreq	r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));	// check for callbacks
	asm("beq	exec_wfar_finish ");
	asm("b		0b ");
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore by
	one and releases any waiting thread if the semaphore becomes signalled.
	
	Note that a reschedule will not occur before this function returns; that will
	only take place when the kernel is unlocked. Generally threads
	would use NKern::FSSignal(), which manipulates the kernel lock for you.
	
	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.
	
	@see NFastSemaphore::Wait()
	@see NKern::FSSignal()
	@see NKern::Unlock()
 */
EXPORT_C __NAKED__ void NFastSemaphore::Signal()
	{
	ASM_DEBUG1(FSSignal,r0);

	asm("mov	r1, #1 ");
	asm("fssignal1: ");
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1:		");
	LDREX(		2,0);				// count
	asm("cmp	r2, #0 ");
	asm("sublt	r3, r1, #1 ");		// if count<0, replace with aCount-1
	asm("addges	r3, r2, r1 ");		// if count>=0, add aCount
	asm("bvs	0f ");				// if overflow, leave alone
	STREX(		12,3,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r2, #0 ");
	asm("movlt	r1, r2, lsl #2 ");	// if original count<0 r1 = original count<<2 = thread
	asm("blt	fs_signal_wake ");
	asm("0:		");
	__JUMP(,	lr);				// else finished

	asm("fs_signal_wake: ");
	asm("stmfd	sp!, {r4-r6,lr} ");
	asm("mov	r4, r0 ");
	asm("mov	r5, r1 ");
	asm("mov	r0, r1 ");
	asm("bl		AcqSLock__12NSchedulable ");
	asm("add	r0, r5, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
	asm("mov	r1, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore));
	asm("mov	r2, r4 ");
	asm("mov	r3, #0 ");
	asm("bl		UnBlockT__16NThreadWaitStateUiPvi ");
	asm("mov	r0, r5 ");
	asm("ldmfd	sp!, {r4-r6,lr} ");
	asm("b		RelSLock__12NSchedulable ");
	}


/** Signals a fast semaphore multiple times.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.

	@internalComponent	
 */
EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
	{
	ASM_DEBUG2(FSSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("bgt	fssignal1 ");
	__JUMP(,	lr);
	}


/** Signals the request semaphore of a nanothread several times.

	This function is intended to be used by the EPOC layer and personality
	layers. Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal. If NULL, the current thread is signalled.
	@param aCount Number of times the request semaphore must be signalled.
	
	@pre aCount >= 0

	@see Kern::RequestComplete()
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, TInt /*aCount*/)
	{
	ASM_DEBUG2(NKThreadRequestSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("ble	0f ");
	asm("cmp	r0, #0 ");
	asm("addne	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("bne	nkfssignal1 ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
	asm("b		nkfssignal2 ");

	asm("0:		");
	__JUMP(eq,	lr);
	__ASM_CRASH();
	}


/** Signals the request semaphore of a nanothread.

	This function is intended to be used by the EPOC layer and personality
	layers. Device drivers should use Kern::RequestComplete instead.

	@param aThread Nanothread to signal. Must be non-NULL.

	@see Kern::RequestComplete()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
	{
	ASM_DEBUG1(NKThreadRequestSignal,r0);
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));

	/* fall through to FSSignal() ... */
	}


/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore
	by one and releases any waiting thread if the semaphore becomes signalled.
	
	@param aSem The semaphore to signal.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::FSSignal(NFastSemaphore* /*aSem*/)
	{
	ASM_DEBUG1(NKFSSignal,r0);

	asm("mov	r1, #1 ");
	asm("nkfssignal1: ");
	__ASM_CLI();
	asm("nkfssignal2: ");
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1:		");
	LDREX(		2,0);				// count
	asm("cmp	r2, #0 ");
	asm("sublt	r3, r1, #1 ");		// if count<0, replace with aCount-1
	asm("addges	r3, r2, r1 ");		// if count>=0, add aCount
	asm("bvs	0f ");				// if overflow, leave alone
	STREX(		12,3,0);
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r2, #0 ");
	asm("blt	2f ");
	asm("0:		");
	__ASM_STI();
	__JUMP(,	lr);				// else finished

	asm("2:		");
	GET_RWNO_TID(,r3);
	asm("mov	r1, r2, lsl #2 ");	// if original count<0 r1 = original count<<2 = thread
	asm("ldr	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("stmfd	sp!, {r4,lr} ");
	asm("add	r12, r12, #1 ");			// lock the kernel
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	asm("bl		fs_signal_wake ");			// wake up the thread
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");
	}


/** Signals a fast semaphore multiple times.

    Increments the signal count of a
	fast semaphore by aCount and releases any waiting thread if the semaphore
	becomes signalled.
	
	@param aSem The semaphore to signal.
	@param aCount The number of times to signal the semaphore.

	@see NKern::FSWait()

	@pre Interrupts must be enabled.
	@pre Do not call from an ISR
 */
EXPORT_C __NAKED__ void NKern::FSSignalN(NFastSemaphore* /*aSem*/, TInt /*aCount*/)
	{
	ASM_DEBUG2(NKFSSignalN,r0,r1);

	asm("cmp	r1, #0 ");
	asm("bgt	nkfssignal1 ");
	__JUMP(,	lr);
	}


/** Cancels a wait on a fast semaphore.

	@pre Kernel must be locked.
	@pre Call either in a thread or an IDFC context.
	
	@post Kernel is locked.

	@internalComponent	
 */
__NAKED__ void NFastSemaphore::WaitCancel()
	{
	asm("mov	r1, #1 ");
	/* Fall through ... */
	}

/* Fall through ... */
#endif
/* Fall through ... */

/**	Increment a fast semaphore count

	Do memory barrier
	If iCount >= 0, increment by aCount and return 0
	If iCount < 0, set count equal to aCount-1 and return (original count << 2)

	Release semantics
*/
__NAKED__ NThreadBase* NFastSemaphore::Inc(TInt /*aCount*/)
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREX(2,0);					// count
	asm("cmp r2, #0 ");
	asm("sublt r3, r1, #1 ");	// if count<0, replace with aCount-1
	asm("addges r3, r2, r1 ");	// if count>=0, add aCount
	asm("bvs 0f ");				// if overflow leave alone
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("0: ");
	asm("cmp r2, #0 ");
	asm("movlt r0, r2, lsl #2 ");	// if original count<0, return count<<2
	asm("movge r0, #0 ");			// else return 0
	__JUMP(,lr);
	}
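
/* Equivalent logic, as a sketch (the LDREX/STREX retry and the V-flag
   overflow check are elided; AddWouldOverflow stands in for the latter):

	NThreadBase* NFastSemaphore::Inc(TInt aCount)
		{
		TInt c = iCount;
		if (c < 0)
			iCount = aCount - 1;		// the waiter consumes one signal
		else if (!AddWouldOverflow(c, aCount))
			iCount = c + aCount;
		return (c < 0) ? (NThreadBase*)(c << 2) : 0;	// recover the waiting thread
		}
*/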


/**	Decrement a fast semaphore count

	If count > 0, decrement
	If count = 0, set equal to (thread>>2)|0x80000000
	Return original count
	Full barrier semantics
*/
__NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
	{
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREX(2,0);					// count
	asm("subs r3, r2, #1 ");
	asm("movlt r3, #0x80000000 ");
	asm("orrlt r3, r3, r1, lsr #2 ");	// if --count<0, r3=(thread>>2)|0x80000000
	STREX(12,3,0);
	asm("teq r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("mov r0, r2 ");			// return original count
	__JUMP(,lr);
	}

/**	Reset a fast semaphore count

	Do memory barrier
	If iCount >= 0, set iCount=0 and return 0
	If iCount < 0, set iCount=0 and return (original count << 2)

	Release semantics
*/
__NAKED__ NThreadBase* NFastSemaphore::DoReset()
	{
	__DATA_MEMORY_BARRIER_Z__(r3);
	asm("1: ");
	LDREX(2,0);					// count
	STREX(12,3,0);				// zero count
	asm("teq r12, #0 ");
	asm("bne 1b ");
	asm("cmp r2, #0 ");
	asm("movlt r0, r2, lsl #2 ");	// if original count<0, return count<<2
	asm("movge r0, #0 ");			// else return 0
	__JUMP(,lr);
	}


#ifdef __NTHREAD_WAITSTATE_MACHINE_CODED__
/******************************************************************************
 * Thread wait state
 ******************************************************************************/

__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("and	r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
	asm("and	r1, r1, #0xff ");
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, r1, lsl #8 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r4, #0 ");
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}

__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/, TUint32 /*aTimeout*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("and	r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
	asm("and	r1, r1, #0xff ");
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, r1, lsl #8 ");
	asm("1:		");
	LDREXD(		4,0);
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("ldr	r12, [sp, #8] ");
	asm("cmp	r4, #0 ");
	asm("bne	0f ");
	asm("ldmfd	sp!, {r4-r5} ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}

__NAKED__ void NThreadWaitState::CancelWait()
	{
	asm("mov	r12, r0 ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("1:		");
	LDREXD(		0,12);
	STREXD(		1,2,12);
	asm("cmp	r1, #0 ");
	asm("bne	1b ");
	asm("tst	r0, #%a0" : : "i" ((TInt)(EWtStDead|EWtStWaitActive)));
	asm("bne	0f ");
	__JUMP(,	lr);

	asm("0:		");
	__ASM_CRASH();
	}

__NAKED__ TInt NThreadWaitState::DoWait()
	{
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iTriggerTime));
	asm("1:		");
	LDREXD(		2,0);
	asm("cmp	r1, #0 ");
	asm("orrne	r2, r2, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("tst	r2, #%a0" : : "i" ((TInt)EWtStDead));
	asm("bne	0f ");
	asm("tst	r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("beq	9f ");
	asm("bic	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
	asm("orr	r2, r2, #%a0" : : "i" ((TInt)EWtStWaitActive));
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	asm("cmp	r1, #0 ");
	asm("bne	2f ");
	asm("mov	r0, r2, lsr #8 ");
	__JUMP(,	lr);

	asm("2:		");
	asm("stmfd	sp!, {r2-r4,lr} ");
	asm("mov	r4, r0 ");
	asm("add	r0, r0, #%a0" : : "i" _FOFF(NThreadWaitState,iTimer));
	asm("mov	r2, #1 ");
	asm("bl	"	CSM_ZN6NTimer7OneShotEii );
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
	asm("cmp	r0, #0 ");
	asm("bne	8f ");
	asm("add	r1, r1, #1 ");
	asm("str	r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
	asm("ldmfd	sp!, {r2-r4,lr} ");
	asm("mov	r0, r2, lsr #8 ");
	__JUMP(,	lr);

	asm("0:		");
	asm("mvn	r0, #%a0" : : "i" (~KErrDied));
	__JUMP(,	lr);
	asm("9:		");
	asm("mvn	r0, #%a0" : : "i" (~KErrGeneral));
	__JUMP(,	lr);
	asm("8:		");
	__ASM_CRASH();
	}
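
/* Return value summary for DoWait() above, as a sketch (the state word is
   really accessed with LDREXD/STREXD; State() is a hypothetical accessor):

	TInt NThreadWaitState::DoWait()
		{
		TUint32 s = State();
		if (iTimer.iTriggerTime)
			s |= EWtStTimeout;
		if (s & EWtStDead)
			return KErrDied;
		if (!(s & EWtStWaitPending))
			return KErrGeneral;
		s = (s & ~EWtStWaitPending) | EWtStWaitActive;
		// ... commit s, then start the one-shot timer if a timeout was armed ...
		return s >> 8;					// the wait type
		}
*/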

__NAKED__ TInt NThreadWaitState::UnBlockT(TUint /*aType*/, TAny* /*aWaitObj*/, TInt /*aReturnValue*/)
	{
	asm("stmfd	sp!, {r4-r6,lr} ");
	asm("mov	r6, r2 ");					// r6 = aWaitObj
	asm("mov	r2, #0 ");
	__DATA_MEMORY_BARRIER__(r2);
	asm("1:		");
	LDREXD(		4,0);						// r5:r4 = oldws64
	asm("cmp	r5, r6 ");					// does iWaitObj match?
	asm("bne	2f ");						// no
	asm("eor	r12, r4, r1, lsl #8 ");		// does wait type match?
	asm("cmp	r12, #%a0" : : "i" ((TInt)EWtStDead));
	asm("bhs	2f ");						// no
	STREXD(		12,2,0);					// yes - wait matches - try to write return value
	asm("cmp	r12, #0 ");					// success?
	asm("bne	1b ");						// no - retry
	asm("mov	r6, r0 ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("blne	CancelTimerT__16NThreadWaitState ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStWaitActive));
	asm("beq	0f ");
	asm("ldr	r1, [r6, #%a0]" : : "i" (_FOFF(NThreadBase,iPauseCount)-_FOFF(NThreadBase,iWaitState)));
	asm("sub	r0, r6, #%a0" : : "i" _FOFF(NThreadBase,iWaitState));	// r0 = Thread()
	asm("movs	r1, r1, lsl #16 ");				// check if iPauseCount=iSuspendCount=0
	asm("bleq	ReadyT__12NSchedulableUi ");	// if so, make thread ready
	asm("0:		");
	asm("mov	r0, #0 ");
	__POPRET("	r4-r6,");					// return KErrNone

	asm("2:		");
	STREXD(		12,4,0);					// no matching wait - write back to check atomicity
	asm("cmp	r12, #0 ");					// success?
	asm("bne	1b ");						// no - retry
	asm("mvn	r0, #%a0" : : "i" (~KErrGeneral));
	__POPRET("	r4-r6,");					// no matching wait - return KErrGeneral
	}

__NAKED__ TUint32 NThreadWaitState::ReleaseT(TAny*& /*aWaitObj*/, TInt /*aReturnValue*/)
	{
	asm("stmfd	sp!, {r4-r5} ");
	asm("mov	r3, r2 ");
	asm("mov	r2, #0 ");
	__DATA_MEMORY_BARRIER__(r2);
	asm("1:		");
	LDREXD(		4,0);
	asm("and	r2, r4, #%a0" : : "i" ((TInt)EWtStDead));
	STREXD(		12,2,0);
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("str	r5, [r1] ");
	asm("tst	r4, #%a0" : : "i" ((TInt)EWtStTimeout));
	asm("bne	2f ");
	asm("mov	r0, r4 ");
	asm("ldmfd	sp!, {r4-r5} ");
	__JUMP(,	lr);

	asm("2:		");
	asm("mov	r5, lr ");
	asm("bl		CancelTimerT__16NThreadWaitState ");
	asm("mov	r0, r4 ");
	asm("mov	lr, r5 ");
	asm("ldmfd	sp!, {r4-r5} ");
	__JUMP(,	lr);
	}
#endif


#ifdef __FAST_MUTEX_MACHINE_CODED__
/******************************************************************************
 * Fast mutex
 ******************************************************************************/

/** Releases a previously acquired fast mutex.
	
	Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
	for you.
	
	@pre The calling thread holds the mutex.
	@pre Kernel must be locked.
	
	@post Kernel is locked.
	
	@see NFastMutex::Wait()
	@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NFastMutex::Signal()
	{
	ASM_DEBUG1(FMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
	asm("stmfd	sp!, {r0,lr} ");
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmsignal ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r0,lr} ");
#endif
	GET_RWNO_TID(,r3);
	asm("mov	r12, #0 ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	__ASM_CLI();
	asm("1:		");
	LDREX(		2,0);				// r2=aMutex->iHoldingThread
	asm("cmp	r2, r1 ");			// anyone else waiting?
	asm("mov	r2, #0 ");
	asm("bne	2f ");				// branch out if someone else waiting
	STREX(		12,2,0);			// else try to clear the holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	__JUMP(,lr);					// mutex released without contention

#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmsignal: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexSignal)));
#endif

	// there is contention
	asm("2:		");
	asm("orr	r12, r0, #1 ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	asm("b		DoSignalL__10NFastMutex ");
	}
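
/* The uncontended path above, as a sketch (CompareAndSwap is a hypothetical
   stand-in for the LDREX/STREX pair):

	void NFastMutex::Signal()
		{
		NThreadBase* me = CurrentThread();
		if (CompareAndSwap(&iHoldingThread, me, 0))
			{
			me->iHeldFastMutex = 0;					// released without contention
			return;
			}
		me->iHeldFastMutex = TLinAddr(this) | 1;	// flag release in progress
		DoSignalL();								// slow path - wake a waiter
		}
*/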


/** Acquires the System Lock.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	@post System lock is held.

	@see NKern::UnlockSystem()
	@see NKern::FMWait()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled

*/
EXPORT_C __NAKED__ void NKern::LockSystem()
	{
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMWait() ... */
	}

/** Acquires a fast mutex.

    This will block until the mutex is available, and causes
	the thread to enter an implicit critical section until the mutex is released.

	@param aMutex The fast mutex to acquire.
	
	@post The calling thread holds the mutex.
	
	@see NFastMutex::Wait()
	@see NKern::FMSignal()

	@pre No fast mutex can be held.
	@pre Call in a thread context.
	@pre Kernel must be unlocked
	@pre interrupts enabled

*/
EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex* /*aMutex*/)
	{
	ASM_DEBUG1(NKFMWait,r0);

	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("1:		");
	LDREX(		2,0);				// r2=aMutex->iHoldingThread
	asm("cmp	r2, #0 ");			//
	asm("bne	2f ");				// branch out if mutex held
	STREX(		12,1,0);			// else try to set us as holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__DATA_MEMORY_BARRIER__(r12);
	__ASM_STI();
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmwait ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("b		OutX__6BTraceUlUlUlUl ");
#endif
	__JUMP(,lr);					// mutex acquired without contention

	// there is contention
	asm("2:		");
	asm("mov	r2, #1 ");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("stmfd	sp!, {r4,lr} ");
	asm("bl		DoWaitL__10NFastMutex ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");

	asm("__SystemLock: ");
	asm(".word	%a0" : : "i" ((TInt)&TheScheduler.iLock));
#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmwait: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexWait)));
#endif
	}
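
/* The uncontended path above, as a sketch:

	void NKern::FMWait(NFastMutex* aMutex)
		{
		NThreadBase* me = CurrentThread();
		if (CompareAndSwap(&aMutex->iHoldingThread, 0, me))
			{
			me->iHeldFastMutex = aMutex;	// acquired without contention
			return;
			}
		// contended: record the mutex, lock the kernel, block in DoWaitL()
		// and finally NKern::Unlock()
		}
*/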


/** Releases the System Lock.

	@pre System lock must be held.

	@see NKern::LockSystem()
	@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NKern::UnlockSystem()
	{
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMSignal() ... */
	}

/** Releases a previously acquired fast mutex.
	
	@param aMutex The fast mutex to release.
	
	@pre The calling thread holds the mutex.
	
	@see NFastMutex::Signal()
	@see NKern::FMWait()
*/
EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex* /*aMutex*/)
	{
	ASM_DEBUG1(NKFMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
	asm("stmfd	sp!, {r0,lr} ");
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmsignal ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r0,lr} ");
#endif
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("mov	r12, #0 ");
	__DATA_MEMORY_BARRIER__(r12);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("1:		");
	LDREX(		12,0);				// r12=aMutex->iHoldingThread
	asm("mov	r2, #0 ");
	asm("cmp	r12, r1 ");			// anyone else waiting?
	asm("bne	2f ");				// branch out if someone else waiting
	STREX(		12,2,0);			// else try to clear the holding thread
	asm("teq	r12, #0 ");
	asm("bne	1b ");
	asm("str	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	__JUMP(,lr);					// mutex released without contention

	// there is contention
	asm("2:		");
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r12, #1 ");
	asm("orr	r4, r0, #1 ");
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("str	r4, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	__ASM_STI();
	asm("bl		DoSignalL__10NFastMutex ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("b		Unlock__5NKern ");
	}


/** Temporarily releases the System Lock if there is contention.

    If there
	is another thread attempting to acquire the System lock, the calling
	thread releases the mutex and then acquires it again.
	
	This is more efficient than the equivalent code:
	
	@code
	NKern::UnlockSystem();
	NKern::LockSystem();
	@endcode

	Note that this can only allow higher priority threads to use the System
	lock, as a lower priority thread cannot cause contention on a fast mutex.

	@return	TRUE if the system lock was relinquished, FALSE if not.

	@pre	System lock must be held.

	@post	System lock is held.

	@see NKern::LockSystem()
	@see NKern::UnlockSystem()
*/
EXPORT_C __NAKED__ TBool NKern::FlashSystem()
	{
//	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
	asm("ldr	r0, __SystemLock ");

	/* fall through to FMFlash() ... */
	}

/** Temporarily releases a fast mutex if there is contention.

    If there is another thread attempting to acquire the mutex, the calling
	thread releases the mutex and then acquires it again.
	
	This is more efficient than the equivalent code:
	
	@code
	NKern::FMSignal();
	NKern::FMWait();
	@endcode

	@return	TRUE if the mutex was relinquished, FALSE if not.

	@pre	The mutex must be held.

	@post	The mutex is held.
*/
EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex* /*aM*/)
	{
	ASM_DEBUG1(NKFMFlash,r0);
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("ldr	r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldrb	r2, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iMutexPri));
	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iBasePri));
	asm("cmp	r2, r12 ");
	asm("bhs	1f ");							// a thread of greater or equal priority is waiting
	__ASM_STI();
#ifdef BTRACE_FAST_MUTEX
//	BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
	asm("mov	r1, r0 ");
	asm("ldr	r0, btrace_hdr_fmflash ");
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r2, #0 ");
	asm("mov	r3, #0 ");
	asm("bl		OutX__6BTraceUlUlUlUl ");
	asm("ldmfd	sp!, {r4,lr} ");
#endif
	asm("mov	r0, #0 ");
	__JUMP(,lr);								// return FALSE

#ifdef BTRACE_FAST_MUTEX
	asm("btrace_hdr_fmflash: ");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexFlash)));
#endif

	asm("1:		");
	asm("mov	r12, #1 ");
	asm("str	r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	asm("stmfd	sp!, {r4,lr} ");
	asm("mov	r4, r0 ");
	asm("bl		Signal__10NFastMutex ");
	asm("bl		PreemptionPoint__5NKern ");
	asm("mov	r0, r4 ");
	asm("bl		Wait__10NFastMutex ");
	asm("bl		Unlock__5NKern ");
	asm("ldmfd	sp!, {r4,lr} ");
	asm("mov	r0, #1 ");
	__JUMP(,lr);								// return TRUE
	}
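
/* FMFlash() above skips the release/reacquire entirely unless a waiter of
   equal or higher priority exists; as a sketch:

	TBool NKern::FMFlash(NFastMutex* aM)
		{
		NThreadBase* me = CurrentThread();
		if (me->iMutexPri < me->iBasePri)
			return FALSE;			// nobody of sufficient priority is waiting
		NKern::Lock();
		aM->Signal();
		NKern::PreemptionPoint();
		aM->Wait();
		NKern::Unlock();
		return TRUE;
		}
*/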
#endif



/** Check whether a thread holds a fast mutex.
	If so set the mutex contention flag and return TRUE, else return FALSE.

	Called with kernel lock held

	@internalComponent
 */
__NAKED__ TBool NThreadBase::CheckFastMutexDefer()
	{
	asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("bics r2, r1, #3 ");		// r2 = pointer to mutex if any, r1 bit 0 = flag
	asm("bne 1f ");
	asm("mov r0, #0 ");				// no mutex - return FALSE
	__JUMP(,lr);

	// iHeldFastMutex points to a mutex
	asm("1: ");
	asm("tst r1, #1 ");				// test flag
	asm("beq 2f ");					// branch if not being released

	// mutex being released
	asm("3: ");
	LDREX(3,2);						// r3 = m->iHoldingThread
	asm("sub r3, r3, r0 ");			// m->iHoldingThread - this
	asm("cmp r3, #1 ");
	asm("bhi 4f ");					// if m->iHoldingThread != this or this+1, skip
	asm("orr r3, r0, #1 ");			// if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
	STREX(12,3,2);
	asm("teq r12, #0 ");
	asm("bne 3b ");
	asm("mov r0, #1 ");				// return TRUE
	__JUMP(,lr);

	asm("4: ");
	asm("mov r3, #0 ");				// already released, so set iHeldFastMutex=0
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("0: ");
	asm("mov r0, #0 ");				// no mutex - return FALSE
	__JUMP(,lr);

	// mutex being acquired or has been acquired
	// if it has been acquired set the contention flag and return TRUE, else return FALSE
	asm("2: ");
	LDREX(3,2);						// r3 = m->iHoldingThread
	asm("sub r3, r3, r0 ");			// m->iHoldingThread - this
	asm("cmp r3, #1 ");
	asm("bhi 0b ");					// if m->iHoldingThread != this or this+1, finish and return FALSE
	asm("orr r3, r0, #1 ");			// if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
	STREX(12,3,2);
	asm("teq r12, #0 ");
	asm("bne 2b ");
	asm("mov r0, #1 ");				// return TRUE
	__JUMP(,lr);

	asm("4: ");
	asm("mov r3, #0 ");				// already released, so set iHeldFastMutex=0
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
	asm("mov r0, #0 ");				// no mutex - return FALSE
	__JUMP(,lr);
	}


/******************************************************************************
 * IDFC/DFC
 ******************************************************************************/

/**	Transition the state of an IDFC or DFC when Add() is called

	0000->008n, 00Cn->00En, all other states unchanged
	Return original state.

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::AddStateChange()
	{
	GET_RWNO_TID(, r1);				// r1->SubScheduler
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
	__DATA_MEMORY_BARRIER_Z__(r12);
	asm("1: ");
	LDREXH(0,3);
	asm("cmp r0, #0 ");				// original state 0000 ?
	asm("orreq r2, r1, #0x0080 ");	// yes -> 008n
	asm("movne r2, r0 ");			// no -> R2=original state ...
	asm("eorne r12, r0, #0x00C0 ");	// ... and R12=original state^00C0 ...
	asm("cmpne r12, #0x0020 ");		// ... and check if result < 0020 (i.e. original==00C0..00DF)
	asm("addlo r2, r2, #0x0020 ");	// 00Cn->00En otherwise leave R2 alone
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
	__JUMP(,lr);
	}
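
/* The transition above, as a sketch (iDfcState is a 16-bit field accessed
   with LDREXH/STREXH; the retry loop is elided and CurrentCpu() is a
   hypothetical stand-in for the TSubScheduler::iCpuNum load):

	TUint32 TDfc::AddStateChange()
		{
		TUint32 s = iDfcState;
		if (s == 0)
			iDfcState = 0x80 | CurrentCpu();	// 0000 -> 008n: queue on this CPU
		else if ((s & 0xFFE0) == 0x00C0)
			iDfcState = s + 0x20;				// 00Cn -> 00En
		return s;
		}
*/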

/**	Transition the state of an IDFC just before running it.

	002g->00Cn, 008n->00Cn, 00An->00Cn, XXYY->XX00, XX00->0000
	other initial states invalid
	Return original state

	Enter and return with interrupts disabled.
*/
__NAKED__ TUint32 TDfc::RunIDFCStateChange()
	{
	GET_RWNO_TID(, r1);				// r1->SubScheduler
	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
	__DATA_MEMORY_BARRIER_Z__(r12);
#ifdef _DEBUG
	asm("str r4, [sp, #-4]! ");
	asm("ldr r4, __IdleGeneration ");
	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
#endif
	asm("1: ");
	LDREXH(0,3);
	asm("eor r2, r0, #0x0080 ");
	asm("cmp r2, #0x0040 ");
	asm("bhs 2f ");					// branch out unless 008n or 00An
#ifdef _DEBUG
	asm("and r2, r0, #0x001F ");
	asm("cmp r2, r1 ");
	asm("bne 0f ");					// if n!=current CPU number, die
#endif
	asm("orr r2, r1, #0x00C0 ");	// 008n->00Cn, 00An->00Cn
	asm("3: ");
	STREXH(12,2,3);
	asm("cmp r12, #0 ");
	asm("bne 1b ");
	__DATA_MEMORY_BARRIER__(r12);
#ifdef _DEBUG
	asm("ldr r4, [sp], #4 ");
#endif
	__JUMP(,lr);

	asm("2: ");
	asm("bic r2, r0, #1 ");
	asm("cmp r2, #0x0020 ");
	asm("orreq r2, r1, #0x00C0 ");	// 002g->00Cn
#ifdef _DEBUG
	asm("bne 4f ");
	asm("cmp r0, r4 ");
	asm("bne 0f ");					// wrong idle state
	asm("4: ");
#endif
	asm("beq 3b ");
	asm("cmp r0, #0x0100 ");		// C=1 if XXYY or XX00, C=0 if bad state
	asm("bic r2, r0, #0x00FF ");	// XXYY->XX00, C unchanged
	asm("tst r0, #0x00FF ");		// C unchanged
	asm("moveq r2, #0 ");			// XX00->0000, C unchanged
	asm("bcs 3b ");					// branch to STREX if valid state

	asm("0: ");
	__ASM_CRASH();					// bad state

	asm("__IdleGeneration: ");
	asm(".word %a0 " : : "i" ((TInt)&TheScheduler.iIdleGeneration));
	}
  1488 
  1489 /**	Transition the state of an IDFC just after running it.
  1490 
  1491 	First swap aS->iCurrentIDFC with 0
  1492 	If original value != this, return 0xFFFFFFFF and don't touch *this
  1493 	Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
  1494 	other initial states invalid
  1495 	Return original state
  1496 
  1497 	Enter and return with interrupts disabled.
  1498 */
  1499 __NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
  1500 	{
  1501 	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler,iCurrentIDFC));
  1502 	__DATA_MEMORY_BARRIER_Z__(r12);
  1503 	asm("1: ");
  1504 	LDREX(2,1);
  1505 	asm("subs r2, r2, r0 ");		// aS->iCurrentIDFC == this?
  1506 	asm("bne 9f ");					// no - bail out immediately
  1507 	STREX(12,2,1);					// yes - set aS->iCurrentIDFC=0
  1508 	asm("cmp r12, #0 ");
  1509 	asm("bne 1b ");
  1510 
  1511 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1512 	__DATA_MEMORY_BARRIER__(r12);
  1513 #ifdef _DEBUG
  1514 	asm("str r4, [sp, #-4]! ");
  1515 	GET_RWNO_TID(, r4);				// r4->SubScheduler
  1516 	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r4 = current CPU number
  1517 #endif
  1518 	asm("2: ");
  1519 	LDREXH(0,3);					// r0 = original DFC state
  1520 	asm("mov r2, #0 ");				// r2 = 0 to begin with
  1521 #ifdef _DEBUG
  1522 	asm("tst r0, #0x00FF ");
  1523 	asm("beq 5f ");
  1524 	asm("eor r12, r0, r4 ");		// original state ^ CPU number, should be xxC0, xxE0 or xx60
  1525 	asm("and r12, r12, #0x00E0 ");
  1526 	asm("cmp r12, #0x00E0 ");
  1527 	asm("cmpne r12, #0x00C0 ");
  1528 	asm("cmpne r12, #0x0060 ");
  1529 	asm("beq 5f ");
  1530 	__ASM_CRASH();					// bad state
  1531 	asm("5: ");
  1532 #endif
  1533 	asm("bic r12, r0, #0x001F ");
  1534 	asm("cmp r12, #0x00E0 ");
  1535 	asm("bhi 4f ");					// branch out if XXYY or XX00
  1536 	asm("subeq r2, r0, #0x0060 ");	// 00En->008n
  1537 	asm("cmp r12, #0x0060 ");
  1538 	asm("moveq r2, r0 ");			// 006n->006n, else R2=0
  1539 	asm("3: ");
  1540 	STREXH(12,2,3);
  1541 	asm("cmp r12, #0 ");
  1542 	asm("bne 2b ");
  1543 	__DATA_MEMORY_BARRIER__(r12);
  1544 #ifdef _DEBUG
  1545 	asm("ldr r4, [sp], #4 ");
  1546 #endif
  1547 	__JUMP(,lr);
  1548 
  1549 	asm("4: ");
  1550 	asm("tst r0, #0x00FF ");
  1551 	asm("bicne r2, r0, #0x00FF ");	// XXYY->XX00, XX00->0000
  1552 	asm("b 3b ");
  1553 
  1554 	asm("9: ");
  1555 	asm("mvn r0, #0 ");				// return 0xFFFFFFFF
  1556 	__JUMP(,lr);
  1557 	}
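
       // A sketch of the iDfcState half of EndIDFCStateChange() in the style
       // of AtomicTransition() above (the iCurrentIDFC swap is elided; an
       // illustration, not the kernel's code):
       //
       //	uint16_t EndIDFCSketch(DfcState16& aState)
       //		{
       //		return AtomicTransition(aState, [](uint16_t s) -> uint16_t {
       //			if (s & 0xFF00)		// cancel in progress on some CPU
       //				return (s & 0x00FF) ? uint16_t(s & 0xFF00)	// XXYY -> XX00
       //				                    : uint16_t(0);		// XX00 -> 0000
       //			if ((s & 0xE0) == 0xE0)	// 00En: re-queued while running
       //				return uint16_t(s - 0x60);	// -> 008n
       //			if ((s & 0xE0) == 0x60)	// 006n: QueueOnIdle() while running
       //				return s;	// left for EndIDFCStateChange2()
       //			return 0;		// 00Cn -> 0000
       //			});
       //		}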
  1558 
  1559 /**	Complete the state transition of an IDFC just after it has run, where QueueOnIdle() was called while it was running.
  1560 
  1561 	006n->002g where g = TheScheduler.iIdleGeneration
  1562 	XX6n->XX00
  1563 	other initial states invalid
  1564 	Return original state
  1565 
  1566 	Enter and return with interrupts disabled.
  1567 */
  1568 __NAKED__ TUint32 TDfc::EndIDFCStateChange2()
  1569 	{
  1570 	asm("ldr r12, __IdleGeneration ");
  1571 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1572 #ifdef _DEBUG
  1573 	asm("str r4, [sp, #-4]! ");
  1574 	GET_RWNO_TID(, r4);				// r4->SubScheduler
  1575 	asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r4 = current CPU number
  1576 #endif
  1577 	asm("ldrb r1, [r12] ");			// r1 = TheScheduler.iIdleGeneration
  1578 	asm("1: ");
  1579 	LDREXH(0,3);
  1580 #ifdef _DEBUG
  1581 	asm("eor r12, r0, r4 ");
  1582 	asm("and r12, r12, #0x00FF ");
  1583 	asm("cmp r12, #0x0060 ");		// should be 006n or XX6n
  1584 	asm("beq 2f ");
  1585 	__ASM_CRASH();					// if not, die
  1586 	asm("2: ");
  1587 #endif
  1588 	asm("tst r0, #0xFF00 ");		// XX6n or 006n ?
  1589 	asm("orreq r2, r1, #0x0020 ");	// 006n->002g
  1590 	asm("bicne r2, r0, #0x00FF ");	// XX6n->XX00
  1591 	STREXH(12,2,3);
  1592 	asm("cmp r12, #0 ");
  1593 	asm("bne 1b ");
  1594 	__DATA_MEMORY_BARRIER__(r12);
  1595 #ifdef _DEBUG
  1596 	asm("ldr r4, [sp], #4 ");
  1597 #endif
  1598 	__JUMP(,lr);
  1599 	}
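
       // Equivalent sketch for EndIDFCStateChange2(), with aIdleGen standing
       // for TheScheduler.iIdleGeneration (illustration only):
       //
       //	uint16_t EndIDFC2Sketch(DfcState16& aState, uint8_t aIdleGen)
       //		{
       //		return AtomicTransition(aState, [=](uint16_t s) -> uint16_t {
       //			return (s & 0xFF00) ? uint16_t(s & 0xFF00)	// XX6n -> XX00
       //			                    : uint16_t(0x20 | aIdleGen);	// 006n -> 002g
       //			});
       //		}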
  1600 
  1601 /**	Transition the state of a DFC just before moving it from the IDFC queue to
  1602 	its final queue.
  1603 
  1604 	002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
  1605 	other initial states invalid
  1606 	Return original state
  1607 */
  1608 __NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
  1609 	{
  1610 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1611 	__DATA_MEMORY_BARRIER_Z__(r12);
  1612 #ifdef _DEBUG
  1613 	asm("str r4, [sp, #-4]! ");
  1614 	asm("ldr r4, __IdleGeneration ");
  1615 	GET_RWNO_TID(, r1);				// r1->SubScheduler
  1616 	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
  1617 	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r1 = current CPU number
  1618 	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
  1619 	asm("orr r1, r1, #0x0080 ");
  1620 #endif
  1621 	asm("1: ");
  1622 	LDREXH(0,3);
  1623 #ifdef _DEBUG
  1624 	asm("cmp r0, #0 ");
  1625 	asm("beq 0f ");					// 0000 -> die
  1626 	asm("ands r2, r0, #0x00FF ");
  1627 	asm("beq 3f ");					// XX00 -> OK
  1628 	asm("cmp r2, r4 ");				// 002g ?
  1629 	asm("beq 3f ");					// yes -> OK
  1630 	asm("cmp r2, r1 ");				// 008n ?
  1631 	asm("beq 3f ");					// yes -> OK
  1632 	asm("0: ");
  1633 	__ASM_CRASH();					// otherwise die
  1634 	asm("3: ");
  1635 #endif
  1636 	asm("bics r2, r0, #0x00FF ");	// XXYY->XX00
  1637 	asm("moveq r2, #0x0001 ");		// 002g,008n->0001
  1638 	asm("beq 2f ");
  1639 	asm("tst r0, #0x00FF ");
  1640 	asm("moveq r2, #0 ");			// XX00->0000
  1641 	asm("2: ");
  1642 	STREXH(12,2,3);
  1643 	asm("cmp r12, #0 ");
  1644 	asm("bne 1b ");
  1645 	__DATA_MEMORY_BARRIER__(r12);
  1646 #ifdef _DEBUG
  1647 	asm("ldr r4, [sp], #4 ");
  1648 #endif
  1649 	__JUMP(,lr);
  1650 	}
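
       // Equivalent sketch for MoveToFinalQStateChange() (illustration only;
       // invalid initial states are assumed not to occur):
       //
       //	uint16_t MoveToFinalQSketch(DfcState16& aState)
       //		{
       //		return AtomicTransition(aState, [](uint16_t s) -> uint16_t {
       //			if (s & 0xFF00)		// a cancel is in progress
       //				return (s & 0x00FF) ? uint16_t(s & 0xFF00)	// XXYY -> XX00
       //				                    : uint16_t(0);		// XX00 -> 0000
       //			return 1;		// 002g, 008n -> 0001 (on final queue)
       //			});
       //		}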
  1651 
  1652 /**	Transition the state of an IDFC when transferring it to another CPU
  1653 
  1654 	002g->00Am, 008n->00Am where m = aCpu, XXYY->XX00, XX00->0000
  1655 	other initial states invalid
  1656 	Return original state
  1657 
  1658 	Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
  1659 */
  1660 __NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
  1661 	{
  1662 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1663 	__DATA_MEMORY_BARRIER_Z__(r12);
  1664 #ifdef _DEBUG
  1665 	asm("stmfd sp!, {r4-r5} ");
  1666 	asm("ldr r4, __IdleGeneration ");
  1667 	GET_RWNO_TID(, r5);				// r5->SubScheduler
  1668 	asm("ldrb r4, [r4] ");			// r4 = TheScheduler.iIdleGeneration
  1669 	asm("ldr r5, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));	// r5 = current CPU number
  1670 	asm("eor r4, r4, #0x0021 ");	// r4 = expected state of idle IDFCs
  1671 	asm("orr r5, r5, #0x0080 ");
  1672 #endif
  1673 	asm("1: ");
  1674 	LDREXH(0,3);
  1675 #ifdef _DEBUG
  1676 	asm("cmp r0, #0 ");
  1677 	asm("beq 0f ");					// 0000 -> die
  1678 	asm("ands r2, r0, #0x00FF ");
  1679 	asm("beq 3f ");					// XX00 -> OK
  1680 	asm("cmp r2, r4 ");				// 002g ?
  1681 	asm("beq 3f ");					// yes -> OK
  1682 	asm("cmp r2, r5 ");				// 008n ?
  1683 	asm("beq 3f ");					// yes -> OK
  1684 	asm("0: ");
  1685 	__ASM_CRASH();					// otherwise die
  1686 	asm("3: ");
  1687 #endif
  1688 	asm("bics r2, r0, #0x00FF ");	// XXYY->XX00
  1689 	asm("orreq r2, r1, #0x00A0 ");	// 002g,008n->00Am
  1690 	asm("beq 2f ");
  1691 	asm("tst r0, #0x00FF ");
  1692 	asm("moveq r2, #0 ");			// XX00->0000
  1693 	asm("2: ");
  1694 	STREXH(12,2,3);
  1695 	asm("cmp r12, #0 ");
  1696 	asm("bne 1b ");
  1697 	__DATA_MEMORY_BARRIER__(r12);
  1698 #ifdef _DEBUG
  1699 	asm("ldmfd sp!, {r4-r5} ");
  1700 #endif
  1701 	__JUMP(,lr);
  1702 	}
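
       // The retry loop here follows the same shape as the sketch after
       // MoveToFinalQStateChange() above, with 0x00A0|aCpu written in place of
       // 0x0001 for the uncancelled states.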
  1703 
  1704 /**	Transition the state of an IDFC/DFC just before cancelling it.
  1705 
  1706 	0000->0000 (left alone), otherwise XXYY->ZZYY where ZZ = XX with the current CPU's bit set
  1707 	Return original state
  1708 
  1709 	Enter and return with interrupts disabled.
  1710 */
  1711 __NAKED__ TUint32 TDfc::CancelInitialStateChange()
  1712 	{
  1713 	GET_RWNO_TID(,r1);
  1714 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1715 	__DATA_MEMORY_BARRIER_Z__(r12);
  1716 	asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuMask));	// r1 = mask of current CPU number
  1717 
  1718 	asm("1: ");
  1719 	LDREXH(0,3);
  1720 	asm("cmp r0, #0 ");
  1721 	asm("beq 2f ");				// if original state 0000 leave alone
  1722 	asm("orr r2, r0, r1, lsl #8 ");	// else set the current CPU's bit in bits 8-15
  1723 	STREXH(12,2,3);
  1724 	asm("cmp r12, #0 ");
  1725 	asm("bne 1b ");
  1726 	asm("2: ");
  1727 	__DATA_MEMORY_BARRIER__(r12);
  1728 	__JUMP(,lr);
  1729 	}
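
       // Equivalent sketch for CancelInitialStateChange(); aCpuMask stands for
       // the current CPU's iCpuMask (illustration only). One difference: the
       // real code skips the store entirely for state 0000, whereas a CAS loop
       // harmlessly writes the same value back.
       //
       //	uint16_t CancelInitialSketch(DfcState16& aState, unsigned aCpuMask)
       //		{
       //		return AtomicTransition(aState, [=](uint16_t s) -> uint16_t {
       //			return s ? uint16_t(s | (aCpuMask << 8))	// flag this CPU's cancel
       //			         : uint16_t(0);			// 0000 left alone
       //			});
       //		}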
  1730 
  1731 /**	Transition the state of an IDFC/DFC at the end of a cancel operation
  1732 
  1733 	XXYY->XX00, XX00->0000
  1734 	Return original state
  1735 
  1736 	Enter and return with interrupts disabled.
  1737 */
  1738 __NAKED__ TUint32 TDfc::CancelFinalStateChange()
  1739 	{
  1740 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1741 	__DATA_MEMORY_BARRIER_Z__(r12);
  1742 
  1743 	asm("1: ");
  1744 	LDREXH(0,3);
  1745 	asm("tst r0, #0x00FF ");
  1746 	asm("bicne r2, r0, #0x00FF ");	// XXYY->XX00
  1747 	asm("moveq r2, #0 ");			// XX00->0000
  1748 	STREXH(12,2,3);
  1749 	asm("cmp r12, #0 ");
  1750 	asm("bne 1b ");
  1751 	__DATA_MEMORY_BARRIER__(r12);
  1752 	__JUMP(,lr);
  1753 	}
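
       // Equivalent sketch for CancelFinalStateChange() (illustration only):
       //
       //	uint16_t CancelFinalSketch(DfcState16& aState)
       //		{
       //		return AtomicTransition(aState, [](uint16_t s) -> uint16_t {
       //			return (s & 0x00FF) ? uint16_t(s & 0xFF00)	// XXYY -> XX00
       //			                    : uint16_t(0);		// XX00 -> 0000
       //			});
       //		}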
  1754 
  1755 /**	Transition the state of an IDFC or DFC when QueueOnIdle() is called
  1756 
  1757 	0000->002g where g = TheScheduler.iIdleGeneration,
  1758 	00Cn->006n, all other states unchanged
  1759 	Return original state.
  1760 
  1761 	Enter and return with interrupts disabled and IdleSpinLock held.
  1762 */
  1763 __NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
  1764 	{
  1765 	asm("ldr r12, __IdleGeneration ");
  1766 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1767 	asm("ldrb r1, [r12] ");			// r1 = TheScheduler.iIdleGeneration
  1768 	__DATA_MEMORY_BARRIER_Z__(r12);
  1769 	asm("1: ");
  1770 	LDREXH(0,3);
  1771 	asm("cmp r0, #0 ");				// original state 0000 ?
  1772 	asm("orreq r2, r1, #0x0020 ");	// yes -> 002g
  1773 	asm("movne r2, r0 ");			// no -> R2=original state ...
  1774 	asm("eorne r12, r0, #0x00C0 ");	// ... and R12=original state^00C0 ...
  1775 	asm("cmpne r12, #0x0020 ");		// ... and check if result < 0020 (i.e. original==00C0..00DF)
  1776 	asm("sublo r2, r2, #0x0060 ");	// 00Cn->006n otherwise leave R2 alone
  1777 	STREXH(12,2,3);
  1778 	asm("cmp r12, #0 ");
  1779 	asm("bne 1b ");
  1780 	__DATA_MEMORY_BARRIER__(r12);
  1781 	__JUMP(,lr);
  1782 	}
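
       // Equivalent sketch for QueueOnIdleStateChange(), with aIdleGen standing
       // for TheScheduler.iIdleGeneration (illustration only):
       //
       //	uint16_t QueueOnIdleSketch(DfcState16& aState, uint8_t aIdleGen)
       //		{
       //		return AtomicTransition(aState, [=](uint16_t s) -> uint16_t {
       //			if (s == 0)
       //				return uint16_t(0x20 | aIdleGen);	// 0000 -> 002g
       //			if ((s & 0xFFE0) == 0x00C0)
       //				return uint16_t(s - 0x60);		// 00Cn -> 006n
       //			return s;	// all other states unchanged
       //			});
       //		}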
  1783 
  1784 
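       /**	Reset the state of an IDFC/DFC to 0000.

       	In debug builds, crash if the state was already 0000.
       */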
  1785 __NAKED__ void TDfc::ResetState()
  1786 	{
  1787 	asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
  1788 	__DATA_MEMORY_BARRIER_Z__(r2);
  1789 #ifdef _DEBUG
  1790 	asm("1: ");
  1791 	LDREXH(0,3);
  1792 	asm("cmp r0, #0 ");
  1793 	asm("beq 0f ");				// if state already zero, die
  1794 	STREXH(12,2,3);
  1795 	asm("cmp r12, #0 ");
  1796 	asm("bne 1b ");
  1797 #else
  1798 	asm("strh r2, [r3] ");		// __e32_atomic_store_rel16(&iDfcState, 0)
  1799 #endif
  1800 	__JUMP(,lr);
  1801 #ifdef _DEBUG
  1802 	asm("0: ");
  1803 	__ASM_CRASH();
  1804 #endif
  1805 	}
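
       // The release-build path is a plain store preceded by a DMB, i.e. a
       // store-release, as the comment above notes; in the sketch vocabulary
       // used in this file:
       //
       //	void ResetStateSketch(DfcState16& aState)
       //		{
       //		aState.store(0, std::memory_order_release);	// iDfcState = 0000
       //		}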
  1806 
  1807 
  1808