Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkernsmp\arm\ncthrd.cia
#define __INCLUDE_NTHREADBASE_DEFINES__
#define iDfcState i8816.iHState16
extern "C" void send_accumulated_resched_ipis();
/******************************************************************************
******************************************************************************/
extern "C" __NAKED__ void __StartThread()
// R0->TSubScheduler, R1=0, R2=1, R3->current thread
// Interrupts disabled
// need to send any outstanding reschedule IPIs
asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack, iCPSR));
asm("tst r0, #0x0f ");
USER_MEMORY_GUARD_OFF(,r0,r0);
asm("ldmia sp, {r0-r14}^ "); // load initial values for R0-R12, R13_usr, R14_usr
asm("nop "); // don't access banked register immediately after
asm("add sp, sp, #64 "); // point to saved PC, CPSR (skip iExcCode)
asm("adr lr, 1f "); // set lr_svc in case thread returns
RFEIAW(13); // restore PC and CPSR - jump to thread entry point
asm("b " CSM_ZN5NKern4ExitEv); // if control returns, call NKern::Exit()
extern "C" __NAKED__ TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/)
asm("ldr r2, [r1], #4 ");
asm("add r0, r0, #1 ");
asm(".word " CSM_CFUNC(__StartThread));
asm(".word nkern_unlock_resched_return ");
asm(".word nkern_preemption_point_resched_return ");
asm(".word nkern_wfar_resched_return ");
asm(".word irq_resched_return ");
asm(".word exec_wfar_resched_return ");
/** Mark the beginning of an event handler tied to a thread or thread group
Return the number of the CPU on which the event handler should run
__NAKED__ TInt NSchedulable::BeginTiedEvent()
asm("add r1, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
LDREX(0,1); // r0 = original value of iEventState
asm("add r2, r0, #%a0" : : "i" ((TInt)EEventCountInc));
__DATA_MEMORY_BARRIER__(r3);
asm("tst r0, #%a0" : : "i" ((TInt)EEventParent));
asm("ldrne r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
asm("beq bte0 "); // EEventParent not set so don't look at group
asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("beq bte2 "); // parent not yet updated, use iNewParent
LDREX(0,2); // r0 = original value of iEventState
asm("add r3, r0, #%a0" : : "i" ((TInt)EEventCountInc));
__DATA_MEMORY_BARRIER__(r12);
asm("and r0, r0, #%a0" : : "i" ((TInt)EEventCpuMask));
__JUMP(,lr); // return event CPU
__DATA_MEMORY_BARRIER__(r3); // make sure iNewParent is read after iParent
asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("bne bte1 "); // iNewParent set so OK
__DATA_MEMORY_BARRIER__(r3); // make sure iParent is read after iNewParent
asm("ldr r2, [r1, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
asm("addne r2, r2, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("bne bte1 "); // iParent now set so OK, otherwise something is wrong
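// A rough C++ sketch (not part of the build) of the lock-free protocol the
// assembly above implements: atomically add EEventCountInc to iEventState,
// redoing the addition on the group (via iParent, or iNewParent while a
// join/leave is in flight) when the EEventParent flag is seen. AtomicAdd is
// a hypothetical stand-in for the LDREX/STREX retry loop.
//
//  TInt NSchedulable::BeginTiedEventSketch()
//      {
//      TUint32 s = AtomicAdd(&iEventState, EEventCountInc);    // count one event
//      if (s & EEventParent)
//          {
//          NSchedulable* p = iParent;                          // group, if published
//          if (!p || p == this)
//              p = ((NThreadBase*)this)->iNewParent;           // not yet: use iNewParent
//          s = AtomicAdd(&p->iEventState, EEventCountInc);     // count it on the group
//          }
//      return s & EEventCpuMask;   // CPU the handler should run on
//      }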
/** Mark the end of an event handler tied to a thread or thread group
__NAKED__ void NSchedulable::EndTiedEvent()
__DATA_MEMORY_BARRIER_Z__(r12);
asm("ldr r1, [r0, #%a0]!" : : "i" _FOFF(NSchedulable, iEventState));
asm("tst r1, #%a0" : : "i" ((TInt)EEventParent));
asm("bne etep0 "); // branch out if EEventParent set
// r0->NSchedulable::iEventState
asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc)); // decrement event count
asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc)); // check if now zero
asm("biccc r1, r1, #0xFF "); // if so, mask event CPU ...
asm("andcc r2, r1, #0x1F00 "); // ... and r2 = thread CPU << 8 ...
asm("orrcc r1, r1, r2, lsr #8 "); // ... and event CPU = thread CPU
asm("teq r12, #0 "); // test for success, leave carry alone
asm("bne ete1 "); // retry if STREX failed
asm("bcs ete2 "); // if not last tied event, finish
asm("tst r1, #%a0" : : "i" ((TInt)EDeferredReady));
asm("addne r0, r0, #%a0" : : "i" (_FOFF(NSchedulable,i_IDfcMem) - _FOFF(NSchedulable,iEventState)));
asm("bne " CSM_ZN4TDfc3AddEv ); // if deferred ready, add IDFC to action it
asm("ete2: "); // ready not deferred so finish
__DATA_MEMORY_BARRIER__(r12); // make sure iParent is read after seeing parent flag set
asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("beq ete_bad "); // no parent - shouldn't happen
asm("cmp r3, r0 "); // parent == this ?
asm("beq etep1 "); // if so, parent not yet updated so use iNewParent
asm("stmfd sp!, {r0,lr} "); // save this and return address
asm("mov r0, r3 "); // operate on parent
asm("bl ete1 "); // update parent state
asm("ldmfd sp!, {r0,lr} ");
asm("sub r1, r1, #%a0" : : "i" ((TInt)EEventCountInc)); // decrement event count
__DATA_MEMORY_BARRIER__(r12); // make sure iNewParent is read after iParent
asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NThreadBase,iNewParent) - _FOFF(NSchedulable,iEventState)));
asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("bne etep2 "); // iNewParent set so OK
__DATA_MEMORY_BARRIER__(r12); // make sure iParent is read after iNewParent
asm("ldr r3, [r0, #%a0]" : : "i" (_FOFF(NSchedulable,iParent) - _FOFF(NSchedulable,iEventState)));
asm("addne r3, r3, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("bne etep2 "); // iParent now set so OK, otherwise something is wrong
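// Companion sketch (illustration only) of the 'ete1' sequence above; the
// parent path (etep0) performs the same update on this object first and then
// repeats it on the group. When the last tied event completes, the event CPU
// is re-aimed at the thread CPU and a ready deferred while events were
// outstanding is actioned via the embedded IDFC. CompareAndSwap is a
// hypothetical stand-in for the LDREX/STREX retry loop.
//
//  void NSchedulable::EndTiedEventSketch()
//      {
//      TUint32 oldst, newst;
//      do  {
//          oldst = iEventState;
//          newst = oldst - EEventCountInc;         // one event finished
//          if (newst < EEventCountInc)             // that was the last one...
//              newst = (newst & ~0xFFu) | ((newst >> 8) & 0x1Fu); // ...event CPU = thread CPU
//          } while (!CompareAndSwap(&iEventState, oldst, newst));
//      if (newst < EEventCountInc && (newst & EDeferredReady))
//          ((TDfc*)&i_IDfcMem)->Add();             // action the deferred ready
//      }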
/** Check for concurrent tied events when a thread/group becomes ready
This is only ever called on a lone thread or a group, not on a thread
which is part of a group.
Update the thread CPU field in iEventState
If thread CPU != event CPU and event count nonzero, atomically
set the ready deferred flag and return TRUE, else return FALSE.
If event count zero, set event CPU = thread CPU atomically.
@param aCpu the CPU on which the thread/group is to become ready
@return TRUE if the ready must be deferred.
__NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt /*aCpu*/)
asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
LDREX(2,0); // r2 = original iEventState
asm("bic r3, r2, #0x1F00 "); // r3 = original iEventState with thread CPU zeroed out
asm("orr r3, r3, r1, lsl #8 "); // set thread CPU field = aCpu
asm("cmp r3, #%a0" : : "i" ((TInt)EEventCountInc));
asm("bhs 2f "); // branch if event count nonzero
asm("bic r3, r3, #0xFF "); // else mask event CPU ...
asm("orr r3, r3, r1 "); // ... and set event CPU = thread CPU = aCpu
asm("eor r0, r2, r3 "); // r0 = old event state ^ new event state
asm("and r0, r0, #%a0" : : "i" ((TInt)EDeferredReady));
__JUMP(,lr); // return TRUE if we just set EDeferredReady
// event count is nonzero
asm("eor r12, r3, r3, lsr #8 "); // r12 bottom 5 bits = thread CPU ^ event CPU
asm("tst r12, #0x1F "); // thread CPU == event CPU?
asm("orrne r3, r3, #%a0" : : "i" ((TInt)EDeferredReady)); // if not, set EDeferredReady
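// A C++ sketch (illustration only) of the interlock above, using the field
// layout the assembly relies on: event CPU in bits 0-4, thread CPU in bits
// 8-12, and the event count in the high-order bits, stepped by
// EEventCountInc. CompareAndSwap is a hypothetical stand-in for LDREX/STREX.
//
//  TBool NSchedulable::TiedEventReadyInterlockSketch(TInt aCpu)
//      {
//      TUint32 oldst, newst;
//      do  {
//          oldst = iEventState;
//          newst = (oldst & ~0x1F00u) | (TUint32(aCpu) << 8);  // thread CPU = aCpu
//          if (newst < EEventCountInc)
//              newst = (newst & ~0x1Fu) | TUint32(aCpu);       // idle: event CPU = aCpu too
//          else if ((newst ^ (newst >> 8)) & 0x1Fu)
//              newst |= EDeferredReady;    // event CPU differs: defer the ready
//          } while (!CompareAndSwap(&iEventState, oldst, newst));
//      return (oldst ^ newst) & EDeferredReady;    // TRUE if we just set it
//      }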
/** Check for concurrent tied events when a thread leaves a group
If event count zero, atomically set the event and thread CPUs to the
current CPU, clear the parent flag and return TRUE, else return FALSE.
@return TRUE if the parent flag has been cleared
@pre Preemption disabled
__NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
GET_RWNO_TID(, r1); // R1->SubScheduler
asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("orr r1, r1, r1, lsl #8 "); // event CPU = thread CPU = this CPU, EDeferredReady, EEventParent clear
asm("cmp r2, #%a0" : : "i" ((TInt)EEventCountInc)); // check if event count zero
asm("bhs 0f "); // if not, finish and return FALSE
STREX(3,1,0); // else update CPUs and clear parent flag
// NOTE: Deferred ready flag must have been clear since thread is running
__JUMP(,lr); // return TRUE (assumes this!=0)
__JUMP(,lr); // return FALSE
/** Check for concurrent tied events when a thread joins a group
If event count zero, atomically set the parent flag and return TRUE,
@return TRUE if the parent flag has been set
@pre Preemption disabled
__NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
asm("add r0, r0, #%a0" : : "i" _FOFF(NSchedulable,iEventState));
asm("cmp r1, #%a0" : : "i" ((TInt)EEventCountInc)); // check if event count zero
asm("bhs 0f "); // if not, finish and return FALSE
asm("orr r2, r1, #%a0" : : "i" ((TInt)EEventParent)); // else set parent flag
__JUMP(,lr); // return TRUE (assumes this!=0)
__JUMP(,lr); // return FALSE
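// Sketch (illustration only) of the join interlock above; the leave interlock
// is the same shape but writes both CPU fields and clears EEventParent
// instead. CompareAndSwap is a hypothetical stand-in for LDREX/STREX.
//
//  TBool NThreadBase::TiedEventJoinInterlockSketch()
//      {
//      TUint32 oldst, newst;
//      do  {
//          oldst = iEventState;
//          if (oldst >= EEventCountInc)
//              return FALSE;               // events outstanding - can't flip now
//          newst = oldst | EEventParent;   // no events: mark as parented
//          } while (!CompareAndSwap(&iEventState, oldst, newst));
//      return TRUE;
//      }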
#ifdef __FAST_SEM_MACHINE_CODED__
/******************************************************************************
******************************************************************************/
/** Waits on a fast semaphore.
Decrements the signal count for the semaphore and
removes the calling thread from the ready-list if the semaphore becomes
unsignalled. Only the thread that owns a fast semaphore can wait on it.
Note that this function does not block, it merely updates the NThread state;
rescheduling will only occur when the kernel is unlocked. Generally threads
would use NKern::FSWait() which manipulates the kernel lock for you.
@pre The calling thread must own the semaphore.
@pre No fast mutex can be held.
@pre Kernel must be locked.
@post Kernel is locked.
@see NFastSemaphore::Signal()
EXPORT_C __NAKED__ void NFastSemaphore::Wait()
ASM_DEBUG1(FSWait,r0);
asm("stmfd sp!, {r4-r7} ");
asm("ldr r6, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("mov r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
asm("add r7, r6, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
asm("orr r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
asm("str r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
__DATA_MEMORY_BARRIER__(r12);
LDREX( 2,0); // count
asm("mov r5, r6, lsr #2 "); // thread>>2
asm("orr r5, r5, #0x80000000 ");
asm("subs r4, r2, #1 ");
asm("movlt r4, r5 "); // if --count<0, r4=(thread>>2)|0x80000000
__DATA_MEMORY_BARRIER__(r12);
asm("cmp r2, #0 "); // original count zero ?
asm("bne 2f "); // if not, don't need to wait
asm("strb r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag)); // else we need to reschedule
asm("ldmfd sp!, {r4-r7} ");
asm("tst r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
asm("ldmfd sp!, {r4-r7} ");
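// Fast-path sketch (illustration only) of the count update above. iCount is
// either a non-negative signal count or, once a thread blocks on the
// semaphore, (thread>>2)|0x80000000. CompareAndSwap stands in for the
// LDREX/STREX loop, and the two helpers abbreviate the flag-set and
// wait-cancel paths visible in the assembly.
//
//  void NFastSemaphore::WaitSketch(NThreadBase* aThread)
//      {
//      TInt c, newc;
//      do  {
//          c = iCount;
//          newc = (c > 0) ? c - 1
//              : TInt((TUint32(aThread) >> 2) | 0x80000000u);
//          } while (!CompareAndSwap(&iCount, c, newc));
//      if (c == 0)
//          SetRescheduleNeeded();  // no signal yet: thread blocks at next reschedule
//      else
//          aThread->iWaitState.CancelWait();   // signal already present
//      }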
/** Waits on a fast semaphore.
Decrements the signal count for the semaphore
and waits for a signal if the semaphore becomes unsignalled. Only the
thread that owns a fast semaphore can wait on it.
@param aSem The semaphore to wait on.
@pre The calling thread must own the semaphore.
@pre No fast mutex can be held.
@see NFastSemaphore::Wait()
EXPORT_C __NAKED__ void NKern::FSWait(NFastSemaphore* /*aSem*/)
ASM_DEBUG1(NKFSWait,r0);
__ASM_CLI(); // all interrupts off
asm("stmfd sp!, {r4,r5,r11,lr} ");
asm("ldr r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("mov r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
asm("orr r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
asm("add r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
/** Waits for a signal on the current thread's I/O semaphore.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked
@pre interrupts enabled
EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
__ASM_CLI(); // all interrupts off
asm("stmfd sp!, {r4,r5,r11,lr} ");
asm("ldr r11, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("mov r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
asm("orr r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
asm("add r0, r11, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
asm("add r3, r11, #%a0" : : "i" _FOFF(NThreadBase, iRequestSemaphore));
asm("str r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
__DATA_MEMORY_BARRIER__(r12);
LDREX( 2,3); // count
asm("mov r5, r11, lsr #2 "); // thread>>2
asm("orr r5, r5, #0x80000000 ");
asm("subs r4, r2, #1 ");
asm("movlt r4, r5 "); // if --count<0, r4=(thread>>2)|0x80000000
__DATA_MEMORY_BARRIER__(r12);
asm("cmp r2, #0 "); // original count zero ?
asm("beq 2f "); // if so we must wait
asm("tst r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
__POPRET("r4,r5,r11,");
asm("ldmfd sp!, {r4-r5} ");
asm("str r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount)); // else lock the kernel
asm("strb r2, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag)); // and set the reschedule flag
asm("stmfd sp!, {r0,r4-r10} ");
asm("bl " CSM_ZN10TScheduler10RescheduleEv ); // reschedule
asm(".global nkern_wfar_resched_return ");
asm("nkern_wfar_resched_return: ");
// need to send any outstanding reschedule IPIs
asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
__POPRET("r0,r4-r11,");
asm(".global wait_for_any_request ");
asm("wait_for_any_request: ");
asm("add r3, r9, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
asm("mov r2, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore << 8));
asm("add r7, r9, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
asm("orr r2, r2, #%a0" : : "i" ((TInt)NThreadWaitState::EWtStWaitPending));
asm("str r12, [r7, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
__DATA_MEMORY_BARRIER__(r12);
LDREX( 0,3); // count
asm("mov r5, r9, lsr #2 "); // thread>>2
asm("orr r5, r5, #0x80000000 ");
asm("subs r4, r0, #1 ");
asm("movlt r4, r5 "); // if --count<0, r4=(thread>>2)|0x80000000
__DATA_MEMORY_BARRIER__(r12);
#ifdef __RECORD_STATE__
asm("str r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iNThreadBaseSpare6));
asm("cmp r0, #0 "); // original count zero ?
asm("beq exec_wfar_wait "); // yes - must wait
asm("tst r4, #%a0" : : "i" ((TInt)(NThreadWaitState::EWtStDead|NThreadWaitState::EWtStWaitActive)));
asm("ldreq r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks)); // check for callbacks
asm("beq exec_wfar_finish ");
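// Typical usage (illustrative, not from this file): a nanothread service loop
// blocking on its own request semaphore until another thread or a DFC signals
// it with NKern::ThreadRequestSignal().
//
//  void ServiceLoopSketch()
//      {
//      for (;;)
//          {
//          NKern::WaitForAnyRequest();     // sleep until iRequestSemaphore signalled
//          // ... process whatever request(s) completed ...
//          }
//      }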
/** Signals a fast semaphore.
Increments the signal count of a fast semaphore by
one and releases any waiting thread if the semaphore becomes signalled.
Note that a reschedule will not occur before this function returns; it will
only take place when the kernel is unlocked. Generally threads
would use NKern::FSSignal() which manipulates the kernel lock for you.
@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.
@post Kernel is locked.
@see NFastSemaphore::Wait()
@see NKern::FSSignal()
EXPORT_C __NAKED__ void NFastSemaphore::Signal()
ASM_DEBUG1(FSSignal,r0);
__DATA_MEMORY_BARRIER_Z__(r12);
LDREX( 2,0); // count
asm("sublt r3, r1, #1 "); // if count<0, replace with aCount-1
asm("addges r3, r2, r1 "); // if count>=0, add aCount
asm("bvs 0f "); // if overflow, leave alone
asm("movlt r1, r2, lsl #2 "); // if original count<0 r1 = original count<<2 = thread
asm("blt fs_signal_wake ");
__JUMP(, lr); // else finished
asm("fs_signal_wake: ");
asm("stmfd sp!, {r4-r6,lr} ");
asm("bl AcqSLock__12NSchedulable ");
asm("add r0, r5, #%a0" : : "i" _FOFF(NThreadBase, iWaitState));
asm("mov r1, #%a0" : : "i" ((TInt)NThreadBase::EWaitFastSemaphore));
asm("bl UnBlockT__16NThreadWaitStateUiPvi ");
asm("ldmfd sp!, {r4-r6,lr} ");
asm("b RelSLock__12NSchedulable ");
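// Sketch (illustration only) of the signal path above for a single signal:
// a negative iCount encodes the blocked thread, which must be woken under its
// spin lock; otherwise the count is simply incremented. CompareAndSwap is a
// hypothetical stand-in for LDREX/STREX (and the overflow guard is omitted).
//
//  void NFastSemaphore::SignalSketch()
//      {
//      TInt c, newc;
//      do  {
//          c = iCount;
//          newc = (c < 0) ? 0 : c + 1;     // waiter present -> count becomes 0
//          } while (!CompareAndSwap(&iCount, c, newc));
//      if (c < 0)
//          {
//          NThreadBase* t = (NThreadBase*)(c << 2);    // recover thread pointer
//          t->AcqSLock();
//          t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
//          t->RelSLock();
//          }
//      }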
/** Signals a fast semaphore multiple times.
@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.
@post Kernel is locked.
EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
ASM_DEBUG2(FSSignalN,r0,r1);
asm("bgt fssignal1 ");
/** Signals the request semaphore of a nanothread several times.
This function is intended to be used by the EPOC layer and personality
layers. Device drivers should use Kern::RequestComplete instead.
@param aThread Nanothread to signal. If NULL, the current thread is signalled.
@param aCount Number of times the request semaphore must be signalled.
@see Kern::RequestComplete()
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, TInt /*aCount*/)
ASM_DEBUG2(NKThreadRequestSignalN,r0,r1);
asm("addne r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
asm("bne nkfssignal1 ");
asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("add r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
asm("b nkfssignal2 ");
/** Signals the request semaphore of a nanothread.
This function is intended to be used by the EPOC layer and personality
layers. Device drivers should use Kern::RequestComplete instead.
@param aThread Nanothread to signal. Must be non-NULL.
@see Kern::RequestComplete()
@pre Interrupts must be enabled.
@pre Do not call from an ISR
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
ASM_DEBUG1(NKThreadRequestSignal,r0);
asm("add r0, r0, #%a0" : : "i" _FOFF(NThreadBase,iRequestSemaphore));
/* fall through to FSSignal() ... */
/** Signals a fast semaphore.
Increments the signal count of a fast semaphore
by one and releases any waiting thread if the semaphore becomes signalled.
@param aSem The semaphore to signal.
@pre Interrupts must be enabled.
@pre Do not call from an ISR
EXPORT_C __NAKED__ void NKern::FSSignal(NFastSemaphore* /*aSem*/)
ASM_DEBUG1(NKFSSignal,r0);
asm("nkfssignal1: ");
asm("nkfssignal2: ");
__DATA_MEMORY_BARRIER_Z__(r12);
LDREX( 2,0); // count
asm("sublt r3, r1, #1 "); // if count<0, replace with aCount-1
asm("addges r3, r2, r1 "); // if count>=0, add aCount
asm("bvs 0f "); // if overflow, leave alone
__JUMP(, lr); // else finished
asm("mov r1, r2, lsl #2 "); // if original count<0 r1 = original count<<2 = thread
asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("stmfd sp!, {r4,lr} ");
asm("add r12, r12, #1 "); // lock the kernel
asm("str r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("bl fs_signal_wake "); // wake up the thread
asm("ldmfd sp!, {r4,lr} ");
asm("b Unlock__5NKern ");
/** Signals a fast semaphore multiple times.
Increments the signal count of a
fast semaphore by aCount and releases any waiting thread if the semaphore
@param aSem The semaphore to signal.
@param aCount The number of times to signal the semaphore.
@pre Interrupts must be enabled.
@pre Do not call from an ISR
EXPORT_C __NAKED__ void NKern::FSSignalN(NFastSemaphore* /*aSem*/, TInt /*aCount*/)
ASM_DEBUG2(NKFSSignalN,r0,r1);
asm("bgt nkfssignal1 ");
/** Cancels a wait on a fast semaphore.
@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.
@post Kernel is locked.
__NAKED__ void NFastSemaphore::WaitCancel()
/* Fall through ... */
/* Fall through ... */
/* Fall through ... */
/** Increment a fast semaphore count
If iCount >= 0, increment by aCount and return 0
If iCount < 0, set count equal to aCount-1 and return (original count << 2)
__NAKED__ NThreadBase* NFastSemaphore::Inc(TInt /*aCount*/)
__DATA_MEMORY_BARRIER_Z__(r12);
asm("sublt r3, r1, #1 "); // if count<0, replace with aCount-1
asm("addges r3, r2, r1 "); // if count>=0, add aCount
asm("bvs 0f "); // if overflow leave alone
asm("movlt r0, r2, lsl #2 "); // if original count<0, return count<<2
asm("movge r0, #0 "); // else return 0
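// The specification above as a C++ sketch (illustration only; CompareAndSwap
// stands in for the LDREX/STREX loop, and the assembly's signed-overflow
// guard is omitted here).
//
//  NThreadBase* NFastSemaphore::IncSketch(TInt aCount)
//      {
//      TInt c, newc;
//      do  {
//          c = iCount;
//          newc = (c < 0) ? aCount - 1 : c + aCount;
//          } while (!CompareAndSwap(&iCount, c, newc));
//      return (c < 0) ? (NThreadBase*)(c << 2) : 0;    // woken thread, if any
//      }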
/** Decrement a fast semaphore count
If count > 0, decrement
If count = 0, set equal to (thread>>2)|0x80000000
Return original count
Full barrier semantics
__NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
__DATA_MEMORY_BARRIER_Z__(r12);
asm("subs r3, r2, #1 ");
asm("movlt r3, #0x80000000 ");
asm("orrlt r3, r3, r1, lsr #2 "); // if --count<0, r3=(thread>>2)|0x80000000
__DATA_MEMORY_BARRIER__(r12);
asm("mov r0, r2 "); // return original count
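// Dec() as specified above, sketched in C++ (illustration only; CompareAndSwap
// is a hypothetical stand-in for the LDREX/STREX loop):
//
//  TInt NFastSemaphore::DecSketch(NThreadBase* aThread)
//      {
//      TInt c, newc;
//      do  {
//          c = iCount;
//          newc = (c > 0) ? c - 1
//              : TInt((TUint32(aThread) >> 2) | 0x80000000u);
//          } while (!CompareAndSwap(&iCount, c, newc));
//      return c;   // original count
//      }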
/** Reset a fast semaphore count
If iCount >= 0, set iCount=0 and return 0
If iCount < 0, set iCount=0 and return (original count << 2)
__NAKED__ NThreadBase* NFastSemaphore::DoReset()
__DATA_MEMORY_BARRIER_Z__(r3);
STREX(12,3,0); // zero count
asm("movlt r0, r2, lsl #2 "); // if original count<0, return count<<2
asm("movge r0, #0 "); // else return 0
#ifdef __NTHREAD_WAITSTATE_MACHINE_CODED__
/******************************************************************************
******************************************************************************/
__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/)
asm("stmfd sp!, {r4-r5} ");
asm("and r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
asm("and r1, r1, #0xff ");
asm("orr r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
asm("orr r2, r2, r1, lsl #8 ");
asm("ldmfd sp!, {r4-r5} ");
asm("str r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
__NAKED__ void NThreadWaitState::SetUpWait(TUint /*aType*/, TUint /*aFlags*/, TAny* /*aWaitObj*/, TUint32 /*aTimeout*/)
asm("stmfd sp!, {r4-r5} ");
asm("and r2, r2, #%a0" : : "i" ((TInt)EWtStObstructed));
asm("and r1, r1, #0xff ");
asm("orr r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
asm("orr r2, r2, r1, lsl #8 ");
asm("ldr r12, [sp, #8] ");
asm("ldmfd sp!, {r4-r5} ");
asm("str r12, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState, iTimer.iTriggerTime));
__NAKED__ void NThreadWaitState::CancelWait()
asm("tst r0, #%a0" : : "i" ((TInt)(EWtStDead|EWtStWaitActive)));
__NAKED__ TInt NThreadWaitState::DoWait()
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iTriggerTime));
asm("orrne r2, r2, #%a0" : : "i" ((TInt)EWtStTimeout));
asm("tst r2, #%a0" : : "i" ((TInt)EWtStDead));
asm("tst r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
asm("bic r2, r2, #%a0" : : "i" ((TInt)EWtStWaitPending));
asm("orr r2, r2, #%a0" : : "i" ((TInt)EWtStWaitActive));
asm("mov r0, r2, lsr #8 ");
asm("stmfd sp!, {r2-r4,lr} ");
asm("add r0, r0, #%a0" : : "i" _FOFF(NThreadWaitState,iTimer));
asm("bl " CSM_ZN6NTimer7OneShotEii );
asm("ldr r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
asm("add r1, r1, #1 ");
asm("str r1, [r4, #%a0]" : : "i" _FOFF(NThreadWaitState,iTimer.iNTimerSpare1));
asm("ldmfd sp!, {r2-r4,lr} ");
asm("mov r0, r2, lsr #8 ");
asm("mvn r0, #%a0" : : "i" (~KErrDied));
asm("mvn r0, #%a0" : : "i" (~KErrGeneral));
__NAKED__ TInt NThreadWaitState::UnBlockT(TUint /*aType*/, TAny* /*aWaitObj*/, TInt /*aReturnValue*/)
asm("stmfd sp!, {r4-r6,lr} ");
asm("mov r6, r2 "); // r6 = aWaitObj
__DATA_MEMORY_BARRIER__(r2);
LDREXD( 4,0); // r5:r4 = oldws64
asm("cmp r5, r6 "); // does iWaitObj match?
asm("bne 2f "); // no
asm("eor r12, r4, r1, lsl #8 "); // does wait type match?
asm("cmp r12, #%a0" : : "i" ((TInt)EWtStDead));
asm("bhs 2f "); // no
STREXD( 12,2,0); // yes - wait matches - try to write return value
asm("cmp r12, #0 "); // success?
asm("bne 1b "); // no - retry
asm("tst r4, #%a0" : : "i" ((TInt)EWtStTimeout));
asm("blne CancelTimerT__16NThreadWaitState ");
asm("tst r4, #%a0" : : "i" ((TInt)EWtStWaitActive));
asm("ldr r1, [r6, #%a0]" : : "i" (_FOFF(NThreadBase,iPauseCount)-_FOFF(NThreadBase,iWaitState)));
asm("sub r0, r6, #%a0" : : "i" _FOFF(NThreadBase,iWaitState)); // r0 = Thread()
asm("movs r1, r1, lsl #16 "); // check if iPauseCount=iSuspendCount=0
asm("bleq ReadyT__12NSchedulableUi "); // if so, make thread ready
__POPRET(" r4-r6,"); // return KErrNone
STREXD( 12,4,0); // no matching wait - write back to check atomicity
asm("cmp r12, #0 "); // success?
asm("bne 1b "); // no - retry
asm("mvn r0, #%a0" : : "i" (~KErrGeneral));
__POPRET(" r4-r6,"); // no matching wait - return KErrGeneral
__NAKED__ TUint32 NThreadWaitState::ReleaseT(TAny*& /*aWaitObj*/, TInt /*aReturnValue*/)
asm("stmfd sp!, {r4-r5} ");
__DATA_MEMORY_BARRIER__(r2);
asm("and r2, r4, #%a0" : : "i" ((TInt)EWtStDead));
__DATA_MEMORY_BARRIER__(r12);
asm("str r5, [r1] ");
asm("tst r4, #%a0" : : "i" ((TInt)EWtStTimeout));
asm("ldmfd sp!, {r4-r5} ");
asm("bl CancelTimerT__16NThreadWaitState ");
asm("ldmfd sp!, {r4-r5} ");
#ifdef __FAST_MUTEX_MACHINE_CODED__
/******************************************************************************
******************************************************************************/
/** Releases a previously acquired fast mutex.
Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
@pre The calling thread holds the mutex.
@pre Kernel must be locked.
@post Kernel is locked.
@see NFastMutex::Wait()
@see NKern::FMSignal()
EXPORT_C __NAKED__ void NFastMutex::Signal()
ASM_DEBUG1(FMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
// BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
asm("stmfd sp!, {r0,lr} ");
asm("ldr r0, btrace_hdr_fmsignal ");
asm("bl OutX__6BTraceUlUlUlUl ");
asm("ldmfd sp!, {r0,lr} ");
asm("mov r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
LDREX( 2,0); // r2=aMutex->iHoldingThread
asm("cmp r2, r1 "); // anyone else waiting?
asm("bne 2f "); // branch out if someone else waiting
STREX( 12,2,0); // else try to clear the holding thread
asm("teq r12, #0 ");
asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
__JUMP(,lr); // mutex released without contention
#ifdef BTRACE_FAST_MUTEX
asm("btrace_hdr_fmsignal: ");
asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexSignal)));
// there is contention
asm("orr r12, r0, #1 ");
asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("b DoSignalL__10NFastMutex ");
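// Uncontended-path sketch (illustration only) of the release above:
// iHoldingThread holds the owning thread pointer and gains bit 0 once another
// thread is waiting, so a compare-and-swap against exactly 'aCurrent' fails
// under contention and the slow path runs instead. CompareAndSwap is a
// hypothetical stand-in for LDREX/STREX, and field types are simplified.
//
//  void NFastMutex::SignalSketch(NThreadBase* aCurrent)
//      {
//      if (CompareAndSwap(&iHoldingThread, (TAny*)aCurrent, (TAny*)0))
//          {
//          aCurrent->iHeldFastMutex = 0;   // released without contention
//          return;
//          }
//      aCurrent->iHeldFastMutex = TLinAddr(this) | 1;  // flag: release in progress
//      DoSignalL();                                    // wake a waiter
//      }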
/** Acquires the System Lock.
This will block until the mutex is available, and causes
the thread to enter an implicit critical section until the mutex is released.
@post System lock is held.
@see NKern::UnlockSystem()
@see NKern::FMWait()
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked
@pre interrupts enabled
EXPORT_C __NAKED__ void NKern::LockSystem()
asm("ldr r0, __SystemLock ");
/* fall through to FMWait() ... */
/** Acquires a fast mutex.
This will block until the mutex is available, and causes
the thread to enter an implicit critical section until the mutex is released.
@param aMutex The fast mutex to acquire.
@post The calling thread holds the mutex.
@see NFastMutex::Wait()
@see NKern::FMSignal()
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Kernel must be unlocked
@pre interrupts enabled
EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex* /*aMutex*/)
ASM_DEBUG1(NKFMWait,r0);
asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
LDREX( 2,0); // r2=aMutex->iHoldingThread
asm("cmp r2, #0 ");
asm("bne 2f "); // branch out if mutex held
STREX( 12,1,0); // else try to set us as holding thread
asm("teq r12, #0 ");
asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
__DATA_MEMORY_BARRIER__(r12);
#ifdef BTRACE_FAST_MUTEX
// BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
asm("ldr r0, btrace_hdr_fmwait ");
asm("b OutX__6BTraceUlUlUlUl ");
__JUMP(,lr); // mutex acquired without contention
// there is contention
asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("str r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("stmfd sp!, {r4,lr} ");
asm("bl DoWaitL__10NFastMutex ");
asm("ldmfd sp!, {r4,lr} ");
asm("b Unlock__5NKern ");
asm("__SystemLock: ");
asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
#ifdef BTRACE_FAST_MUTEX
asm("btrace_hdr_fmwait: ");
asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexWait)));
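// Acquire-path sketch (illustration only) matching the code above: try to
// compare-and-swap iHoldingThread from 0 to the current thread; on failure,
// record the mutex, lock the kernel and take the DoWaitL() slow path.
// CompareAndSwap is a hypothetical stand-in for LDREX/STREX, and the
// interrupt masking done by the assembly is omitted.
//
//  void NKern::FMWaitSketch(NFastMutex* aMutex, NThreadBase* aCurrent)
//      {
//      aCurrent->iHeldFastMutex = TLinAddr(aMutex);
//      if (CompareAndSwap(&aMutex->iHoldingThread, (TAny*)0, (TAny*)aCurrent))
//          return;             // acquired without contention
//      NKern::Lock();          // contention: queue behind the holder
//      aMutex->DoWaitL();
//      NKern::Unlock();
//      }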
/** Releases the System Lock.
@pre System lock must be held.
@see NKern::LockSystem()
@see NKern::FMSignal()
EXPORT_C __NAKED__ void NKern::UnlockSystem()
asm("ldr r0, __SystemLock ");
/* fall through to FMSignal() ... */
/** Releases a previously acquired fast mutex.
@param aMutex The fast mutex to release.
@pre The calling thread holds the mutex.
@see NFastMutex::Signal()
@see NKern::FMWait()
EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex* /*aMutex*/)
ASM_DEBUG1(NKFMSignal,r0);
#ifdef BTRACE_FAST_MUTEX
// BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
asm("stmfd sp!, {r0,lr} ");
asm("ldr r0, btrace_hdr_fmsignal ");
asm("bl OutX__6BTraceUlUlUlUl ");
asm("ldmfd sp!, {r0,lr} ");
asm("mov r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
LDREX( 12,0); // r12=aMutex->iHoldingThread
asm("cmp r12, r1 "); // anyone else waiting?
asm("bne 2f "); // branch out if someone else waiting
STREX( 12,2,0); // else try to clear the holding thread
asm("teq r12, #0 ");
asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
__JUMP(,lr); // mutex released without contention
// there is contention
asm("stmfd sp!, {r4,lr} ");
asm("mov r12, #1 ");
asm("orr r4, r0, #1 ");
asm("str r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("str r4, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("bl DoSignalL__10NFastMutex ");
asm("ldmfd sp!, {r4,lr} ");
asm("b Unlock__5NKern ");
/** Temporarily releases the System Lock if there is contention.
is another thread attempting to acquire the System lock, the calling
thread releases the mutex and then acquires it again.
This is more efficient than the equivalent code:
NKern::UnlockSystem();
NKern::LockSystem();
Note that this can only allow higher priority threads to use the System
lock, as lower priority threads cannot cause contention on a fast mutex.
@return TRUE if the system lock was relinquished, FALSE if not.
@pre System lock must be held.
@post System lock is held.
@see NKern::LockSystem()
@see NKern::UnlockSystem()
EXPORT_C __NAKED__ TBool NKern::FlashSystem()
// CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
asm("ldr r0, __SystemLock ");
/* fall through to FMFlash() ... */
/** Temporarily releases a fast mutex if there is contention.
If there is another thread attempting to acquire the mutex, the calling
thread releases the mutex and then acquires it again.
This is more efficient than the equivalent code:
@return TRUE if the mutex was relinquished, FALSE if not.
@pre The mutex must be held.
@post The mutex is held.
EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex* /*aM*/)
ASM_DEBUG1(NKFMFlash,r0);
asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("ldrb r2, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iMutexPri));
asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iBasePri));
asm("cmp r2, r12 ");
asm("bhs 1f "); // a thread of greater or equal priority is waiting
#ifdef BTRACE_FAST_MUTEX
// BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
asm("ldr r0, btrace_hdr_fmsignal ");
asm("stmfd sp!, {r4,lr} ");
asm("bl OutX__6BTraceUlUlUlUl ");
asm("ldmfd sp!, {r4,lr} ");
__JUMP(,lr); // return FALSE
#ifdef BTRACE_FAST_MUTEX
asm("btrace_hdr_fmflash: ");
asm(".word %a0" : : "i" (BTRACE_HEADER_C(8,BTrace::EFastMutex,BTrace::EFastMutexFlash)));
asm("mov r12, #1 ");
asm("str r12, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("stmfd sp!, {r4,lr} ");
asm("bl Signal__10NFastMutex ");
asm("bl PreemptionPoint__5NKern ");
asm("bl Wait__10NFastMutex ");
asm("bl Unlock__5NKern ");
asm("ldmfd sp!, {r4,lr} ");
__JUMP(,lr); // return TRUE
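// Illustrative use (not from this file): batching work under the System Lock
// while letting waiters in between items. FMFlash() only relinquishes the
// lock when iMutexPri shows a waiter of at least the holder's priority, which
// is what makes it cheaper than an unconditional Signal()/Wait() pair.
// ProcessEntry and the queue walk are hypothetical.
//
//  void ProcessAllSketch(SDblQue& aQ)
//      {
//      NKern::LockSystem();
//      while (!aQ.IsEmpty())
//          {
//          // ... detach and handle one entry (hypothetical work) ...
//          NKern::FlashSystem();   // brief release iff someone is waiting
//          }
//      NKern::UnlockSystem();
//      }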
/** Check whether a thread holds a fast mutex.
If so set the mutex contention flag and return TRUE, else return FALSE.
Called with kernel lock held
__NAKED__ TBool NThreadBase::CheckFastMutexDefer()
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("bics r2, r1, #3 "); // r2 = pointer to mutex if any, r1 bit 0 = flag
asm("mov r0, #0 "); // no mutex - return FALSE
// iHeldFastMutex points to a mutex
asm("tst r1, #1 "); // test flag
asm("beq 2f "); // branch if not being released
// mutex being released
LDREX(3,2); // r3 = m->iHoldingThread
asm("sub r3, r3, r0 "); // m->iHoldingThread - this
asm("bhi 4f "); // if m->iHoldingThread != this or this+1, skip
asm("orr r3, r0, #1 "); // if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
asm("teq r12, #0 ");
asm("mov r0, #1 "); // return TRUE
asm("mov r3, #0 "); // already released, so set iHeldFastMutex=0
asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("mov r0, #0 "); // no mutex - return FALSE
// mutex being acquired or has been acquired
// if it has been acquired set the contention flag and return TRUE, else return FALSE
LDREX(3,2); // r3 = m->iHoldingThread
asm("sub r3, r3, r0 "); // m->iHoldingThread - this
asm("bhi 0b "); // if m->iHoldingThread != this or this+1, finish and return FALSE
asm("orr r3, r0, #1 "); // if m->iHoldingThread = this or this+1, set m->iHoldingThread = this+1
asm("teq r12, #0 ");
asm("mov r0, #1 "); // return TRUE
asm("mov r3, #0 "); // already released, so set iHeldFastMutex=0
asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iHeldFastMutex));
asm("mov r0, #0 "); // no mutex - return FALSE
/******************************************************************************
******************************************************************************/
/** Transition the state of an IDFC or DFC when Add() is called
0000->008n, 00Cn->00En, all other states unchanged
Return original state.
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::AddStateChange()
GET_RWNO_TID(, r1); // r1->SubScheduler
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r1 = current CPU number
__DATA_MEMORY_BARRIER_Z__(r12);
asm("cmp r0, #0 "); // original state 0000 ?
asm("orreq r2, r1, #0x0080 "); // yes -> 008n
asm("movne r2, r0 "); // no -> R2=original state ...
asm("eorne r12, r0, #0x00C0 "); // ... and R12=original state^00C0 ...
asm("cmpne r12, #0x0020 "); // ... and check if result < 0020 (i.e. original==00C0..00DF)
asm("addlo r2, r2, #0x0020 "); // 00Cn->00En otherwise leave R2 alone
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
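// The transition above as a C++ sketch (illustration only). iDfcState
// encodings as used throughout this section: 0x008n = queued on CPU n's IDFC
// queue, 0x00Cn = running on CPU n, 0x00En = running and re-queued.
// CompareAndSwapH is a hypothetical 16-bit stand-in for LDREXH/STREXH.
//
//  TUint32 TDfc::AddStateChangeSketch(TUint32 aCpuNum)
//      {
//      TUint32 oldst, newst;
//      do  {
//          oldst = iDfcState;
//          if (oldst == 0)
//              newst = 0x80 | aCpuNum;     // 0000 -> 008n: newly queued here
//          else if ((oldst & 0xFFE0) == 0x00C0)
//              newst = oldst + 0x20;       // 00Cn -> 00En: queue again after run
//          else
//              newst = oldst;              // all other states unchanged
//          } while (!CompareAndSwapH(&iDfcState, oldst, newst));
//      return oldst;
//      }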
/** Transition the state of an IDFC just before running it.
002g->00Cn, 008n->00Cn, 00An->00Cn, XXYY->XX00, XX00->0000
other initial states invalid
Return original state
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::RunIDFCStateChange()
GET_RWNO_TID(, r1); // r1->SubScheduler
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r1 = current CPU number
__DATA_MEMORY_BARRIER_Z__(r12);
asm("str r4, [sp, #-4]! ");
asm("ldr r4, __IdleGeneration ");
asm("ldrb r4, [r4] "); // r4 = TheScheduler.iIdleGeneration
asm("eor r4, r4, #0x0021 "); // r4 = expected state of idle IDFCs
asm("eor r2, r0, #0x0080 ");
asm("cmp r2, #0x0040 ");
asm("bhs 2f "); // branch out unless 008n or 00An
asm("and r2, r0, #0x001F ");
asm("bne 0f "); // if n!=current CPU number, die
asm("orr r2, r1, #0x00C0 "); // 008n->00Cn, 00An->00Cn
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r4, [sp], #4 ");
asm("bic r2, r0, #1 ");
asm("cmp r2, #0x0020 ");
asm("orreq r2, r1, #0x00C0 "); // 002g->00Cn
asm("bne 0f "); // wrong idle state
asm("cmp r0, #0x0100 "); // C=1 if XXYY or XX00, C=0 if bad state
asm("bic r2, r0, #0x00FF "); // XXYY->XX00, C unchanged
asm("tst r0, #0x00FF "); // C unchanged
asm("moveq r2, #0 "); // XX00->0000, C unchanged
asm("bcs 3b "); // branch to STREX if valid state
__ASM_CRASH(); // bad state
asm("__IdleGeneration: ");
asm(".word %a0 " : : "i" ((TInt)&TheScheduler.iIdleGeneration));
/** Transition the state of an IDFC just after running it.
First swap aS->iCurrentIDFC with 0
If original value != this, return 0xFFFFFFFF and don't touch *this
Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
other initial states invalid
Return original state
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler,iCurrentIDFC));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("subs r2, r2, r0 "); // aS->iCurrentIDFC == this?
asm("bne 9f "); // no - bail out immediately
STREX(12,2,1); // yes - set aS->iCurrentIDFC=0
asm("cmp r12, #0 ");
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER__(r12);
asm("str r4, [sp, #-4]! ");
GET_RWNO_TID(, r4); // r4->SubScheduler
asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r4 = current CPU number
LDREXH(0,3); // r0 = original DFC state
asm("mov r2, #0 "); // r2 = 0 to begin with
asm("tst r0, #0x00FF ");
asm("eor r12, r0, r4 "); // original state ^ CPU number, should be xxC0, xxE0 or xx60
asm("and r12, r12, #0x00E0 ");
asm("cmp r12, #0x00E0 ");
asm("cmpne r12, #0x00C0 ");
asm("cmpne r12, #0x0060 ");
__ASM_CRASH(); // bad state
asm("bic r12, r0, #0x001F ");
asm("cmp r12, #0x00E0 ");
asm("bhi 4f "); // branch out if XXYY or XX00
asm("subeq r2, r0, #0x0060 "); // 00En->008n
asm("cmp r12, #0x0060 ");
asm("moveq r2, r0 "); // 006n->006n, else R2=0
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r4, [sp], #4 ");
asm("tst r0, #0x00FF ");
asm("bicne r2, r0, #0x00FF "); // XXYY->XX00, XX00->0000
asm("mvn r0, #0 "); // return 0xFFFFFFFF
/** Transition the state of an IDFC just after running it.
006n->002g where g = TheScheduler.iIdleGeneration
other initial states invalid
Return original state
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::EndIDFCStateChange2()
asm("ldr r12, __IdleGeneration ");
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
asm("str r4, [sp, #-4]! ");
GET_RWNO_TID(, r4); // r4->SubScheduler
asm("ldr r4, [r4, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r4 = current CPU number
asm("ldrb r1, [r12] "); // r1 = TheScheduler.iIdleGeneration
asm("eor r12, r0, r4 ");
asm("and r12, r12, #0x00FF ");
asm("cmp r12, #0x0060 "); // should be 006n or XX6n
__ASM_CRASH(); // if not, die
asm("tst r0, #0xFF00 "); // XX6n or 006n ?
asm("orreq r2, r1, #0x0020 "); // 006n->002g
asm("bicne r2, r0, #0x00FF "); // XX6n->XX00
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r4, [sp], #4 ");
/** Transition the state of a DFC just before moving it from the IDFC queue to
002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
other initial states invalid
Return original state
__NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("str r4, [sp, #-4]! ");
asm("ldr r4, __IdleGeneration ");
GET_RWNO_TID(, r1); // r1->SubScheduler
asm("ldrb r4, [r4] "); // r4 = TheScheduler.iIdleGeneration
asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r1 = current CPU number
asm("eor r4, r4, #0x0021 "); // r4 = expected state of idle IDFCs
asm("orr r1, r1, #0x0080 ");
asm("beq 0f "); // 0000 -> die
asm("ands r2, r0, #0x00FF ");
asm("beq 3f "); // XX00 -> OK
asm("cmp r2, r4 "); // 002g ?
asm("beq 3f "); // yes -> OK
asm("cmp r2, r1 "); // 008n ?
asm("beq 3f "); // yes -> OK
__ASM_CRASH(); // otherwise die
asm("bics r2, r0, #0x00FF "); // XXYY->XX00
asm("moveq r2, #0x0001 "); // 002g,008n->0001
asm("tst r0, #0x00FF ");
asm("moveq r2, #0 "); // XX00->0000
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldr r4, [sp], #4 ");
/** Transition the state of an IDFC when transferring it to another CPU
002g->00Am, 008n->00Am, XXYY->XX00, XX00->0000
other initial states invalid
Return original state
Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
__NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("stmfd sp!, {r4-r5} ");
asm("ldr r4, __IdleGeneration ");
GET_RWNO_TID(, r5); // r5->SubScheduler
asm("ldrb r4, [r4] "); // r4 = TheScheduler.iIdleGeneration
asm("ldr r5, [r5, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum)); // r5 = current CPU number
asm("eor r4, r4, #0x0021 "); // r4 = expected state of idle IDFCs
asm("orr r5, r5, #0x0080 ");
asm("beq 0f "); // 0000 -> die
asm("ands r2, r0, #0x00FF ");
asm("beq 3f "); // XX00 -> OK
asm("cmp r2, r4 "); // 002g ?
asm("beq 3f "); // yes -> OK
asm("cmp r2, r5 "); // 008n ?
asm("beq 3f "); // yes -> OK
__ASM_CRASH(); // otherwise die
asm("bics r2, r0, #0x00FF "); // XXYY->XX00
asm("orreq r2, r1, #0x00A0 "); // 002g,008n->00Am
asm("tst r0, #0x00FF ");
asm("moveq r2, #0 "); // XX00->0000
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
asm("ldmfd sp!, {r4-r5} ");
/** Transition the state of an IDFC/DFC just before cancelling it.
0000->0000, XX00->ZZ00, xxYY->zzYY
Return original state
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::CancelInitialStateChange()
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("ldr r1, [r1, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuMask)); // r1 = mask of current CPU number
asm("beq 2f "); // if original state 0000 leave alone
asm("orr r2, r0, r1, lsl #8 "); // else set bit 8-15 corresponding to CPU number
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
/** Transition the state of an IDFC/DFC at the end of a cancel operation
XXYY->XX00, XX00->0000
Return original state
Enter and return with interrupts disabled.
__NAKED__ TUint32 TDfc::CancelFinalStateChange()
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER_Z__(r12);
asm("tst r0, #0x00FF ");
asm("bicne r2, r0, #0x00FF "); // XXYY->XX00
asm("moveq r2, #0 "); // XX00->0000
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
/** Transition the state of an IDFC or DFC when QueueOnIdle() is called
0000->002g where g = TheScheduler.iIdleGeneration,
00Cn->006n, all other states unchanged
Return original state.
Enter and return with interrupts disabled and IdleSpinLock held.
__NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
asm("ldr r12, __IdleGeneration ");
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
asm("ldrb r1, [r12] "); // r1 = TheScheduler.iIdleGeneration
__DATA_MEMORY_BARRIER_Z__(r12);
asm("cmp r0, #0 "); // original state 0000 ?
asm("orreq r2, r1, #0x0020 "); // yes -> 002g
asm("movne r2, r0 "); // no -> R2=original state ...
asm("eorne r12, r0, #0x00C0 "); // ... and R12=original state^00C0 ...
asm("cmpne r12, #0x0020 "); // ... and check if result < 0020 (i.e. original==00C0..00DF)
asm("sublo r2, r2, #0x0060 "); // 00Cn->006n otherwise leave R2 alone
asm("cmp r12, #0 ");
__DATA_MEMORY_BARRIER__(r12);
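// Companion sketch (illustration only) to AddStateChange() earlier, with the
// idle-queue encodings: 0x002g = on the idle queue for generation g, 0x006n =
// running on CPU n and re-queued for idle. CompareAndSwapH is a hypothetical
// 16-bit stand-in for LDREXH/STREXH.
//
//  TUint32 TDfc::QueueOnIdleStateChangeSketch(TUint32 aIdleGen)
//      {
//      TUint32 oldst, newst;
//      do  {
//          oldst = iDfcState;
//          if (oldst == 0)
//              newst = 0x20 | aIdleGen;    // 0000 -> 002g
//          else if ((oldst & 0xFFE0) == 0x00C0)
//              newst = oldst - 0x60;       // 00Cn -> 006n
//          else
//              newst = oldst;              // all other states unchanged
//          } while (!CompareAndSwapH(&iDfcState, oldst, newst));
//      return oldst;
//      }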
__NAKED__ void TDfc::ResetState()
asm("add r3, r0, #%a0" : : "i" _FOFF(TDfc,iDfcState));
__DATA_MEMORY_BARRIER_Z__(r2);
asm("beq 0f "); // if state already zero, die
asm("cmp r12, #0 ");
asm("strh r2, [r3] "); // __e32_atomic_store_rel16(&iDfcState, 0)