sl@0: // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\nkern\arm\ncsched.cia sl@0: // sl@0: // sl@0: sl@0: // NThreadBase member data sl@0: #define __INCLUDE_NTHREADBASE_DEFINES__ sl@0: sl@0: // TDfc member data sl@0: #define __INCLUDE_TDFC_DEFINES__ sl@0: sl@0: #include sl@0: #include sl@0: #include "highrestimer.h" sl@0: #include "nkern.h" sl@0: #include "emievents.h" sl@0: sl@0: #if defined(MONITOR_THREAD_CPU_TIME) && !defined(HAS_HIGH_RES_TIMER) sl@0: #error MONITOR_THREAD_CPU_TIME is defined, but high res timer is not supported sl@0: #endif sl@0: sl@0: #ifdef _DEBUG sl@0: #define ASM_KILL_LINK(rp,rs) asm("mov "#rs", #0xdf ");\ sl@0: asm("orr "#rs", "#rs", "#rs", lsl #8 ");\ sl@0: asm("orr "#rs", "#rs", "#rs", lsl #16 ");\ sl@0: asm("str "#rs", ["#rp"] ");\ sl@0: asm("str "#rs", ["#rp", #4] "); sl@0: #else sl@0: #define ASM_KILL_LINK(rp,rs) sl@0: #endif sl@0: sl@0: #define ALIGN_STACK_START \ sl@0: asm("mov r12, sp"); \ sl@0: asm("tst sp, #4"); \ sl@0: asm("subeq sp, sp, #4"); \ sl@0: asm("str r12, [sp,#-4]!") sl@0: sl@0: #define ALIGN_STACK_END \ sl@0: asm("ldr sp, [sp]") sl@0: sl@0: sl@0: #ifdef __CPU_HAS_VFP sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: #define FPEXC_REG 10 sl@0: #define FPEXC_REG3 4 sl@0: #else sl@0: #define FPEXC_REG 11 sl@0: #define FPEXC_REG3 10 sl@0: #endif sl@0: #endif sl@0: sl@0: ////////////////////////////////////////////////////////////////////////////// sl@0: // Macros to define which standard ARM registers are used to save sl@0: // required co-processor registers on a reschedule. 
sl@0: // They rely on the fact that the compiler will concatenate adjacent strings sl@0: // so "r" "9" "," "r" "10" "," will be converted in the assembler file to: sl@0: // r9,r10 sl@0: ///////////////////////////////////////////////////////////////////////////// sl@0: sl@0: #ifdef __CPU_HAS_CP15_THREAD_ID_REG sl@0: #define TID_SP_REG(reg) "r"#reg"," sl@0: #else sl@0: #define TID_SP_REG(reg) sl@0: #endif //__CPU_HAS_CP15_THREAD_ID_REG sl@0: sl@0: #ifdef __CPU_HAS_VFP sl@0: #define FPEXC_SP_REG(reg) "r"#reg"," sl@0: #else sl@0: #define FPEXC_SP_REG(reg) sl@0: #endif //__CPU_HAS_VFP sl@0: sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: #define CAR_SP_REG(reg) "r"#reg"," sl@0: #else sl@0: #define CAR_SP_REG(reg) sl@0: #endif //__CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: #define DACR_SP_REG(reg) "r"#reg"," sl@0: #else sl@0: #define DACR_SP_REG(reg) sl@0: #endif //__CPU_ARM_USE_DOMAINS sl@0: sl@0: #ifdef __CPU_SUPPORT_THUMB2EE sl@0: #define THUMB2EE_SP_REG(reg) "r"#reg"," sl@0: #else sl@0: #define THUMB2EE_SP_REG(reg) sl@0: #endif // __CPU_SUPPORT_THUMB2EE sl@0: sl@0: // NOTE THIS WILL PRODUCE A WARNING IF REGISTERS ARE NOT IN ASCENDING ORDER sl@0: #define EXTRA_STACK_LIST(thumb2ee, tid, fpexc, car, dacr)\ sl@0: THUMB2EE_SP_REG(thumb2ee) TID_SP_REG(tid) FPEXC_SP_REG(fpexc) CAR_SP_REG(car) DACR_SP_REG(dacr) sl@0: sl@0: ////////////////////////////////////////////////////////////////////////////// sl@0: sl@0: //#define __DEBUG_BAD_ADDR sl@0: sl@0: extern "C" void PanicFastSemaphoreWait(); sl@0: sl@0: #ifdef __DFC_MACHINE_CODED__ sl@0: sl@0: __ASSERT_COMPILE(_FOFF(TDfcQue,iPresent) == 0); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iNext) == 0); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iPrev) == 4); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iPriority) % 4 == 0); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iOnFinalQ) == _FOFF(TDfc,iPriority) + 2); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iQueued) == _FOFF(TDfc,iOnFinalQ) + 1); sl@0: sl@0: __NAKED__ void TDfcQue::ThreadFunction(TAny* /*aDfcQ*/) sl@0: { sl@0: asm("ldr r11, __TheScheduler2 "); sl@0: sl@0: asm("mov r4, r0 "); // r4=aDfcQ sl@0: asm("ldr r10, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("mov r7, #0 "); sl@0: asm("mov r9, #1 "); sl@0: SET_INTS_1(r5, MODE_SVC, INTS_ALL_ON); sl@0: SET_INTS_1(r6, MODE_SVC, INTS_ALL_OFF); sl@0: sl@0: asm("dfc_thrd_fn_check_queue: "); sl@0: SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON); // enable interrupts sl@0: sl@0: asm("dfc_thrd_fn_check_queue2: "); sl@0: asm("str r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel sl@0: asm("ldr r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent)); // r3=aDfcQ->iPresent sl@0: asm("add lr, r4, #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // lr=address of priority 0 queue sl@0: #ifdef __CPU_ARM_HAS_CLZ sl@0: CLZ(12,3); // r12=31-MSB(r3), 32 if r3=0 sl@0: asm("rsbs r12, r12, #31 "); // r12=ms bit number set, -1 if queue empty sl@0: asm("bmi dfc_thrd_fn_wait "); // if empty, wait for next request sl@0: #else sl@0: asm("movs r2, r3 "); // check if queue empty sl@0: asm("beq dfc_thrd_fn_wait "); // if empty, wait for next request sl@0: asm("mov r12, #7 "); sl@0: asm("cmp r2, #0x10 "); sl@0: asm("movcc r2, r2, lsl #4 "); sl@0: asm("subcc r12, r12, #4 "); sl@0: asm("cmp r2, #0x40 "); sl@0: asm("movcc r2, r2, lsl #2 "); sl@0: asm("subcc r12, r12, #2 "); sl@0: asm("cmp r2, #0x80 "); sl@0: asm("subcc r12, r12, #1 "); // r12=ms bit number set sl@0: #endif sl@0: asm("ldr r8, [lr, r12, lsl #2]! 
"); // lr=address of highest priority non-empty queue, r8=address of first DFC sl@0: asm("ldmia r8, {r0-r1} "); // r0=first->next, r1=first->prev sl@0: asm("cmp r0, r8 "); // check if this is the only one at this priority sl@0: asm("strne r0, [r1, #0] "); // if not, prev->next=next sl@0: asm("strne r1, [r0, #4] "); // and next->prev=prev sl@0: asm("streq r7, [lr] "); // if this was only one, set head pointer for this priority to NULL sl@0: asm("strne r0, [lr] "); // else set head pointer to first->next sl@0: ASM_KILL_LINK(r8,r1); sl@0: asm("strh r7, [r8, #%a0]" : : "i" _FOFF(TDfc, iOnFinalQ)); // iOnFinalQ=iQueued=FALSE - can't touch link pointers after this sl@0: asm("biceq r3, r3, r9, lsl r12 "); // if no more at this priority clear bit in iPresent sl@0: asm("streq r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent)); sl@0: sl@0: SET_INTS_2(r6, MODE_SVC, INTS_ALL_OFF); // interrupts off sl@0: asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // check if reschedule required sl@0: asm("cmp r3, #0 "); sl@0: asm("streq r7, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // if no reschedule required unlock the kernel sl@0: asm("blne " CSM_ZN10TScheduler10RescheduleEv); // if reschedule required, do it sl@0: SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON); // restore interrupts sl@0: sl@0: asm("ldr r1, [r8, #%a0]" : : "i" _FOFF(TDfc, iFunction)); // r1=function address sl@0: asm("adr lr, dfc_thrd_fn_check_queue2 "); // set up return address sl@0: asm("ldr r0, [r8, #%a0]" : : "i" _FOFF(TDfc, iPtr)); // r0=DFC argument sl@0: __JUMP(,r1); // call DFC sl@0: sl@0: asm("dfc_thrd_fn_wait: "); sl@0: asm("mov r0, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc)); sl@0: asm("strb r0, [r10, #%a0]" : : "i" _FOFF(NThreadBase,iNState)); sl@0: asm("strb r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: asm("mov r0, r11 "); sl@0: asm("mov r1, r10 "); sl@0: asm("bl unready "); sl@0: asm("adr lr, dfc_thrd_fn_check_queue "); // set up return address sl@0: asm("b " CSM_ZN10TScheduler10RescheduleEv); sl@0: sl@0: asm("__TheScheduler2: "); sl@0: asm(".word TheScheduler "); sl@0: } sl@0: sl@0: sl@0: /** Cancels an IDFC or DFC. sl@0: sl@0: This function does nothing if the IDFC or DFC is not queued. sl@0: sl@0: @return TRUE if the DFC was actually dequeued by this call. In that case sl@0: it is guaranteed that the DFC will not execute until it is sl@0: queued again. sl@0: FALSE if the DFC was not queued on entry to the call, or was in sl@0: the process of being executed or cancelled. In this case sl@0: it is possible that the DFC executes after this call sl@0: returns. sl@0: sl@0: @post However in either case it is safe to delete the DFC object on sl@0: return from this call provided only that the DFC function does not sl@0: refer to the DFC object itself. sl@0: sl@0: @pre IDFC or thread context. Do not call from ISRs. sl@0: sl@0: @pre If the DFC function accesses the DFC object itself, the user must ensure that sl@0: Cancel() cannot be called while the DFC function is running. 
sl@0: */ sl@0: __NAKED__ EXPORT_C TBool TDfc::Cancel() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR); sl@0: sl@0: asm("ldr r1, __TheScheduler2 "); sl@0: asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("add r3, r3, #1 "); sl@0: asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority)); // r2=priority/flags sl@0: SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF); sl@0: asm("tst r2, #0xff000000 "); // test queued flag sl@0: asm("moveq r0, #0 "); // if not queued, return FALSE sl@0: asm("beq 0f "); sl@0: SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF); // otherwise disable interrupts while we dequeue sl@0: asm("ldmia r0, {r3,r12} "); // r3=next, r12=prev sl@0: SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON); sl@0: asm("str r3, [r12, #0] "); // prev->next=next sl@0: asm("str r12, [r3, #4] "); // next->prev=prev sl@0: SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON); // reenable interrupts sl@0: asm("tst r2, #0x00ff0000 "); // check iOnFinalQ sl@0: asm("beq 1f "); // if FALSE, finish up sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ)); // r1=iDfcQ sl@0: asm("and r2, r2, #0xff "); // r2=iPriority sl@0: asm("subs r12, r3, r0 "); // check if queue is now empty, r12=0 if it is sl@0: asm("beq 2f "); // branch if now empty sl@0: asm("add r1, r1, r2, lsl #2 "); // r1=&iDfcQ->iQueue[iPriority]-_FOFF(TDfcQue.iPriority) sl@0: asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue)); // r12=iDfcQ->iQueue[iPriority] sl@0: asm("cmp r12, r0 "); // is this one first? sl@0: asm("streq r3, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue)); // if so, iQueue[pri]=next sl@0: asm("b 1f "); sl@0: asm("2: "); // r0=this, r1=iDfcQ, r2=priority, r3=next, r12=0 sl@0: asm("ldr r3, [r1], #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // r3=iDfcQ->iPresent, r1=&iDfcQ->iQueue[0] sl@0: asm("str r12, [r1, r2, lsl #2] "); // iDfcQ->iQueue[iPriority]=NULL sl@0: asm("mov r12, #1 "); sl@0: asm("bic r3, r3, r12, lsl r2 "); // clear present bit sl@0: asm("str r3, [r1, #-%a0]" : : "i" _FOFF(TDfcQue,iQueue)); sl@0: asm("1: "); sl@0: ASM_KILL_LINK(r0,r1); sl@0: asm("mov r3, #0 "); sl@0: asm("strh r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ)); // iOnFinalQ=iQueued=FALSE - must be done last sl@0: sl@0: // R0=this != 0 here sl@0: sl@0: asm("0: "); sl@0: asm("stmfd sp!, {r0,lr} "); sl@0: asm("bl " CSM_ZN5NKern6UnlockEv); // unlock the kernel sl@0: __POPRET("r0,"); sl@0: } sl@0: #endif sl@0: sl@0: #ifdef __FAST_SEM_MACHINE_CODED__ sl@0: /** Waits on a fast semaphore. sl@0: sl@0: Decrements the signal count for the semaphore and sl@0: removes the calling thread from the ready-list if the sempahore becomes sl@0: unsignalled. Only the thread that owns a fast semaphore can wait on it. sl@0: sl@0: Note that this function does not block, it merely updates the NThread state, sl@0: rescheduling will only occur when the kernel is unlocked. Generally threads sl@0: would use NKern::FSWait() which manipulates the kernel lock for you. sl@0: sl@0: @pre The calling thread must own the semaphore. sl@0: @pre Kernel must be locked. sl@0: @pre No fast mutex can be held. sl@0: sl@0: @post Kernel is locked. 
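A minimal calling sketch (not from this file; names are illustrative), assuming
aSem is a fast semaphore already initialised with the current thread as its
owning thread:

@code
void WaitOnOwnSemaphore(NFastSemaphore& aSem)
	{
	NKern::Lock();		// Wait() requires the kernel to be locked
	aSem.Wait();		// decrement the count; mark the thread waiting if it goes negative
	NKern::Unlock();	// any reschedule happens once the kernel lock is released
	}
@endcode

Most code simply calls NKern::FSWait() on the semaphore, which performs the
equivalent lock/wait/unlock sequence.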
sl@0: sl@0: @see NFastSemaphore::Signal() sl@0: @see NKern::FSWait() sl@0: @see NKern::Unlock() sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastSemaphore::Wait() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX); sl@0: sl@0: asm("mov r2, r0 "); sl@0: asm("ldr r0, __TheScheduler "); sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread)); // r1=owning thread sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3=current thread sl@0: asm("cmp r1, r3 "); sl@0: asm("bne PanicFastSemaphoreWait "); // if wrong thread, fault sl@0: // wait on a NFastSemaphore pointed to by r2 sl@0: // enter with r0=&TheScheduler, r1=the current thread, already validated sl@0: asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: asm("mov r12, #%a0" : : "i" (NThread::EWaitFastSemaphore)); sl@0: asm("subs r3, r3, #1 "); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); // decrement iCount sl@0: __JUMP(ge,lr); // if result>=0, finished sl@0: asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: asm("strb r12, [r1, #%a0]" : : "i" _FOFF(NThread,iNState)); sl@0: asm("mov r3, #1 "); sl@0: asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: sl@0: // remove thread from ready list sl@0: asm("b unready "); sl@0: } sl@0: sl@0: sl@0: /** Waits for a signal on the current thread's I/O semaphore. sl@0: @pre No fast mutex can be held. sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::WaitForAnyRequest() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX); sl@0: sl@0: asm("ldr r0, __TheScheduler "); sl@0: asm("str lr, [sp, #-4]! 
"); // save lr sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("bl wait_for_any_request2 "); sl@0: SET_INTS(r0, MODE_SVC, INTS_ALL_ON); // turn interrupts back on sl@0: asm("ldr pc, [sp], #4 "); sl@0: sl@0: // Special case handler for Exec::WaitForAnyRequest() for efficiency reasons sl@0: // Called from __ArmVectorSwi with R11=&TheScheduler, R1=current thread sl@0: // Returns with interrupts disabled sl@0: asm(".global wait_for_any_request "); sl@0: asm("wait_for_any_request: "); sl@0: sl@0: ASM_DEBUG0(WaitForAnyRequest); sl@0: asm("mov r0, r11 "); sl@0: asm("wait_for_any_request2: "); sl@0: SET_INTS_1(r2, MODE_SVC, INTS_ALL_OFF); sl@0: #ifdef _DEBUG sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("cmp r3, #0 "); sl@0: asm("movne r12, #0xd8000001 "); // FAULT - calling Exec::WaitForAnyRequest() with the kernel locked is silly sl@0: asm("strne r12, [r12] "); sl@0: #endif sl@0: SET_INTS_2(r2, MODE_SVC, INTS_ALL_OFF); // turn off interrupts sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount)); sl@0: asm("mov r3, #1 "); sl@0: SET_INTS_1(r12, MODE_SVC, INTS_ALL_ON); sl@0: asm("subs r2, r2, #1 "); sl@0: asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount)); // decrement iCount sl@0: __JUMP(ge,lr); // if result non-negative, finished sl@0: sl@0: asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel sl@0: SET_INTS_2(r12, MODE_SVC, INTS_ALL_ON); // reenable interrupts sl@0: asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: sl@0: // r2 points to NFastSemaphore sl@0: asm("add r2, r1, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore)); sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: asm("mov r3, #%a0" : : "i" (NThread::EWaitFastSemaphore)); sl@0: asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NThread,iNState)); // mark thread waiting on semaphore sl@0: asm("bl unready "); // remove thread from ready list - DOESN'T CLOBBER R0 sl@0: asm("bl " CSM_ZN10TScheduler10RescheduleEv); // Reschedule sl@0: asm("ldr lr, [sp], #4 "); sl@0: asm("mov r3, #%a0 " : : "i" (NThread::EContextWFARCallback)); sl@0: asm("b callUserModeCallbacks "); // exit and call callbacks sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore multiple times. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. 
sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR); sl@0: sl@0: asm("req_sem_signaln: "); sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: asm("adds r2, r2, r1 "); sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: __JUMP(cc,lr); // if count did not cross 0 nothing more to do sl@0: asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread)); sl@0: asm("mov r1, #0 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: asm("b check_suspend_then_ready "); sl@0: } sl@0: sl@0: /** @internalComponent */ sl@0: __NAKED__ void NFastSemaphore::WaitCancel() sl@0: { sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread)); sl@0: asm("mov r1, #0 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: asm("mov r0, r3 "); sl@0: asm("b check_suspend_then_ready "); sl@0: } sl@0: sl@0: sl@0: /** Resets a fast semaphore. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastSemaphore::Reset() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR); sl@0: sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: asm("mov r1, #0 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: asm("cmp r2, #0 "); sl@0: __JUMP(ge,lr); // if count was not negative, nothing to do sl@0: asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread)); sl@0: asm("mov r1, #0 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: asm("b check_suspend_then_ready "); sl@0: } sl@0: sl@0: #endif sl@0: sl@0: #ifdef __SCHEDULER_MACHINE_CODED__ sl@0: sl@0: __ASSERT_COMPILE(_FOFF(SDblQueLink,iNext) == 0); sl@0: __ASSERT_COMPILE(_FOFF(SDblQueLink,iPrev) == 4); sl@0: __ASSERT_COMPILE(_FOFF(TScheduler,iPresent) == 0); sl@0: __ASSERT_COMPILE(_FOFF(NFastSemaphore,iCount) == 0); sl@0: __ASSERT_COMPILE(_FOFF(NFastSemaphore,iOwningThread) == 4); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iPtr) == _FOFF(TDfc,iPriority) + 4); sl@0: __ASSERT_COMPILE(_FOFF(TDfc,iFunction) == _FOFF(TDfc,iPtr) + 4); sl@0: sl@0: __NAKED__ void TScheduler::Remove(NThreadBase* /*aThread*/) sl@0: // sl@0: // Remove a thread from the ready list sl@0: // sl@0: { sl@0: asm("unready: "); sl@0: #ifdef _DEBUG sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); sl@0: asm("mov r12, #0xd8000003 "); sl@0: asm("cmp r2, #0 "); sl@0: asm("strne r12, [r12] "); // crash if fast mutex held sl@0: #endif sl@0: asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTimeslice)); sl@0: asm("ldmia r1, {r2,r3} "); // r2=next, r3=prev sl@0: asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTime)); // fresh timeslice for next time sl@0: sl@0: asm("pri_list_remove: "); sl@0: ASM_KILL_LINK(r1,r12); sl@0: asm("subs r12, r1, r2 "); // check if more threads at this priority, r12=0 if not sl@0: asm("bne unready_1 "); // branch if there are more at same priority sl@0: asm("ldrb r2, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority)); // r2=thread priority sl@0: asm("add r1, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue)); // r1->iQueue[0] sl@0: asm("str r12, [r1, r2, lsl #2] "); // iQueue[priority]=NULL sl@0: asm("ldrb r1, [r0, r2, lsr #3] "); // r1=relevant 
byte in present mask sl@0: asm("and r3, r2, #7 "); // r3=priority & 7 sl@0: asm("mov r12, #1 "); sl@0: asm("bic r1, r1, r12, lsl r3 "); // clear bit in present mask sl@0: asm("strb r1, [r0, r2, lsr #3] "); // update relevant byte in present mask sl@0: __JUMP(,lr); sl@0: asm("unready_1: "); // get here if there are other threads at same priority sl@0: asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority)); // r12=thread priority sl@0: asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue)); // r0=&iQueue[0] sl@0: asm("str r3, [r2, #4] "); // next->prev=prev sl@0: asm("ldr r12, [r0, r12, lsl #2]! "); // r12=iQueue[priority], r0=&iQueue[priority] sl@0: asm("str r2, [r3, #0] "); // and prev->next=next sl@0: asm("cmp r12, r1 "); // if aThread was first... sl@0: asm("streq r2, [r0, #0] "); // iQueue[priority]=aThread->next sl@0: __JUMP(,lr); // finished sl@0: } sl@0: sl@0: sl@0: /** Removes an item from a priority list. sl@0: sl@0: @param aLink A pointer to the item - this must not be NULL. sl@0: */ sl@0: EXPORT_C __NAKED__ void TPriListBase::Remove(TPriListLink* /*aLink*/) sl@0: { sl@0: asm("ldmia r1, {r2,r3} "); // r2=aLink->iNext, r3=aLink->iPrev sl@0: asm("b pri_list_remove "); sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore. sl@0: sl@0: Increments the signal count of a fast semaphore by sl@0: one and releases any waiting thread if the semphore becomes signalled. sl@0: sl@0: Note that a reschedule will not occur before this function returns, this will sl@0: only take place when the kernel is unlocked. Generally threads sl@0: would use NKern::FSSignal() which manipulates the kernel lock for you. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @see NFastSemaphore::Wait() sl@0: @see NKern::FSSignal() sl@0: @see NKern::Unlock() sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastSemaphore::Signal() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR); sl@0: sl@0: asm("req_sem_signal: "); sl@0: asm("ldmia r0, {r1,r2} "); // r1=iCount, r2=iOwningThread sl@0: asm("mov r3, #0 "); sl@0: asm("adds r1, r1, #1 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); sl@0: __JUMP(gt,lr); // if count after incrementing is >0, nothing more to do sl@0: asm("mov r0, r2 "); sl@0: asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj)); sl@0: sl@0: // fall through to NThreadBase::CheckSuspendThenReady() sl@0: } sl@0: sl@0: sl@0: /** Makes a nanothread ready provided that it is not explicitly suspended. sl@0: sl@0: For use by RTOS personality layers. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: */ sl@0: EXPORT_C __NAKED__ void NThreadBase::CheckSuspendThenReady() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR); sl@0: sl@0: asm("check_suspend_then_ready: "); sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThread,iSuspendCount)); sl@0: asm("mov r2, #%a0" : : "i" (NThread::ESuspended)); sl@0: asm("cmp r1, #0 "); sl@0: asm("bne mark_thread_suspended "); // branch out if suspend count nonzero sl@0: sl@0: // fall through to NThreadBase::Ready() sl@0: } sl@0: sl@0: sl@0: /** Makes a nanothread ready. sl@0: sl@0: For use by RTOS personality layers. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre The calling thread must not be explicitly suspended. sl@0: sl@0: @post Kernel is locked. 
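A minimal sketch for a personality layer (not from this file; names are
illustrative), assuming aThread points to a nanothread that is not currently
ready and not explicitly suspended:

@code
void MakeRunnable(NThreadBase* aThread)
	{
	NKern::Lock();
	aThread->Ready();	// insert into the ready list at its priority
	NKern::Unlock();	// a reschedule occurs here if the thread should preempt
	}
@endcode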
sl@0: */ sl@0: EXPORT_C __NAKED__ void NThreadBase::Ready() sl@0: { sl@0: // on release builds just fall through to DoReady sl@0: #ifdef _DEBUG sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_KERNEL_LOCKED); sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iSuspendCount)); sl@0: asm("cmp r1, #0 "); sl@0: asm("beq 1f "); sl@0: ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL); sl@0: asm("1: "); sl@0: asm("stmfd sp!, {r0,lr} "); sl@0: asm("mov r0, #%a0" : : "i" ((TInt)KCRAZYSCHEDDELAY)); sl@0: asm("bl " CSM_Z9KDebugNumi ); sl@0: asm("cmp r0, #0 "); // Z=1 => no delayed scheduler sl@0: asm("ldmfd sp!, {r0,lr} "); sl@0: asm("ldr r1, __TheScheduler "); sl@0: asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority)); // r2=priority of aThread sl@0: asm("beq DoReadyInner "); // delayed scheduler is disabled sl@0: asm("ldr r12, __TheTimerQ "); sl@0: asm("cmp r2, #0 "); sl@0: asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount)); sl@0: asm("cmpne r12, #0 "); // tick hasn't happened yet or this is priority 0 sl@0: asm("beq DoReadyInner "); // so ready it as usual sl@0: asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr)); sl@0: asm("tst r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed)); sl@0: __JUMP(ne,lr); // thread is already on the delayed queue sl@0: asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iDelayedQ)); sl@0: asm("ldr r12, [r3, #4] "); // r12->last thread sl@0: asm("str r0, [r3, #4] "); // first->prev=this sl@0: asm("str r0, [r12, #0] "); // old last->next=this sl@0: asm("stmia r0, {r3,r12} "); // this->next=first, this->prev=old last sl@0: asm("orr r2, r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed)); sl@0: asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr)); sl@0: __JUMP(,lr); sl@0: sl@0: asm("__TheTimerQ: "); sl@0: asm(".word TheTimerQ "); sl@0: asm("__SuperPageAddress: "); sl@0: asm(".word SuperPageAddress "); sl@0: #endif sl@0: // on release builds just fall through to DoReady sl@0: } sl@0: sl@0: __NAKED__ void NThreadBase::DoReady() sl@0: { sl@0: asm("ldr r1, __TheScheduler "); sl@0: asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority)); // r2=priority of aThread sl@0: asm("DoReadyInner: "); sl@0: asm("mov r3, #%a0" : : "i" (NThread::EReady)); sl@0: asm("strb r3, [r0, #%a0]" : : "i" _FOFF(NThread,iNState)); sl@0: asm("ldmia r1!, {r3,r12} "); // r3=present mask low, r12=present mask high, r1=&iQueue[0] sl@0: asm("cmp r2, #31 "); sl@0: asm("bhi 1f "); sl@0: asm("cmp r12, #0 "); sl@0: asm("mov r12, r3 "); sl@0: asm("mov r3, #1 "); sl@0: asm("bne 2f "); // branch if high word set, so this has lower priority sl@0: asm("cmp r3, r12, lsr r2 "); // see if new thread may cause reschedule (CS if so, EQ if equal priority) sl@0: asm("beq 3f "); // branch if equality case (no need to update bitmask) sl@0: asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary sl@0: asm("2: "); sl@0: asm("tst r12, r3, lsl r2 "); // test bit in present mask sl@0: asm("orreq r12, r12, r3, lsl r2 "); // if clear, set it ... sl@0: asm("ldrne r3, [r1, r2, lsl #2] "); // if not alone, r3->first thread on queue sl@0: asm("streq r12, [r1, #-8] "); // ... 
and update present mask low word sl@0: asm("bne 4f "); // branch if not alone (don't need to touch bitmask) sl@0: asm("6: "); // get here if thread is alone at this priority sl@0: asm("str r0, [r1, r2, lsl #2] "); // thread is alone at this priority, so point queue to it sl@0: asm("str r0, [r0, #0] "); // next=prev=this sl@0: asm("str r0, [r0, #4] "); sl@0: __JUMP(,lr); // NOTE: R0=this != 0 sl@0: asm("5: "); // get here if this thread has joint highest priority >= 32 sl@0: asm("add r2, r2, #32 "); // restore thread priority sl@0: asm("3: "); // get here if this thread has joint highest priority < 32 sl@0: asm("ldr r3, [r1, r2, lsl #2] "); // r3->first thread on queue sl@0: asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iTime)); // r12=first thread->time remaining sl@0: asm("subs r12, r12, #1 "); // timeslice expired? if so, r12=-1 and C=0 else C=1 sl@0: asm("strccb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary sl@0: asm("4: "); // get here when adding to non-empty queue; r1->queue, r3->first thread on queue sl@0: asm("ldr r12, [r3, #4] "); // r12->last thread sl@0: asm("str r0, [r3, #4] "); // first->prev=this sl@0: asm("str r0, [r12, #0] "); // old last->next=this sl@0: asm("stmia r0, {r3,r12} "); // this->next=first, this->prev=old last sl@0: __JUMP(,lr); // NOTE: R0=this != 0 sl@0: asm("1: "); // get here if this thread priority > 31 sl@0: asm("and r2, r2, #31 "); sl@0: asm("mov r3, #1 "); sl@0: asm("cmp r3, r12, lsr r2 "); // see if new thread may cause reschedule (CS if so, EQ if equal priority) sl@0: asm("beq 5b "); // branch if equality case (no need to update bitmask) sl@0: asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary sl@0: asm("tst r12, r3, lsl r2 "); // test bit in present mask sl@0: asm("orreq r12, r12, r3, lsl r2 "); // if clear, set it ... sl@0: asm("add r2, r2, #32 "); sl@0: asm("streq r12, [r1, #-4] "); // ... and update present mask high word sl@0: asm("beq 6b "); // branch if alone sl@0: asm("ldr r3, [r1, r2, lsl #2] "); // if not alone, r3->first thread on queue sl@0: asm("b 4b "); // branch if not alone (don't need to touch bitmask) sl@0: sl@0: asm("mark_thread_suspended: "); // continuation of CheckSuspendThenReady in unusual case sl@0: asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iNState)); // set state to suspended sl@0: __JUMP(,lr); // NOTE: R0=this != 0 sl@0: } sl@0: sl@0: __NAKED__ void TScheduler::QueueDfcs() sl@0: { sl@0: // move DFCs from pending queue to their final queues sl@0: // enter with interrupts off and kernel locked sl@0: // leave with interrupts off and kernel locked sl@0: // NOTE: WE MUST NOT CLOBBER R0 OR R2! 
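	// Outline of the code below (descriptive only):
	//  1. Re-enable interrupts and set iInIDFC.
	//  2. With interrupts briefly disabled, unlink the first TDfc from the
	//     pending queue (iDfcs); if the queue is empty, clear iDfcPendingFlag
	//     and iInIDFC and return.
	//  3. If its priority marks it as an immediate DFC, clear iQueued and
	//     call the function directly, then repeat from step 2.
	//  4. Otherwise move it to its TDfcQue priority queue, update that queue's
	//     present mask, and make the DFC thread ready if it is in EWaitDfc state.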
sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: sl@0: sl@0: SET_INTS(r1, MODE_SVC, INTS_ALL_ON); // enable interrupts sl@0: #ifdef __CPU_ARM_HAS_CPS sl@0: asm("mov r1, #1 "); // (not necessary on ARMV5 as SET_INTS above leaves r1 == 0x13) sl@0: #endif sl@0: asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC)); sl@0: asm("stmfd sp!, {r2,r5,r11,lr} "); // save registers sl@0: sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter)); sl@0: asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs)); sl@0: asm("mov r11, sp "); // r11 points to saved registers sl@0: asm("cmp r1, #0"); sl@0: asm("blne idfc_start_trace"); sl@0: #else sl@0: asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs)); sl@0: asm("mov r11, sp "); // r11 points to saved registers sl@0: #endif sl@0: sl@0: asm("queue_dfcs_1: "); sl@0: SET_INTS(r0, MODE_SVC, INTS_ALL_OFF); // disable interrupts sl@0: asm("ldr r0, [r5, #0] "); // r0 points to first pending DFC sl@0: SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON); sl@0: asm("subs r2, r0, r5 "); // check if queue empty sl@0: asm("ldrne r3, [r0, #0] "); // r3 points to next DFC sl@0: asm("beq queue_dfcs_0 "); // if so, exit sl@0: asm("str r3, [r5, #0] "); // next one is now first sl@0: asm("str r5, [r3, #4] "); // next->prev=queue head sl@0: SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON); // enable interrupts sl@0: sl@0: asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority)); // r12=iPriority sl@0: asm("adr lr, queue_dfcs_1 "); // return to queue_dfcs_1 sl@0: asm("cmp r12, #%a0" : : "i" ((TInt)KNumDfcPriorities)); // check for immediate DFC sl@0: asm("bcs do_immediate_dfc "); sl@0: sl@0: // enqueue the DFC and signal the DFC thread sl@0: asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ)); // r2=iDfcQ sl@0: asm("mov r3, #1 "); sl@0: asm("dfc_enque_1: "); sl@0: asm("ldr r1, [r2], #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // r1=present mask, r2 points to first queue sl@0: asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ)); // set flag to show DFC on final queue sl@0: asm("tst r1, r3, lsl r12 "); // test bit in present mask sl@0: asm("ldrne r1, [r2, r12, lsl #2] "); // if not originally empty, r1->first sl@0: asm("orreq r1, r1, r3, lsl r12 "); // if bit clear, set it sl@0: asm("streq r1, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iPresent)-_FOFF(TDfcQue,iQueue))); // if bit originally clear update present mask sl@0: asm("ldrne r3, [r1, #4] "); // if not originally empty, r3->last sl@0: asm("streq r0, [r2, r12, lsl #2] "); // if queue originally empty, iQueue[p]=this sl@0: asm("streq r0, [r0, #0] "); // this->next=this sl@0: asm("ldr r2, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iThread)-_FOFF(TDfcQue,iQueue))); // r2=iDfcQ->iThread sl@0: asm("stmneia r0, {r1,r3} "); // this->next=first, this->prev=last sl@0: asm("streq r0, [r0, #4] "); // this->prev=this sl@0: asm("ldrb r12, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iNState)); // r2=thread NState sl@0: asm("strne r0, [r1, #4] "); // first->prev=this sl@0: asm("strne r0, [r3, #0] "); // last->next=this sl@0: asm("cmp r12, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc)); // check for EWaitDfc sl@0: asm("mov r0, r2 "); // r0->thread sl@0: asm("beq check_suspend_then_ready "); // if it is, release thread sl@0: __JUMP(,lr); // else we are finished - NOTE R0=thread ptr != 0 sl@0: sl@0: asm("queue_dfcs_0: "); sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("ldrb r1, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iCpuUsageFilter)-_FOFF(TScheduler,iDfcs))); sl@0: asm("strb r2, [r5, #%a0]" : : "i" 
(_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs))); sl@0: asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs))); sl@0: asm("cmp r1, #0"); sl@0: asm("blne idfc_end_trace"); sl@0: #else sl@0: asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs))); sl@0: asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs))); sl@0: #endif sl@0: asm("sub r0, r5, #%a0" : : "i" _FOFF(TScheduler,iDfcs)); // restore r0 sl@0: asm("mov sp, r11 "); // retrieve stack pointer before alignment sl@0: asm("ldmfd sp!, {r2,r5,r11,pc} "); sl@0: sl@0: asm("do_immediate_dfc: "); sl@0: ASM_KILL_LINK(r0,r1); sl@0: asm("mov r1, #0x000000ff "); // pri=0xff (IDFC), spare1=0 (unused), spare2=0 (iOnFinalQ), spare3=0 (iQueued) sl@0: asm("str r1, [r0, #%a0]!" : : "i" _FOFF(TDfc,iPriority)); // dfc->iQueued=FALSE, r0->iPriority sl@0: asm("ldmib r0, {r0,r1} "); // r0 = DFC parameter, r1 = DFC function pointer sl@0: asm("bic sp, sp, #4 "); // align stack sl@0: __JUMP(,r1); // call DFC, return to queue_dfcs_1 sl@0: sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("idfc_start_trace_header:"); sl@0: asm(".word %a0" : : "i" ((TInt)(4<iNext=this ... sl@0: asm("streq r2, [r0, #4] "); // ...iPrev=old last sl@0: sl@0: // NOTE: R0=this != 0 sl@0: sl@0: asm("dontswap: "); sl@0: __JUMP(,lr); sl@0: sl@0: asm("__PendingDfcQueue: "); sl@0: asm(".word %a0" : : "i" ((TInt)&TheScheduler.iDfcs)); sl@0: } sl@0: sl@0: sl@0: /** Queues a DFC (not an IDFC) from an IDFC or thread with preemption disabled. sl@0: sl@0: This function is the preferred way to queue a DFC from an IDFC. It should not sl@0: be used to queue an IDFC - use TDfc::Add() for this. sl@0: sl@0: This function does nothing if the DFC is already queued. sl@0: sl@0: @pre Call only from IDFC or thread with the kernel locked. sl@0: @pre Do not call from ISR or thread with the kernel unlocked. sl@0: @return TRUE if DFC was actually queued by this call sl@0: FALSE if DFC was already queued on entry so this call did nothing sl@0: sl@0: @see TDfc::Add() sl@0: @see TDfc::Enque() sl@0: */ sl@0: __NAKED__ EXPORT_C TBool TDfc::DoEnque() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NO_RESCHED); sl@0: #ifdef _DEBUG sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ)); sl@0: asm("cmp r1, #0 "); sl@0: asm("bne 1f "); sl@0: ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL); sl@0: asm("1: "); sl@0: #endif sl@0: sl@0: #if defined(__CPU_ARM_HAS_LDREX_STREX_V6K) sl@0: asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued)); // r2=&iQueued's byte offset sl@0: asm("mov r3, #1 "); sl@0: sl@0: asm("tryagain8: "); sl@0: LDREXB(1, 2); // r1 = iQueued sl@0: STREXB(12, 3, 2); // Try setting iQueued = True sl@0: asm(" teq r12, #1 "); // worked? sl@0: asm(" beq tryagain8 "); // nope sl@0: // r3 = 1, r1 = old iQueued sl@0: #elif defined(__CPU_ARM_HAS_LDREX_STREX) sl@0: asm(" add r0, r0, #8 "); // align address (struct always aligned) sl@0: asm("tryagain8: "); sl@0: LDREX(2, 0); // do the load/store half sl@0: asm(" bic r12, r2, #0xff000000 "); // knock out unwanted bits sl@0: asm(" orr r12, r12, #0x01000000 "); // 'looking' value sl@0: STREX(1, 12, 0); // write looking value sl@0: asm(" teq r1, #1 "); // worked? 
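	// STREX leaves 1 in r1 if the exclusive store was disturbed (e.g. by an
	// interrupt), in which case the load/modify/store sequence is retried.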
sl@0: asm(" beq tryagain8 "); // nope sl@0: asm(" mov r1, r2, lsr #24 "); // extract previous value byte sl@0: asm(" sub r0, r0, #8 "); // restore base pointer sl@0: asm(" mov r3, #1 "); // dfc_enque_1 expects r3 = 1 sl@0: #else sl@0: asm("add r12, r0, #11 "); // r12=&iQueued sl@0: asm("mov r3, #1 "); sl@0: asm("swpb r1, r3, [r12] "); // ATOMIC {r1=iQueued; iQueued=TRUE} sl@0: #endif sl@0: sl@0: asm("ldrb r12, [r0, #8] "); // r12=iPriority sl@0: asm("ldr r2, [r0, #20] "); // r2=iDfcQ sl@0: asm("cmp r1, #0 "); // check if queued sl@0: asm("beq dfc_enque_1 "); // if not, queue it and return with R0 nonzero sl@0: asm("mov r0, #0 "); sl@0: __JUMP(,lr); sl@0: } sl@0: #endif sl@0: sl@0: #ifdef __FAST_MUTEX_MACHINE_CODED__ sl@0: sl@0: __ASSERT_COMPILE(_FOFF(NFastMutex,iHoldingThread) == 0); sl@0: sl@0: /** Releases a previously acquired fast mutex. sl@0: sl@0: Generally threads would use NKern::FMSignal() which manipulates the kernel lock sl@0: for you. sl@0: sl@0: @pre The calling thread must hold the mutex. sl@0: @pre Kernel must be locked. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @see NFastMutex::Wait() sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastMutex::Signal() sl@0: { sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: ASM_DEBUG1(FMSignal,r0); sl@0: asm("ldr r2, __TheScheduler "); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r1, #0"); sl@0: asm("bne fastmutex_signal_trace"); sl@0: asm("no_fastmutex_signal_trace:"); sl@0: #endif sl@0: asm("mov r12, #0 "); sl@0: asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting)); // iHoldingThread=NULL, r0->iWaiting sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: asm("ldr r3, [r0] "); // r3=iWaiting sl@0: asm("str r12, [r0] "); // iWaiting=FALSE sl@0: asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL sl@0: asm("cmp r3, #0 "); // check waiting flag sl@0: asm("bne 2f "); sl@0: asm("1: "); sl@0: __JUMP(,lr); // if clear, finished sl@0: asm("2: "); sl@0: asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction)); sl@0: asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // Assumes iWaiting!=0 mod 256 sl@0: asm("cmp r12, #0 "); // check for outstanding CS function sl@0: asm("beq 1b "); // if none, finished sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount)); // else check CS count sl@0: asm("mov r0, r1 "); sl@0: asm("cmp r2, #0 "); sl@0: __JUMP(ne,lr); // if nonzero, finished sl@0: asm("DoDoCsFunction: "); sl@0: asm("stmfd sp!, {r11,lr} "); sl@0: asm("mov r11, sp "); sl@0: asm("bic sp, sp, #4 "); sl@0: asm("bl " CSM_ZN11NThreadBase12DoCsFunctionEv); // if iCsCount=0, DoCsFunction() sl@0: asm("mov sp, r11 "); sl@0: asm("ldmfd sp!, {r11,pc} "); sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("fastmutex_signal_trace:"); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl fmsignal_lock_trace_unlock"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: asm("b no_fastmutex_signal_trace"); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: /** Acquires the fast mutex. sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: Generally threads would use NKern::FMWait() which manipulates the kernel lock sl@0: for you. 
sl@0: sl@0: @pre Kernel must be locked, with lock count 1. sl@0: sl@0: @post Kernel is locked, with lock count 1. sl@0: @post The calling thread holds the mutex. sl@0: sl@0: @see NFastMutex::Signal() sl@0: @see NKern::FMWait() sl@0: */ sl@0: EXPORT_C __NAKED__ void NFastMutex::Wait() sl@0: { sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: ASM_DEBUG1(FMWait,r0); sl@0: asm("ldr r2, __TheScheduler "); sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // r3=iHoldingThread sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: asm("cmp r3, #0 "); // check if mutex held sl@0: asm("bne fastmutex_wait_block "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // if not, iHoldingThread=current thread sl@0: asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // and current thread->iHeldFastMutex=this sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r12, #0"); sl@0: asm("bne fmwait_trace2"); sl@0: #endif sl@0: __JUMP(,lr); // and we're done sl@0: asm("fastmutex_wait_block:"); sl@0: asm("str lr, [sp, #-4]! "); // We must wait - save return address sl@0: asm("mov r12, #1 "); sl@0: asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // iWaiting=TRUE sl@0: asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // current thread->iWaitFastMutex=this sl@0: asm("mov r0, r3 "); // parameter for YieldTo sl@0: ASM_DEBUG1(FMWaitYield,r0); sl@0: asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase); // yield to the mutex holding thread sl@0: // will not return until the mutex is free sl@0: // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled sl@0: asm("mov r12, #1 "); sl@0: asm("str r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel sl@0: SET_INTS(r12, MODE_SVC, INTS_ALL_ON); // reenable interrupts sl@0: asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // r2=this sl@0: asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // iWaitFastMutex=NULL sl@0: asm("str r3, [r2, #0] "); // iHoldingThread=current thread sl@0: asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=this sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r12, #0"); sl@0: asm("bne fastmutex_wait_trace2"); sl@0: #endif sl@0: asm("ldr pc, [sp], #4 "); sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("fastmutex_wait_trace2:"); sl@0: // r0=scheduler r2=mutex r3=thread sl@0: asm("ldr lr, [sp], #4 "); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl fmwait_lockacquiredwait_trace"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: /** Releases the System Lock. sl@0: sl@0: @pre System lock must be held. sl@0: sl@0: @see NKern::LockSystem() sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::UnlockSystem() sl@0: { sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: ASM_CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED); sl@0: asm("ldr r0, __SystemLock "); sl@0: } sl@0: sl@0: sl@0: /** Releases a previously acquired fast mutex. sl@0: sl@0: @param aMutex The fast mutex to be released. sl@0: sl@0: @pre The calling thread must hold the mutex. 
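A minimal paired-use sketch (not from this file; names are illustrative),
assuming iLock is an initialised NFastMutex shared between threads:

@code
NKern::FMWait(&iLock);		// acquire; handles the kernel lock and any yielding for you
// ... code protected by the fast mutex ...
NKern::FMSignal(&iLock);	// release; reschedules to let a waiting thread in if one was blocked
@endcode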
sl@0: sl@0: @see NFastMutex::Signal() sl@0: @see NKern::FMWait() sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex*) sl@0: { sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: ASM_DEBUG1(NKFMSignal,r0); sl@0: sl@0: asm("ldr r2, __TheScheduler "); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r1, #0"); sl@0: asm("bne fmsignal_trace1"); sl@0: asm("no_fmsignal_trace1:"); sl@0: #endif sl@0: sl@0: #ifdef __CPU_ARM_HAS_CPS sl@0: asm("mov r12, #0 "); sl@0: CPSIDIF; // disable interrupts sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // r3=iWaiting sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // iHoldingThread=NULL sl@0: asm("cmp r3, #0 "); // check waiting flag sl@0: asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // iWaiting=FALSE sl@0: asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL sl@0: asm("bne 1f "); sl@0: CPSIEIF; // reenable interrupts sl@0: __JUMP(,lr); // if clear, finished sl@0: asm("1: "); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel if set (assumes iWaiting always 0 or 1) sl@0: CPSIEIF; // reenable interrupts sl@0: #else sl@0: SET_INTS_1(r3, MODE_SVC, INTS_ALL_OFF); sl@0: asm("mov r12, #0 "); sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: SET_INTS_2(r3, MODE_SVC, INTS_ALL_OFF); // disable interrupts sl@0: asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting)); // iHoldingThread=NULL, r0->iWaiting sl@0: asm("ldr r3, [r0] "); // r3=iWaiting sl@0: asm("str r12, [r0] "); // iWaiting=FALSE sl@0: asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL sl@0: asm("mov r12, #0x13 "); sl@0: asm("cmp r3, #0 "); // check waiting flag sl@0: __MSR_CPSR_C(eq, r12); // if clear, finished sl@0: __JUMP(eq,lr); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel (assumes iWaiting always 0 or 1) sl@0: asm("msr cpsr_c, r12 "); // reenable interrupts sl@0: #endif sl@0: asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction)); // r3=current thread->iCsFunction sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount)); // r2=current thread->iCsCount sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("cmp r3, #0 "); // outstanding CS function? sl@0: asm("beq 2f "); // branch if not sl@0: asm("cmp r2, #0 "); // iCsCount!=0 ? sl@0: asm("moveq r0, r1 "); // if iCsCount=0, DoCsFunction() sl@0: asm("bleq DoDoCsFunction "); sl@0: asm("2: "); sl@0: asm("bl " CSM_ZN10TScheduler10RescheduleEv); // reschedule to allow waiting thread in sl@0: SET_INTS(r12, MODE_SVC, INTS_ALL_ON); // reenable interrupts after reschedule sl@0: asm("ldr pc, [sp], #4 "); sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("fmsignal_trace1:"); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl fmsignal_lock_trace_unlock"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: asm("b no_fmsignal_trace1"); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: /** Acquires the System Lock. 
sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: @post System lock is held. sl@0: sl@0: @see NKern::UnlockSystem() sl@0: @see NKern::FMWait() sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::LockSystem() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: asm("ldr r0, __SystemLock "); sl@0: } sl@0: sl@0: sl@0: /** Acquires a fast mutex. sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: @param aMutex The fast mutex to be acquired. sl@0: sl@0: @post The calling thread holds the mutex. sl@0: sl@0: @see NFastMutex::Wait() sl@0: @see NKern::FMSignal() sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex*) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: ASM_DEBUG1(NKFMWait,r0); sl@0: asm("ldr r2, __TheScheduler "); sl@0: sl@0: #ifdef __CPU_ARM_HAS_CPS sl@0: CPSIDIF; // disable interrupts sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // r3=iHoldingThread sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: asm("cmp r3, #0 "); // check if mutex held sl@0: asm("bne 1f"); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // iHoldingThread=current thread sl@0: asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // and current thread->iHeldFastMutex=this sl@0: CPSIEIF; // reenable interrupts sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r12, #0"); sl@0: asm("bne fmwait_trace2"); sl@0: #endif sl@0: __JUMP(,lr); // we're finished sl@0: asm("1: "); sl@0: asm("mov r3, #1 "); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // mutex held, so lock the kernel sl@0: CPSIEIF; // reenable interrupts sl@0: #else sl@0: asm("mov r3, #0xd3 "); sl@0: asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: asm("msr cpsr, r3 "); // disable interrupts sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // r3=iHoldingThread sl@0: asm("mov r12, #0x13 "); sl@0: asm("cmp r3, #0"); // check if mutex held sl@0: asm("streq r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // if not, iHoldingThread=current thread sl@0: asm("streq r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // and current thread->iHeldFastMutex=this sl@0: __MSR_CPSR_C(eq, r12); // and we're finished sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("bne no_fmwait_trace2"); sl@0: asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r12, #0"); sl@0: asm("bne fmwait_trace2"); sl@0: __JUMP(,lr); sl@0: asm("no_fmwait_trace2:"); sl@0: #endif sl@0: __JUMP(eq,lr); sl@0: asm("mov r3, #1 "); sl@0: asm("str r3, [r2, #%a0]" : : "i" 
_FOFF(TScheduler,iKernCSLocked)); // mutex held, so lock the kernel sl@0: asm("msr cpsr_c, r12 "); // and reenable interrupts sl@0: #endif sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("str r3, [r0, #4] "); // iWaiting=TRUE sl@0: asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // current thread->iWaitFastMutex=this sl@0: asm("ldr r0, [r0, #0] "); // parameter for YieldTo sl@0: ASM_DEBUG1(NKFMWaitYield,r0); sl@0: asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase); // yield to the mutex holding thread sl@0: // will not return until the mutex is free sl@0: // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled sl@0: asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // r2=this sl@0: asm("ldr lr, [sp], #4 "); sl@0: asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // iWaitFastMutex=NULL sl@0: asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=this sl@0: asm("str r3, [r2, #0] "); // iHoldingThread=current thread sl@0: SET_INTS(r12, MODE_SVC, INTS_ALL_ON); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r12, #0"); sl@0: asm("bne fmwait_trace3"); sl@0: #endif sl@0: __JUMP(,lr); sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("fmwait_trace2:"); sl@0: // r0=mutex r1=thread r2=scheduler sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl fmwait_lockacquiredwait_trace2"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: __JUMP(,lr); sl@0: sl@0: asm("fmwait_trace3:"); sl@0: // r0=scheduler r2=mutex r3=thread sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl fmwait_lockacquiredwait_trace"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: #endif sl@0: sl@0: __NAKED__ void TScheduler::YieldTo(NThreadBase*) sl@0: { sl@0: // sl@0: // Enter in mode_svc with kernel locked, interrupts can be on or off sl@0: // Exit in mode_svc with kernel unlocked, interrupts off sl@0: // On exit r0=&TheScheduler, r1=0, r2!=0, r3=TheCurrentThread, r4-r11 unaltered sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: // sl@0: asm("mrs r1, spsr "); // r1=spsr_svc sl@0: asm("mov r2, r0 "); // r2=new thread sl@0: asm("ldr r0, __TheScheduler "); // r0 points to scheduler data sl@0: asm("stmfd sp!, {r1,r4-r11,lr} "); // store registers and return address sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR sl@0: #endif sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC); // r10/r11=FPEXC sl@0: #endif sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: GET_CAR(,r11); // r11=CAR sl@0: #endif sl@0: #ifdef __CPU_HAS_CP15_THREAD_ID_REG sl@0: GET_RWRW_TID(,r9); // r9=Thread ID sl@0: #endif sl@0: #ifdef __CPU_SUPPORT_THUMB2EE sl@0: GET_THUMB2EE_HNDLR_BASE(,r8); // r8=Thumb-2EE Handler Base sl@0: #endif sl@0: sl@0: asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // make room for original thread, extras, sp_usr and lr_usr sl@0: sl@0: // Save the sp_usr and lr_usr and only the required coprocessor registers sl@0: // Thumb-2EE TID FPEXC CAR DACR sl@0: asm("stmia sp, {" EXTRA_STACK_LIST( 8, 9, FPEXC_REG, 11, 12) "r13-r14}^ "); sl@0: #if defined(__CPU_ARMV4) || defined(__CPU_ARMV4T) || 
defined(__CPU_ARMV5T) sl@0: asm("nop "); // Can't have banked register access immediately after LDM/STM user registers sl@0: #endif sl@0: asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // store original thread's stack pointer sl@0: asm("b switch_threads "); sl@0: } sl@0: sl@0: #ifdef MONITOR_THREAD_CPU_TIME sl@0: sl@0: #ifdef HIGH_RES_TIMER_COUNTS_UP sl@0: #define CALC_HIGH_RES_DIFF(Rd, Rn, Rm) asm("sub "#Rd", "#Rn", "#Rm) sl@0: #else sl@0: #define CALC_HIGH_RES_DIFF(Rd, Rn, Rm) asm("rsb "#Rd", "#Rn", "#Rm) sl@0: #endif sl@0: sl@0: // Update thread cpu time counters sl@0: // Called just before thread switch with r2 == new thread sl@0: // Corrupts r3-r8, Leaves r5=current Time, r6=current thread sl@0: #define UPDATE_THREAD_CPU_TIME \ sl@0: asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); \ sl@0: GET_HIGH_RES_TICK_COUNT(r5); \ sl@0: asm("ldr r3, [r6, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \ sl@0: asm("str r5, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \ sl@0: CALC_HIGH_RES_DIFF(r4, r5, r3); \ sl@0: asm("add r3, r6, #%a0" : : "i" _FOFF(NThreadBase,iTotalCpuTime)); \ sl@0: asm("ldmia r3, {r7-r8}"); \ sl@0: asm("adds r7, r7, r4"); \ sl@0: asm("adc r8, r8, #0"); \ sl@0: asm("stmia r3, {r7-r8}") sl@0: sl@0: #else sl@0: #define UPDATE_THREAD_CPU_TIME sl@0: #endif sl@0: sl@0: // EMI - Schedule Logging sl@0: // Needs: r0=TScheduler, r2 = new thread sl@0: // If CPU_TIME, needs: r5=time, r6=current thread sl@0: // preserve r0 r2 r9(new address space), r10(&iLock), sp. Trashes r3-r8, lr sl@0: sl@0: #ifdef __EMI_SUPPORT__ sl@0: #define EMI_EVENTLOGGER \ sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLogging)); \ sl@0: asm("cmp r3,#0"); \ sl@0: asm("blne AddTaskSwitchEvent"); sl@0: sl@0: // Needs: r0=TScheduler, r2 = new thread sl@0: #define EMI_CHECKDFCTAG(no) \ sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iEmiMask)); \ sl@0: asm("ldr r4, [r2,#%a0]" : : "i" _FOFF(NThread, iTag)); \ sl@0: asm("ands r3, r3, r4"); \ sl@0: asm("bne emi_add_dfc" #no); \ sl@0: asm("check_dfc_tag_done" #no ": "); sl@0: sl@0: #define EMI_ADDDFC(no) \ sl@0: asm("emi_add_dfc" #no ": "); \ sl@0: asm("ldr r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \ sl@0: asm("mov r5, r2"); \ sl@0: asm("orr r4, r3, r4"); \ sl@0: asm("str r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \ sl@0: asm("mov r6, r0"); \ sl@0: asm("ldr r0, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfc)); \ sl@0: asm("bl " CSM_ZN4TDfc3AddEv); \ sl@0: asm("mov r2, r5"); \ sl@0: asm("mov r0, r6"); \ sl@0: asm("b check_dfc_tag_done" #no); sl@0: sl@0: #else sl@0: #define EMI_EVENTLOGGER sl@0: #define EMI_CHECKDFCTAG(no) sl@0: #define EMI_ADDDFC(no) sl@0: #endif sl@0: sl@0: sl@0: __ASSERT_COMPILE(_FOFF(NThread,iPriority) == _FOFF(NThread,iPrev) + 4); sl@0: __ASSERT_COMPILE(_FOFF(NThread,i_ThrdAttr) == _FOFF(NThread,iPriority) + 2); sl@0: __ASSERT_COMPILE(_FOFF(NThread,iHeldFastMutex) == _FOFF(NThread,i_ThrdAttr) + 2); sl@0: __ASSERT_COMPILE(_FOFF(NThread,iWaitFastMutex) == _FOFF(NThread,iHeldFastMutex) + 4); sl@0: __ASSERT_COMPILE(_FOFF(NThread,iAddressSpace) == _FOFF(NThread,iWaitFastMutex) + 4); sl@0: sl@0: __NAKED__ void TScheduler::Reschedule() sl@0: { sl@0: // sl@0: // Enter in mode_svc with kernel locked, interrupts can be on or off sl@0: // Exit in mode_svc with kernel unlocked, interrupts off sl@0: // On exit r0=&TheScheduler, r1=0, r3=TheCurrentThread, r4-r11 unaltered sl@0: // r2=0 if no reschedule occurred, non-zero if a reschedule did occur. 
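// Outline of the code below (descriptive only):
//  1. With interrupts off, drain any pending IDFCs/DFCs via QueueDfcs().
//  2. If iRescheduleNeededFlag is clear, take the early-exit path with no
//     context switch.
//  3. Otherwise save the outgoing thread's context (spsr, r4-r11, sp_usr,
//     lr_usr plus the coprocessor state selected by EXTRA_STACK_LIST), pick
//     the highest-priority ready thread from the 64-entry queue using the
//     two present-mask words, then deal with round-robin, fast-mutex and
//     address-space requirements before switching stacks.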
sl@0: // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY sl@0: // sl@0: asm("ldr r0, __TheScheduler "); // r0 points to scheduler data sl@0: asm("str lr, [sp, #-4]! "); // save return address sl@0: SET_INTS(r3, MODE_SVC, INTS_ALL_OFF); // interrupts off sl@0: asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iDfcPendingFlag)); sl@0: asm("mov r2, #0 "); // start with r2=0 sl@0: asm("cmp r1, #0 "); // check if DFCs pending sl@0: sl@0: asm("start_resched: "); sl@0: asm("blne " CSM_ZN10TScheduler9QueueDfcsEv); // queue any pending DFCs - PRESERVES R2 sl@0: asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: SET_INTS_1(r3, MODE_SVC, INTS_ALL_ON); sl@0: asm("cmp r1, #0 "); // check if a reschedule is required sl@0: asm("beq no_resched_needed "); // branch out if not sl@0: SET_INTS_2(r3, MODE_SVC, INTS_ALL_ON); // enable interrupts sl@0: asm("mrs r2, spsr "); // r2=spsr_svc sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("stmfd sp!, {r2,r4-r11} "); // store registers and return address sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC); // r10/r11=FPEXC sl@0: #endif sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: GET_CAR(,r11); // r11=CAR sl@0: #endif sl@0: #ifdef __CPU_HAS_CP15_THREAD_ID_REG sl@0: GET_RWRW_TID(,r9); // r9=Thread ID sl@0: #endif sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR sl@0: #endif sl@0: #ifdef __CPU_SUPPORT_THUMB2EE sl@0: GET_THUMB2EE_HNDLR_BASE(,r8); // r8=Thumb-2EE Handler Base sl@0: #endif sl@0: asm("ldr lr, [r0, #4] "); // lr=present mask high sl@0: asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // make room for extras, sp_usr and lr_usr sl@0: asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // store original thread's stack pointer sl@0: sl@0: sl@0: // Save the sp_usr and lr_usr and only the required coprocessor registers sl@0: // Thumb-2EE TID FPEXC CAR DACR sl@0: asm("stmia sp, {" EXTRA_STACK_LIST( 8, 9, FPEXC_REG, 11, 12) "r13-r14}^ "); sl@0: // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers sl@0: sl@0: asm("ldr r1, [r0], #%a0" : : "i" _FOFF(TScheduler,iQueue)); // r1=present mask low, r0=&iQueue[0] sl@0: #ifdef __CPU_ARM_HAS_CLZ sl@0: CLZ(12,14); // r12=31-MSB(r14) sl@0: asm("subs r12, r12, #32 "); // r12=-1-MSB(r14), 0 if r14=0 sl@0: CLZcc(CC_EQ,12,1); // if r14=0, r12=31-MSB(r1) sl@0: asm("rsb r12, r12, #31 "); // r12=highest ready thread priority sl@0: #else sl@0: asm("mov r12, #31 "); // find the highest priority ready thread sl@0: asm("cmp r14, #0 "); // high word nonzero? 
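	// Editor's note: the surrounding #else sequence (used when the CPU lacks a
	// CLZ instruction) is a branch-free binary search for the most significant
	// set bit of the 64-bit ready-queue present mask held in r14 (high word)
	// and r1 (low word), leaving the highest ready priority in r12.  An
	// equivalent C sketch (illustrative only; the caller guarantees that at
	// least one priority bit is set):
	//
	//	TInt HighestReadyPriority(TUint32 aLow, TUint32 aHigh)	// hypothetical helper
	//		{
	//		TUint32 w = aHigh ? aHigh : aLow;	// work on the nonzero word
	//		TInt pri = aHigh ? 63 : 31;			// assume its top bit to begin with
	//		if (w < 0x00010000u) { w <<= 16; pri -= 16; }
	//		if (w < 0x01000000u) { w <<= 8;  pri -= 8;  }
	//		if (w < 0x10000000u) { w <<= 4;  pri -= 4;  }
	//		if (w < 0x40000000u) { w <<= 2;  pri -= 2;  }
	//		if (w < 0x80000000u) { pri -= 1; }
	//		return pri;
	//		}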
sl@0: asm("moveq r14, r1 "); // if zero, r14=low word sl@0: asm("movne r12, #63 "); // else start at pri 63 sl@0: asm("cmp r14, #0x00010000 "); sl@0: asm("movlo r14, r14, lsl #16 "); sl@0: asm("sublo r12, r12, #16 "); sl@0: asm("cmp r14, #0x01000000 "); sl@0: asm("movlo r14, r14, lsl #8 "); sl@0: asm("sublo r12, r12, #8 "); sl@0: asm("cmp r14, #0x10000000 "); sl@0: asm("movlo r14, r14, lsl #4 "); sl@0: asm("sublo r12, r12, #4 "); sl@0: asm("cmp r14, #0x40000000 "); sl@0: asm("movlo r14, r14, lsl #2 "); sl@0: asm("sublo r12, r12, #2 "); sl@0: asm("cmp r14, #0x80000000 "); sl@0: asm("sublo r12, r12, #1 "); // r12 now equals highest ready priority sl@0: #endif sl@0: asm("ldr r2, [r0, r12, lsl #2] "); // r2=pointer to highest priority thread's link field sl@0: asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue)); sl@0: asm("mov r4, #0 "); sl@0: asm("ldmia r2, {r3,r5-r9,lr} "); // r3=next r5=prev r6=attributes, r7=heldFM, r8=waitFM, r9=address space sl@0: // lr=time sl@0: asm("add r10, r0, #%a0" : : "i" _FOFF(TScheduler,iLock)); sl@0: asm("strb r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // clear flag sl@0: ASM_DEBUG1(InitSelection,r2); sl@0: asm("cmp lr, #0 "); // check if timeslice expired sl@0: asm("bne no_other "); // skip if not sl@0: asm("cmp r3, r2 "); // check for thread at same priority sl@0: asm("bne round_robin "); // branch if there is one sl@0: asm("no_other: "); sl@0: asm("cmp r7, #0 "); // does this thread hold a fast mutex? sl@0: asm("bne holds_fast_mutex "); // branch if it does sl@0: asm("cmp r8, #0 "); // is thread blocked on a fast mutex? sl@0: asm("bne resched_blocked "); // branch out if it is sl@0: sl@0: asm("resched_not_blocked: "); sl@0: asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttImplicitSystemLock<<16)); // implicit system lock required? sl@0: #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__) sl@0: asm("beq resched_end "); // no, switch to this thread sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // yes, look at system lock holding thread sl@0: asm("cmp r1, #0 "); // lock held? sl@0: asm("beq resched_end "); // no, switch to this thread sl@0: asm("b resched_imp_sys_held "); sl@0: #else sl@0: asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // yes, look at system lock holding thread sl@0: asm("beq resched_end "); // no, switch to this thread sl@0: asm("cmp r1, #0 "); // lock held? sl@0: asm("ldreq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // no, get current address space ptr sl@0: asm("bne resched_imp_sys_held "); sl@0: asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttAddressSpace<<16)); // does thread require address space switch? sl@0: asm("cmpne r9, r5 "); // change of address space required? 
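	// Editor's note: the tests above implement the following decision for the
	// candidate thread r2 on the moving memory model - an illustrative C sketch
	// only (labels refer to the assembler below; "attributes" is the packed
	// attribute word tested via the <<16 shifts):
	//
	//	if (!(attributes & KThreadAttImplicitSystemLock))
	//		goto resched_end;								// just run the candidate
	//	if (TheScheduler.iLock.iHoldingThread)
	//		goto resched_imp_sys_held;						// run the System Lock holder instead
	//	if (!(attributes & KThreadAttAddressSpace)
	//			|| candidate->iAddressSpace == TheScheduler.iAddressSpace)
	//		goto resched_end;								// no address space change needed
	//	// otherwise fall through: switch to the candidate and change
	//	// address space inline further down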
sl@0: asm("beq resched_end "); // branch if not sl@0: sl@0: ASM_DEBUG1(Resched,r2) // r2->new thread sl@0: UPDATE_THREAD_CPU_TIME; sl@0: EMI_EVENTLOGGER; sl@0: EMI_CHECKDFCTAG(1) sl@0: sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter)); sl@0: asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2 sl@0: asm("cmp r1, #0"); sl@0: asm("blne context_switch_trace"); sl@0: #else sl@0: asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2 sl@0: #endif sl@0: sl@0: #ifdef __CPU_HAS_ETM_PROCID_REG sl@0: asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread sl@0: #endif sl@0: SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF); sl@0: #if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG) sl@0: asm("mov r1, sp "); sl@0: asm("ldmia r1, {r13,r14}^ "); // restore sp_usr and lr_usr sl@0: // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers sl@0: #else sl@0: // Load the sp_usr and lr_usr and only the required coprocessor registers sl@0: // Thumb-2EE TID FPEXC CAR DACR sl@0: asm("ldmia sp, {" EXTRA_STACK_LIST( 3, 4, 5, 6, 11) "r13-r14}^ "); sl@0: // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers sl@0: #endif sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // iLock.iHoldingThread=new thread sl@0: asm("str r10, [r2, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=&iLock sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp lr, #0"); sl@0: asm("blne reschedule_syslock_wait_trace"); sl@0: #endif sl@0: sl@0: #ifdef __CPU_SUPPORT_THUMB2EE sl@0: SET_THUMB2EE_HNDLR_BASE(,r3); sl@0: #endif sl@0: #ifdef __CPU_HAS_CP15_THREAD_ID_REG sl@0: SET_RWRW_TID(,r4); sl@0: #endif sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: SET_CAR(,r6) sl@0: #endif sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: asm("mcr p15, 0, r11, c3, c0, 0 "); sl@0: #endif sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMXR(,VFP_XREG_FPEXC,5); // restore FPEXC from R5 sl@0: #endif sl@0: asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // step past sp_usr and lr_usr sl@0: sl@0: // Do process switching sl@0: // Handler called with: sl@0: // r0->scheduler, r2->current thread sl@0: // r9->new address space, r10->system lock sl@0: // Must preserve r0,r2, can modify other registers sl@0: CPWAIT(,r1); sl@0: SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: asm("mov r3, r2 "); sl@0: asm("cmp r1, #0 "); sl@0: asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // unlock the kernel sl@0: asm("blne " CSM_ZN10TScheduler10RescheduleEv); sl@0: SET_INTS(r12, MODE_SVC, INTS_ALL_ON); // kernel is now unlocked, interrupts enabled, system lock held sl@0: asm("mov r2, r3 "); sl@0: asm("mov lr, pc "); sl@0: asm("ldr pc, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // do process switch sl@0: sl@0: asm("mov r1, #1 "); sl@0: asm("mov r4, #0 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel sl@0: asm("mov r3, r2 "); // r3->new thread sl@0: asm("ldr r2, [r0, #%a0]" : 
: "i" _FOFF(TScheduler,iLock.iWaiting)); // check system lock wait flag sl@0: asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // release system lock sl@0: asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); sl@0: asm("str r4, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp lr, #0"); sl@0: asm("blne reschedule_syslock_signal_trace"); sl@0: #endif sl@0: asm("cmp r2, #0 "); sl@0: asm("beq switch_threads_2 "); // no contention on system lock sl@0: asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iCsFunction)); sl@0: asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iCsCount)); sl@0: asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // contention - need to reschedule again sl@0: asm("cmp r2, #0 "); // outstanding CS function? sl@0: asm("beq switch_threads_2 "); // branch if not sl@0: asm("cmp r12, #0 "); // iCsCount!=0 ? sl@0: asm("bne switch_threads_2 "); // branch if it is sl@0: asm("ldr r1, [sp, #0] "); // r1=spsr_svc for this thread sl@0: asm("mov r4, r0 "); sl@0: asm("mov r5, r3 "); sl@0: asm("msr spsr, r1 "); // restore spsr_svc sl@0: asm("mov r0, r3 "); // if iCsCount=0, DoCsFunction() sl@0: asm("bl DoDoCsFunction "); sl@0: asm("mov r0, r4 "); sl@0: asm("mov r3, r5 "); sl@0: asm("b switch_threads_2 "); sl@0: #endif // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__ sl@0: sl@0: asm("round_robin: "); // get here if thread's timeslice has expired and there is another sl@0: // thread ready at the same priority sl@0: asm("cmp r7, #0 "); // does this thread hold a fast mutex? sl@0: asm("bne rr_holds_fast_mutex "); sl@0: asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTimeslice)); sl@0: asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue)); sl@0: asm("str r3, [r0, r12, lsl #2] "); // first thread at this priority is now the next one sl@0: asm("str lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTime)); // fresh timeslice sl@0: ASM_DEBUG1(RR,r3); sl@0: asm("add r3, r3, #%a0" : : "i" _FOFF(NThread,iPriority)); sl@0: asm("ldmia r3, {r6-r9} "); // r6=attributes, r7=heldFM, r8=waitFM, r9=address space sl@0: asm("sub r2, r3, #%a0" : : "i" _FOFF(NThread,iPriority)); // move to next thread at this priority sl@0: asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue)); sl@0: asm("b no_other "); sl@0: sl@0: asm("resched_blocked: "); // get here if thread is blocked on a fast mutex sl@0: ASM_DEBUG1(BlockedFM,r8) sl@0: asm("ldr r3, [r8, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // if so, get holding thread sl@0: asm("cmp r3, #0 "); // mutex now free? sl@0: asm("beq resched_not_blocked "); sl@0: asm("mov r2, r3 "); // no, switch to holding thread sl@0: asm("b resched_end "); sl@0: sl@0: asm("holds_fast_mutex: "); sl@0: #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__) sl@0: asm("cmp r7, r10 "); // does this thread hold system lock? sl@0: asm("tstne r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16)); // if not, is implicit system lock required? 
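	// Editor's note: a thread which already holds a fast mutex is scheduled even
	// if it would otherwise have to wait for the implicit System Lock; instead
	// the held mutex is flagged as contended so the thread reschedules as soon
	// as it releases it.  Illustrative C sketch of this path (not part of the
	// original source):
	//
	//	if (candidate->iHeldFastMutex == &TheScheduler.iLock
	//			|| !(attributes & KThreadAttImplicitSystemLock))
	//		goto resched_end;								// run the candidate directly
	//	if (TheScheduler.iLock.iHoldingThread)
	//		candidate->iHeldFastMutex->iWaiting = TRUE;		// rr_holds_fast_mutex
	//	goto resched_end;									// still run the candidate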
sl@0: asm("beq resched_end "); // if neither, switch to this thread sl@0: asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // check if system lock held sl@0: asm("cmp r5, #0 "); sl@0: asm("bne rr_holds_fast_mutex "); // if implicit system lock contention, set waiting flag on held mutex but still schedule thread sl@0: asm("b resched_end "); // else switch to thread and finish sl@0: #else sl@0: asm("cmp r7, r10 "); // does this thread hold system lock? sl@0: asm("beq resched_end "); // if so, switch to it sl@0: asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16)); // implicit system lock required? sl@0: asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // if so, check if system lock held sl@0: asm("beq resched_end "); // if lock not required, switch to thread and finish sl@0: asm("cmp r5, #0 "); sl@0: asm("bne rr_holds_fast_mutex "); // if implicit system lock contention, set waiting flag on held mutex but still schedule thread sl@0: asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16)); // address space required? sl@0: asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // if so, get current address space ptr sl@0: asm("beq resched_end "); // if not, switch to thread and finish sl@0: asm("cmp r5, r9 "); // do we have correct address space? sl@0: asm("beq resched_end "); // yes, switch to thread and finish sl@0: asm("b rr_holds_fast_mutex "); // no, set waiting flag on fast mutex sl@0: #endif // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__ sl@0: sl@0: asm("resched_imp_sys_held: "); // get here if thread requires implicit system lock and lock is held sl@0: ASM_DEBUG1(ImpSysHeld,r1) sl@0: asm("mov r2, r1 "); // switch to holding thread sl@0: asm("add r7, r0, #%a0" : : "i" _FOFF(TScheduler,iLock)); // set waiting flag on system lock sl@0: sl@0: asm("rr_holds_fast_mutex: "); // get here if round-robin deferred due to fast mutex held sl@0: asm("mov r6, #1 "); sl@0: asm("str r6, [r7, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // if so, set waiting flag sl@0: sl@0: asm("resched_end: "); sl@0: ASM_DEBUG1(Resched,r2) sl@0: sl@0: asm("switch_threads: "); sl@0: UPDATE_THREAD_CPU_TIME; sl@0: EMI_EVENTLOGGER; sl@0: EMI_CHECKDFCTAG(2) sl@0: sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter)); sl@0: asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2 sl@0: asm("cmp r1, #0"); sl@0: asm("blne context_switch_trace"); sl@0: #else sl@0: asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer sl@0: asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2 sl@0: #endif sl@0: sl@0: #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__) sl@0: asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThread,iPriority)); // attributes into r6 sl@0: asm("ldr r9, [r2, #%a0]" : : "i" _FOFF(NThread,iAddressSpace)); // address space into r9 sl@0: #else sl@0: #ifdef __CPU_HAS_ETM_PROCID_REG sl@0: asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread sl@0: #endif sl@0: #endif sl@0: #if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG) sl@0: asm("mov r3, sp "); sl@0: asm("ldmia r3, {r13,r14}^ "); // restore sp_usr and lr_usr sl@0: // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers sl@0: #else sl@0: // Load 
the sp_usr and lr_usr and only the required coprocessor registers sl@0: // Thumb-2EE TID FPEXC CAR DACR sl@0: asm("ldmia sp, {" EXTRA_STACK_LIST( 1, 3, FPEXC_REG3, 10, 11) "r13-r14}^ "); sl@0: // NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers sl@0: #endif sl@0: #ifdef __CPU_SUPPORT_THUMB2EE sl@0: SET_THUMB2EE_HNDLR_BASE(,r1); sl@0: #endif sl@0: #ifdef __CPU_HAS_CP15_THREAD_ID_REG sl@0: SET_RWRW_TID(,r3) // restore Thread ID from r3 sl@0: #endif sl@0: asm("mov r3, r2 "); // r3=TheCurrentThread sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: SET_CAR(,r10) sl@0: #endif sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: asm("mcr p15, 0, r11, c3, c0, 0 "); sl@0: #endif sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMXR(,VFP_XREG_FPEXC,FPEXC_REG3); // restore FPEXC from R4 or R10 sl@0: #endif sl@0: asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // step past sp_usr and lr_usr sl@0: #if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__) sl@0: // r2=r3=current thread here sl@0: asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16)); // address space required? sl@0: asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // if so, get pointer to process handler sl@0: asm("mov r2, r2, lsr #6 "); // r2=current thread>>6 sl@0: asm("beq switch_threads_3 "); // skip if address space change not required sl@0: sl@0: // Do address space switching sl@0: // Handler called with: sl@0: // r0->scheduler, r3->current thread sl@0: // r9->new address space, r5->old address space sl@0: // Return with r2 = (r2<<8) | ASID sl@0: // Must preserve r0,r3, can modify other registers sl@0: asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // get current address space ptr sl@0: #ifdef __MEMMODEL_FLEXIBLE__ sl@0: asm("adr lr, switch_threads_5 "); sl@0: #else sl@0: asm("adr lr, switch_threads_4 "); sl@0: #endif sl@0: __JUMP(,r1); sl@0: sl@0: asm("switch_threads_3: "); sl@0: asm("mrc p15, 0, r4, c13, c0, 1 "); // r4 = CONTEXTID (threadID:ASID) sl@0: asm("and r4, r4, #0xff "); // isolate ASID sl@0: asm("orr r2, r4, r2, lsl #8 "); // r2 = new thread ID : ASID sl@0: __DATA_SYNC_BARRIER_Z__(r12); // needed before change to ContextID sl@0: sl@0: asm("switch_threads_4: "); sl@0: #if (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)) && !defined(__CPU_ARM1136_ERRATUM_408022_FIXED) sl@0: asm("nop"); sl@0: #endif sl@0: asm("mcr p15, 0, r2, c13, c0, 1 "); // set ContextID (ASID + debugging thread ID) sl@0: __INST_SYNC_BARRIER_Z__(r12); sl@0: #ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE sl@0: asm("mcr p15, 0, r12, c7, c5, 6 "); // flush BTAC sl@0: #endif sl@0: sl@0: // asm("switch_threads_3: "); // TEMPORARY UNTIL CONTEXTID BECOMES READABLE sl@0: asm("switch_threads_5: "); sl@0: #if defined(__CPU_ARM1136__) && defined(__CPU_HAS_VFP) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED) sl@0: VFP_FMRX(,14,VFP_XREG_FPEXC); sl@0: asm("mrc p15, 0, r4, c1, c0, 1 "); sl@0: asm("tst r14, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); sl@0: asm("bic r4, r4, #2 "); // clear DB bit (disable dynamic prediction) sl@0: asm("and r12, r4, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled) sl@0: asm("orreq r4, r4, r12, lsl #1 "); // if VFP is being disabled set DB = RS sl@0: asm("mcr p15, 0, r4, c1, c0, 1 "); sl@0: #endif sl@0: #endif sl@0: CPWAIT(,r12); sl@0: sl@0: asm("switch_threads_2: "); sl@0: asm("resched_trampoline_hook_address: "); sl@0: asm("ldmia sp!, {r2,r4-r11,lr} "); // r2=spsr_svc, restore r4-r11 and return address sl@0: 
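	// Editor's note: the LDMIA above is the single word reported by
	// NKern::SchedulerHooks() and overwritten by NKern::InsertSchedulerHooks()
	// with a branch to resched_trampoline.  The trampoline is expected to
	// preserve context, call the callback registered with
	// NKern::SetRescheduleCallback(), perform the displaced LDMIA and branch
	// back to resched_trampoline_return below.  The branch word written by
	// InsertSchedulerHooks() is computed roughly as follows (illustrative
	// sketch only, assuming a forward branch within range):
	//
	//	TUint32 branch = 0xEA000000u							// ARM "B <target>" opcode
	//			+ ((trampolineAddr - hookAddr - 8) >> 2);		// PC reads 8 bytes ahead
	//	*(TUint32*)hookAddr = branch;							// hookAddr must be writable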
asm("resched_trampoline_return: "); sl@0: sl@0: SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts sl@0: asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); sl@0: asm("msr spsr, r2 "); // restore spsr_svc sl@0: asm("cmp r1, #0 "); // check for another reschedule sl@0: asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // if not needed unlock the kernel sl@0: #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED) sl@0: asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround sl@0: // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr sl@0: #endif sl@0: __JUMP(eq,lr); // and return in context of new thread, with r2 non zero sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("b start_resched "); // if necessary, go back to beginning sl@0: sl@0: asm("no_resched_needed: "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // else unlock the kernel sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3=iCurrentThread sl@0: asm("ldr pc, [sp], #4 "); // and exit immediately with r2=0 iff no reschedule occurred sl@0: sl@0: asm("__TheScheduler: "); sl@0: asm(".word TheScheduler "); sl@0: asm("__SystemLock: "); sl@0: asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock)); sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("context_switch_trace_header:"); sl@0: asm(".word %a0" : : "i" ((TInt)(8< iBufferEnd ? sl@0: asm("ldrlo r6,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferStart)); sl@0: sl@0: asm("ldrb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags)); sl@0: asm("orr r5, r5, #%a0" : : "i" ((TInt) KTskEvtFlag_EventLost)); sl@0: asm("strb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags)); sl@0: sl@0: asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferTail)); sl@0: sl@0: __JUMP(,lr); sl@0: sl@0: #if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_FLEXIBLE__) sl@0: EMI_ADDDFC(1) sl@0: #endif sl@0: EMI_ADDDFC(2) sl@0: #endif sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("reschedule_syslock_wait_trace:"); sl@0: // r0=scheduler r2=thread sl@0: asm("stmdb sp!, {r3,r12}"); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl syslock_wait_trace"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: asm("ldmia sp!, {r3,r12}"); sl@0: __JUMP(,lr); sl@0: sl@0: asm("reschedule_syslock_signal_trace:"); sl@0: // r0=scheduler r3=thread sl@0: asm("stmdb sp!, {r3,r12}"); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("bl syslock_signal_trace"); sl@0: asm("ldmia sp!, {r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: asm("ldmia sp!, {r3,r12}"); sl@0: __JUMP(,lr); sl@0: #endif sl@0: }; sl@0: sl@0: sl@0: /** sl@0: * Returns the range of linear memory which inserting the scheduler hooks needs to modify. sl@0: * sl@0: * @param aStart Set to the lowest memory address which needs to be modified. sl@0: * @param aEnd Set to the highest memory address +1 which needs to be modified. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. 
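
 A typical sequence for installing a reschedule callback under the stated
 preconditions might look like this (an illustrative sketch only;
 MakeRangeWritable() and MyRescheduleCallback() are hypothetical):

 @code
 NKern::Lock();
 TLinAddr start, end;
 NKern::SchedulerHooks(start, end);       // the range patched by InsertSchedulerHooks()
 MakeRangeWritable(start, end);           // platform-specific, hypothetical
 NKern::SetRescheduleCallback(&MyRescheduleCallback);
 NKern::InsertSchedulerHooks();
 NKern::Unlock();
 @endcode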
sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: #ifdef __DEBUGGER_SUPPORT__ sl@0: asm("adr r2,resched_trampoline_hook_address"); sl@0: asm("str r2,[r0]"); sl@0: asm("adr r2,resched_trampoline_hook_address+4"); sl@0: asm("str r2,[r1]"); sl@0: #else sl@0: asm("mov r2,#0"); sl@0: asm("str r2,[r0]"); sl@0: asm("str r2,[r1]"); sl@0: #endif sl@0: __JUMP(,lr); sl@0: }; sl@0: sl@0: sl@0: /** sl@0: * Modifies the scheduler code so that it can call the function set by sl@0: * NKern::SetRescheduleCallback(). sl@0: * sl@0: * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: #ifdef __DEBUGGER_SUPPORT__ sl@0: asm("adr r0,resched_trampoline_hook_address"); sl@0: asm("adr r1,resched_trampoline"); sl@0: asm("sub r1, r1, r0"); sl@0: asm("sub r1, r1, #8"); sl@0: asm("mov r1, r1, asr #2"); sl@0: asm("add r1, r1, #0xea000000"); // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline sl@0: sl@0: #if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS) sl@0: // These platforms have shadow memory in non-writable page. We cannot use the standard sl@0: // Epoc::CopyToShadowMemory interface as we hold Kernel lock here. sl@0: // Instead, we'll temporarily disable access permission checking in MMU by switching sl@0: // domain#0 into Manager Mode (see Domain Access Control Register). sl@0: asm("mrs r12, CPSR "); // save cpsr setting and ... sl@0: CPSIDAIF; // ...disable interrupts sl@0: asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR sl@0: asm("orr r3, r2, #3"); // domain #0 is the first two bits. manager mode is 11b sl@0: asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR sl@0: asm("str r1,[r0]"); sl@0: asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR sl@0: asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts) sl@0: #else sl@0: asm("str r1,[r0]"); sl@0: #endif sl@0: sl@0: #endif sl@0: __JUMP(,lr); sl@0: }; sl@0: sl@0: sl@0: /** sl@0: * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks() sl@0: * sl@0: * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: #ifdef __DEBUGGER_SUPPORT__ sl@0: asm("adr r0,resched_trampoline_hook_address"); sl@0: asm("ldr r1,resched_trampoline_unhook_data"); sl@0: sl@0: #if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS) sl@0: // See comments above in InsertSchedulerHooks sl@0: asm("mrs r12, CPSR "); // save cpsr setting and ... sl@0: CPSIDAIF; // ...disable interrupts sl@0: asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR sl@0: asm("orr r3, r2, #3"); // domain #0 is the first two bits. 
manager mode is 11b sl@0: asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR sl@0: asm("str r1,[r0]"); sl@0: asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR sl@0: asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts) sl@0: #else sl@0: asm("str r1,[r0]"); sl@0: #endif sl@0: sl@0: #endif sl@0: __JUMP(,lr); sl@0: }; sl@0: sl@0: sl@0: /** sl@0: * Set the function which is to be called on every thread reschedule. sl@0: * sl@0: * @param aCallback Pointer to callback function, or NULL to disable callback. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: #ifdef __DEBUGGER_SUPPORT__ sl@0: asm("ldr r1, __TheScheduler "); sl@0: asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook)); sl@0: #endif sl@0: __JUMP(,lr); sl@0: }; sl@0: sl@0: sl@0: sl@0: /** Disables interrupts to the specified level. sl@0: sl@0: Note that if we are not disabling all interrupts we must lock the kernel sl@0: here, otherwise a high priority interrupt which is still enabled could sl@0: cause a reschedule and the new thread could then re-enable interrupts. sl@0: sl@0: @param aLevel Interrupts are disabled up to and including aLevel. On ARM, sl@0: level 1 stands for IRQ only and level 2 stands for IRQ and FIQ. sl@0: @return CPU-specific value passed to NKern::RestoreInterrupts(). sl@0: sl@0: @pre 1 <= aLevel <= maximum level (CPU-specific) sl@0: sl@0: @see NKern::RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/) sl@0: { sl@0: asm("cmp r0, #1 "); sl@0: asm("bhi " CSM_ZN5NKern20DisableAllInterruptsEv); // if level>1, disable all sl@0: asm("ldreq r12, __TheScheduler "); sl@0: asm("mrs r2, cpsr "); // r2=original CPSR sl@0: asm("bcc 1f "); // skip if level=0 sl@0: asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("and r0, r2, #0xc0 "); sl@0: INTS_OFF_1(r2, r2, INTS_IRQ_OFF); // disable level 1 interrupts sl@0: asm("cmp r3, #0 "); // test if kernel locked sl@0: asm("addeq r3, r3, #1 "); // if not, lock the kernel sl@0: asm("streq r3, [r12] "); sl@0: asm("orreq r0, r0, #0x80000000 "); // and set top bit to indicate kernel locked sl@0: INTS_OFF_2(r2, r2, INTS_IRQ_OFF); sl@0: __JUMP(,lr); sl@0: asm("1: "); sl@0: asm("and r0, r2, #0xc0 "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Disables all interrupts (e.g. both IRQ and FIQ on ARM). sl@0: sl@0: @return CPU-specific value passed to NKern::RestoreInterrupts(). sl@0: sl@0: @see NKern::RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts() sl@0: { sl@0: asm("mrs r1, cpsr "); sl@0: asm("and r0, r1, #0xc0 "); // return I and F bits of CPSR sl@0: INTS_OFF(r1, r1, INTS_ALL_OFF); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Enables all interrupts (e.g. IRQ and FIQ on ARM). sl@0: sl@0: This function never unlocks the kernel. So it must be used sl@0: only to complement NKern::DisableAllInterrupts. Never use it sl@0: to complement NKern::DisableInterrupts. 
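
 For example (an illustrative sketch only):

 @code
 NKern::DisableAllInterrupts();
 // ... code which must run with both IRQ and FIQ masked ...
 NKern::EnableAllInterrupts();	// valid only because ALL interrupts were disabled above
 @endcode

 Had the section started with NKern::DisableInterrupts(1) (IRQ only), it would
 have to end with NKern::RestoreInterrupts() instead.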
sl@0: sl@0: @see NKern::DisableInterrupts() sl@0: @see NKern::DisableAllInterrupts() sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::EnableAllInterrupts() sl@0: { sl@0: #ifndef __CPU_ARM_HAS_CPS sl@0: asm("mrs r0, cpsr "); sl@0: asm("bic r0, r0, #0xc0 "); sl@0: asm("msr cpsr_c, r0 "); sl@0: #else sl@0: CPSIEIF; sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Restores interrupts to previous level and unlocks the kernel if it was sl@0: locked when disabling them. sl@0: sl@0: @param aRestoreData CPU-specific data returned from NKern::DisableInterrupts sl@0: or NKern::DisableAllInterrupts specifying the previous interrupt level. sl@0: sl@0: @see NKern::DisableInterrupts() sl@0: @see NKern::DisableAllInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/) sl@0: { sl@0: asm("tst r0, r0 "); // test state of top bit of aLevel sl@0: asm("mrs r1, cpsr "); sl@0: asm("and r0, r0, #0xc0 "); sl@0: asm("bic r1, r1, #0xc0 "); sl@0: asm("orr r1, r1, r0 "); // replace I and F bits with those supplied sl@0: asm("msr cpsr_c, r1 "); // flags are unchanged (in particular N) sl@0: __JUMP(pl,lr); // if top bit of aLevel clear, finished sl@0: sl@0: // if top bit of aLevel set, fall through to unlock the kernel sl@0: } sl@0: sl@0: sl@0: /** Unlocks the kernel. sl@0: sl@0: Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are sl@0: pending, calls the scheduler to process them. sl@0: Must be called in mode_svc. sl@0: sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre Do not call from an ISR. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::Unlock() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR); sl@0: sl@0: asm("ldr r1, __TheScheduler "); sl@0: asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("subs r2, r3, #1 "); sl@0: asm("str r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("ldreq r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // if kernel now unlocked, check flags sl@0: asm("bne 1f "); // if kernel still locked, return sl@0: asm("cmp r2, #0 "); // check for DFCs or reschedule sl@0: asm("bne 2f"); // branch if needed sl@0: asm("1: "); sl@0: __JUMP(,lr); sl@0: asm("2: "); sl@0: asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // else lock the kernel again sl@0: asm("str lr, [sp, #-4]! "); // save return address sl@0: asm("bl " CSM_ZN10TScheduler10RescheduleEv); // run DFCs and reschedule, return with kernel unlocked, interrupts disabled sl@0: SET_INTS(r0, MODE_SVC, INTS_ALL_ON); // reenable interrupts sl@0: asm("ldr pc, [sp], #4 "); sl@0: } sl@0: sl@0: /** Locks the kernel. sl@0: sl@0: Increments iKernCSLocked, thereby deferring IDFCs and preemption. sl@0: Must be called in mode_svc. sl@0: sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre Do not call from an ISR. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::Lock() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR); sl@0: sl@0: asm("ldr r12, __TheScheduler "); sl@0: asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("add r3, r3, #1 "); // lock the kernel sl@0: asm("str r3, [r12] "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Locks the kernel and returns a pointer to the current thread sl@0: Increments iKernCSLocked, thereby deferring IDFCs and preemption. sl@0: sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre Do not call from an ISR. 
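
 For example (an illustrative sketch only):

 @code
 NThread* pC = NKern::LockC();	// lock the kernel and get the current thread
 // ... examine or update per-thread state with preemption deferred ...
 NKern::Unlock();				// runs any pending IDFCs / reschedule
 @endcode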
sl@0: */ sl@0: EXPORT_C __NAKED__ NThread* NKern::LockC() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR); sl@0: sl@0: asm("ldr r12, __TheScheduler "); sl@0: asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("add r3, r3, #1 "); // lock the kernel sl@0: asm("str r3, [r12] "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: __ASSERT_COMPILE(_FOFF(TScheduler,iKernCSLocked) == _FOFF(TScheduler,iRescheduleNeededFlag) + 4); sl@0: sl@0: /** Allows IDFCs and rescheduling if they are pending. sl@0: sl@0: If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1 sl@0: calls the scheduler to process the IDFCs and possibly reschedule. sl@0: Must be called in mode_svc. sl@0: sl@0: @return Nonzero if a reschedule actually occurred, zero if not. sl@0: sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre Do not call from an ISR. sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::PreemptionPoint() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR); sl@0: sl@0: asm("ldr r3, __RescheduleNeededFlag "); sl@0: asm("ldmia r3, {r0,r1} "); // r0=RescheduleNeededFlag, r1=KernCSLocked sl@0: asm("cmp r0, #0 "); sl@0: __JUMP(eq,lr); // if no reschedule required, return 0 sl@0: asm("subs r1, r1, #1 "); sl@0: __JUMP(ne,lr); // if kernel still locked, exit sl@0: asm("str lr, [sp, #-4]! "); // store return address sl@0: sl@0: // reschedule - this also switches context if necessary sl@0: // enter this function in mode_svc, interrupts on, kernel locked sl@0: // exit this function in mode_svc, all interrupts off, kernel unlocked sl@0: asm("bl " CSM_ZN10TScheduler10RescheduleEv); sl@0: sl@0: asm("mov r1, #1 "); sl@0: asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel again sl@0: SET_INTS(r3, MODE_SVC, INTS_ALL_ON); // interrupts back on sl@0: asm("mov r0, r2 "); // Return 0 if no reschedule, non-zero if reschedule occurred sl@0: asm("ldr pc, [sp], #4 "); sl@0: sl@0: asm("__RescheduleNeededFlag: "); sl@0: asm(".word %a0" : : "i" ((TInt)&TheScheduler.iRescheduleNeededFlag)); sl@0: } sl@0: sl@0: sl@0: /** Returns the current processor context type (thread, IDFC or interrupt). sl@0: sl@0: @return A value from NKern::TContext enumeration (but never EEscaped). sl@0: sl@0: @pre Call in any context. sl@0: sl@0: @see NKern::TContext sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::CurrentContext() sl@0: { sl@0: asm("mrs r1, cpsr "); sl@0: asm("mov r0, #2 "); // 2 = interrupt sl@0: asm("and r1, r1, #0x1f "); // r1 = mode sl@0: asm("cmp r1, #0x13 "); sl@0: asm("ldreq r2, __TheScheduler "); sl@0: __JUMP(ne,lr); // if not svc, must be interrupt sl@0: asm("ldrb r0, [r2, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC)); sl@0: asm("cmp r0, #0 "); sl@0: asm("movne r0, #1 "); // if iInIDFC, return 1 else return 0 sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: #ifdef __FAST_MUTEX_MACHINE_CODED__ sl@0: sl@0: /** Temporarily releases the System Lock if there is contention. sl@0: sl@0: If there sl@0: is another thread attempting to acquire the System lock, the calling sl@0: thread releases the mutex and then acquires it again. sl@0: sl@0: This is more efficient than the equivalent code: sl@0: sl@0: @code sl@0: NKern::UnlockSystem(); sl@0: NKern::LockSystem(); sl@0: @endcode sl@0: sl@0: Note that this can only allow higher priority threads to use the System sl@0: lock as lower priority cannot cause contention on a fast mutex. 
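
 A typical use is inside a long-running loop executed with the System Lock
 held, for example (an illustrative sketch only; MoreWorkRemains() and
 DoSomeWork() are hypothetical):

 @code
 NKern::LockSystem();
 while (MoreWorkRemains())
 	{
 	DoSomeWork();
 	NKern::FlashSystem();	// give any higher priority contender a turn
 	}
 NKern::UnlockSystem();
 @endcode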
sl@0: sl@0: @return TRUE if the system lock was relinquished, FALSE if not. sl@0: sl@0: @pre System lock must be held. sl@0: sl@0: @post System lock is held. sl@0: sl@0: @see NKern::LockSystem() sl@0: @see NKern::UnlockSystem() sl@0: */ sl@0: EXPORT_C __NAKED__ TBool NKern::FlashSystem() sl@0: { sl@0: asm("ldr r0, __SystemLock "); sl@0: } sl@0: sl@0: sl@0: /** Temporarily releases a fast mutex if there is contention. sl@0: sl@0: If there is another thread attempting to acquire the mutex, the calling sl@0: thread releases the mutex and then acquires it again. sl@0: sl@0: This is more efficient than the equivalent code: sl@0: sl@0: @code sl@0: NKern::FMSignal(); sl@0: NKern::FMWait(); sl@0: @endcode sl@0: sl@0: @return TRUE if the mutex was relinquished, FALSE if not. sl@0: sl@0: @pre The mutex must be held. sl@0: sl@0: @post The mutex is held. sl@0: */ sl@0: EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex*) sl@0: { sl@0: ASM_DEBUG1(NKFMFlash,r0); sl@0: sl@0: asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); sl@0: asm("cmp r1, #0"); sl@0: asm("bne fmflash_contended"); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("ldr r1, __TheScheduler "); sl@0: asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter)); sl@0: asm("cmp r2, #0"); sl@0: asm("bne fmflash_trace"); sl@0: #endif sl@0: asm("mov r0, #0"); sl@0: __JUMP(,lr); sl@0: sl@0: asm("fmflash_contended:"); sl@0: asm("stmfd sp!,{r4,lr}"); sl@0: asm("mov r4, r0"); sl@0: asm("bl " CSM_ZN5NKern4LockEv); sl@0: asm("mov r0, r4"); sl@0: asm("bl " CSM_ZN10NFastMutex6SignalEv); sl@0: asm("bl " CSM_ZN5NKern15PreemptionPointEv); sl@0: asm("mov r0, r4"); sl@0: asm("bl " CSM_ZN10NFastMutex4WaitEv); sl@0: asm("bl " CSM_ZN5NKern6UnlockEv); sl@0: asm("mov r0, #-1"); sl@0: __POPRET("r4,"); sl@0: sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: asm("fmflash_trace:"); sl@0: ALIGN_STACK_START; sl@0: asm("stmdb sp!,{r0-r2,lr}"); // 4th item on stack is PC value for trace sl@0: asm("mov r3, r0"); // fast mutex parameter in r3 sl@0: asm("ldr r0, fmflash_trace_header"); // header parameter in r0 sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("mov lr, pc"); sl@0: asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler)); sl@0: asm("ldmia sp!,{r0-r2,lr}"); sl@0: ALIGN_STACK_END; sl@0: asm("mov r0, #0"); sl@0: __JUMP(,lr); sl@0: sl@0: asm("fmflash_trace_header:"); sl@0: asm(".word %a0" : : "i" ((TInt)(16<