sl@0: // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\nkern\x86\ncsched.cia sl@0: // sl@0: // sl@0: sl@0: #include sl@0: sl@0: #if defined(KSCHED) sl@0: extern "C" void __DebugMsgWaitForAnyRequest(); sl@0: extern "C" void __DebugMsgResched(int a); sl@0: extern "C" void __DebugMsgInitSelection(int a); sl@0: extern "C" void __DebugMsgRR(int a); sl@0: extern "C" void __DebugMsgBlockedFM(int a); sl@0: extern "C" void __DebugMsgImpSysHeld(int a); sl@0: #endif sl@0: sl@0: const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule; sl@0: const TLinAddr TheScheduler_iRescheduleNeededFlag = (TLinAddr)&TheScheduler.iRescheduleNeededFlag; sl@0: const TLinAddr NKern_FastCounter = (TLinAddr)&NKern::FastCounter; sl@0: sl@0: __NAKED__ void TScheduler::YieldTo(NThreadBase*) sl@0: { sl@0: // sl@0: // Enter with kernel locked, interrupts can be on or off sl@0: // Exit with kernel unlocked, interrupts off sl@0: // sl@0: asm("mov byte ptr [%a0], 1" : : "i"(&TheScheduler.iRescheduleNeededFlag)); sl@0: asm("call %a0" : : "i"(TScheduler_Reschedule)); sl@0: asm("ret"); sl@0: } sl@0: sl@0: const TUint32 new_thread_trace_header = ((8<new thread, EDI->TheScheduler sl@0: ASM_DEBUG1(Resched,ebx) sl@0: sl@0: #ifdef MONITOR_THREAD_CPU_TIME sl@0: asm("call %a0" : :"i"(NKern_FastCounter)); sl@0: asm("mov ecx, [edi+%0]" : : "i"_FOFF(TScheduler,iCurrentThread)); sl@0: asm("mov edx, [ecx+%0]" : : "i"_FOFF(NThreadBase,iLastStartTime)); sl@0: asm("mov [ebx+%0], eax" : : "i"_FOFF(NThreadBase,iLastStartTime)); sl@0: asm("sub eax, edx"); sl@0: asm("add dword ptr ([ecx+%0]), eax" : : "i"_FOFF(NThreadBase,iTotalCpuTime)); sl@0: asm("adc dword ptr ([ecx+4+%0]), 0" : : "i"_FOFF(NThreadBase,iTotalCpuTime)); sl@0: #endif sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter)); sl@0: asm("jz no_trace"); sl@0: asm("push [%a0]": : "i"(&TheScheduler.iCurrentThread)); sl@0: asm("push 0"); sl@0: asm("push %0": : "i"(new_thread_trace_header)); sl@0: asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler)); sl@0: asm("pop eax"); sl@0: asm("pop eax"); sl@0: asm("pop eax"); sl@0: asm("no_trace:"); sl@0: #endif sl@0: sl@0: asm("mov esi, [edi+%0]": :"i"_FOFF(TScheduler,iCurrentThread)); // ESI -> original thread sl@0: asm("mov [esi+%0], esp": :"i"_FOFF(NThreadBase,iSavedSP)); // Save original thread stack pointer sl@0: asm("mov [edi+%0], ebx": :"i"_FOFF(TScheduler,iCurrentThread)); // EBX -> new thread, update current thread sl@0: asm("cmp ebx, esi"); sl@0: asm("je same_thread"); sl@0: asm("test byte ptr [esp], 8"); // test thread's TS flag sl@0: asm("jnz no_fpu"); // if set, thread did not use FPU sl@0: asm("clts"); sl@0: asm("fnsave [esi+%0]": :"i"_FOFF(NThread,iCoprocessorState)); // else thread did use FPU - save its state sl@0: asm("or byte ptr [esp], 8"); // set TS flag so thread aborts next time it uses FPU sl@0: asm("fwait"); sl@0: sl@0: asm("no_fpu:"); sl@0: asm("same_thread:"); sl@0: sl@0: asm("mov esp, [ebx+%0]": :"i"_FOFF(NThreadBase,iSavedSP)); // Load new thread stack pointer sl@0: asm("mov eax, [ebx+%0]": :"i"_FOFF(NThreadBase,iStackBase)); sl@0: asm("add eax, [ebx+%0]": :"i"_FOFF(NThreadBase,iStackSize)); sl@0: asm("mov ecx, dword ptr [%a0]": :"i"(&X86_TSS_Ptr)); sl@0: asm("mov [ecx+%0], eax": :"i"_FOFF(TX86Tss,iEsp0)); // set ESP0 to top of new thread supervisor stack sl@0: sl@0: asm("test byte ptr [ebx+%0], 2": :"i"_FOFF(TPriListLink,iSpare2)); // test for address space switch sl@0: asm("jz resched_no_as_switch"); sl@0: asm("call [edi+%0]": :"i"_FOFF(TScheduler,iProcessHandler)); // call handler with sl@0: // EBX=pointer to new thread, EDI->scheduler, preserves ESI, EDI sl@0: asm("resched_no_as_switch:"); sl@0: asm("pop eax"); sl@0: asm("mov cr0, eax"); sl@0: asm("pop ebx"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("pop gs"); sl@0: asm("pop fs"); sl@0: asm("cli"); sl@0: asm("lea eax, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag)); sl@0: asm("cmp dword ptr [eax], 0"); sl@0: asm("jnz start_resched"); sl@0: asm("mov eax,0"); sl@0: asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("pop eax"); sl@0: asm("ret"); sl@0: sl@0: asm("round_robin:"); sl@0: asm("mov eax, [ebx+%0]": : "i"_FOFF(NThreadBase,iHeldFastMutex)); sl@0: asm("test eax, eax"); // does this thread hold a fast mutex? sl@0: asm("jnz rr_holds_fast_mutex"); // branch if it does sl@0: asm("mov eax, [ebx+%0]": : "i"_FOFF(NThreadBase,iTimeslice)); sl@0: asm("mov [ebx+%0], eax": : "i"_FOFF(NThreadBase,iTime)); // else new timeslice for this thread next time sl@0: asm("mov ebx, [ebx]"); // candidate thread = next thread in round-robin order sl@0: asm("mov [esi], ebx"); // the latter is now the first at this priority sl@0: ASM_DEBUG1(RR,ebx); sl@0: asm("jmp no_other"); sl@0: sl@0: asm("resched_blocked:"); sl@0: ASM_DEBUG1(BlockedFM,eax) sl@0: asm("mov edx, [eax+%0]": : "i"_FOFF(NFastMutex,iHoldingThread)); sl@0: asm("test edx, edx"); sl@0: asm("jz resched_not_blocked"); sl@0: asm("mov ebx, edx"); sl@0: asm("jmp resched_do_thread_switch"); sl@0: sl@0: asm("holds_fast_mutex:"); sl@0: #ifdef __GCC32__ sl@0: asm("lea ecx, [edi+%0]": : "i"_FOFF(TScheduler,iLock)); sl@0: #else sl@0: _asm lea ecx, [edi]TheScheduler.iLock sl@0: #endif sl@0: asm("cmp eax, ecx"); sl@0: asm("je resched_do_thread_switch"); sl@0: asm("test byte ptr [ebx+10], 1"); // test for implicit system lock sl@0: asm("jz resched_do_thread_switch"); sl@0: #ifdef __GCC32__ sl@0: asm("cmp dword ptr [edi+%0], 0": : "i"(_FOFF(TScheduler,iLock) + _FOFF(NFastMutex,iHoldingThread))); sl@0: #else sl@0: _asm cmp [edi]TheScheduler.iLock.iHoldingThread, 0 sl@0: #endif sl@0: asm("jz resched_do_thread_switch"); sl@0: sl@0: sl@0: asm("rr_holds_fast_mutex:"); sl@0: #ifdef __GCC32__ sl@0: asm("push edx"); // storing an immediate value to an C-offset address appears to be sl@0: asm("mov edx,1"); // impossible in GCC, so we use edx instead sl@0: asm("mov [eax+%0], edx": : "i"_FOFF(NFastMutex,iWaiting)); sl@0: asm("pop edx"); sl@0: #else sl@0: _asm mov [eax]NFastMutex.iWaiting, 1 sl@0: #endif sl@0: asm("jmp resched_do_thread_switch"); sl@0: sl@0: asm("resched_not_needed:"); sl@0: asm("xor eax, eax"); sl@0: asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("pop eax"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Disable interrupts to the specified level sl@0: sl@0: If aLevel = 0 does not affect interrupt state sl@0: If aLevel <>0 disables all maskable interrupts. sl@0: sl@0: @param aLevel level to which to disable sl@0: @return Cookie to pass into RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/) sl@0: { sl@0: asm("pushfd"); sl@0: asm("mov ecx, [esp+4]"); sl@0: asm("pop eax"); sl@0: asm("and eax, 0x200"); sl@0: asm("test ecx, ecx"); sl@0: asm("jz disable_ints_0"); sl@0: asm("cli"); sl@0: asm("disable_ints_0:"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Disable all maskable interrupts sl@0: sl@0: @return Cookie to pass into RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts() sl@0: { sl@0: asm("pushfd"); sl@0: asm("pop eax"); sl@0: asm("and eax, 0x200"); sl@0: asm("cli"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Restore interrupt mask to state preceding a DisableInterrupts() call sl@0: sl@0: @param aLevel Cookie returned by Disable(All)Interrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt aLevel) sl@0: { sl@0: asm("test byte ptr [esp+5], 2"); // test saved I flag sl@0: asm("jz restore_irq_off"); // jump if clear sl@0: asm("sti"); // else reenable interrupts sl@0: asm("ret"); sl@0: asm("restore_irq_off:"); sl@0: asm("cli"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Enable all maskable interrupts sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::EnableAllInterrupts() sl@0: { sl@0: asm("sti"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Unlocks the kernel sl@0: Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are sl@0: pending, calls the scheduler to process them. sl@0: sl@0: @pre Thread or IDFC context. Don't call from ISRs. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::Unlock() sl@0: { sl@0: asm("xor eax, eax"); sl@0: asm("dec dword ptr [%a0]": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("lea edx, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag)); sl@0: asm("jnz unlock_no_resched"); sl@0: asm("cmp eax, [edx]"); sl@0: asm("jz unlock_no_resched"); sl@0: asm("inc eax"); sl@0: asm("mov dword ptr [%a0], eax": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("call %a0" : : "i"(TScheduler_Reschedule)); sl@0: asm("sti"); sl@0: asm("unlock_no_resched:"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Locks the kernel sl@0: Increments iKernCSLocked, thereby deferring IDFCs and preemption. sl@0: sl@0: @pre Thread or IDFC context. Don't call from ISRs. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::Lock() sl@0: { sl@0: asm("inc dword ptr [%a0]": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Locks the kernel and returns a pointer to the current thread sl@0: Increments iKernCSLocked, thereby deferring IDFCs and preemption. sl@0: sl@0: @pre Thread or IDFC context. Don't call from ISRs. sl@0: */ sl@0: EXPORT_C __NAKED__ NThread* NKern::LockC() sl@0: { sl@0: asm("inc dword ptr [%a0]": :"i"(&TheScheduler.iKernCSLocked)); sl@0: asm("mov eax, [%a0]": :"i"(&TheScheduler.iCurrentThread)); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Allows IDFCs and rescheduling if they are pending. sl@0: If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1 sl@0: calls the scheduler to process the IDFCs and possibly reschedule. sl@0: sl@0: @return Nonzero if a reschedule actually occurred, zero if not. sl@0: @pre Thread or IDFC context. Don't call from ISRs. sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::PreemptionPoint() sl@0: { sl@0: asm("mov ecx, %0": : "i"(TheScheduler_iRescheduleNeededFlag)); sl@0: asm("xor eax, eax"); sl@0: asm("cmp eax, [ecx]"); sl@0: asm("jz preemption_point_no_resched"); sl@0: asm("cmp dword ptr [%a0], 1": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("jne preemption_point_no_resched"); sl@0: asm("call %a0" : : "i"(TScheduler_Reschedule)); sl@0: asm("mov dword ptr [%a0], 1": : "i"(&TheScheduler.iKernCSLocked)); sl@0: asm("sti"); sl@0: sl@0: asm("preemption_point_no_resched:"); sl@0: asm("ret"); sl@0: }