// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncsched.cia
//
//

#include <x86.h>    // the include names were lost in extraction; <x86.h> and
#include <apic.h>   // <apic.h> are assumed from the X86_LOCAL_APIC_* usage below

// SubSchedulerLookupTable : global data, type: TSubScheduler* [256];
// BTraceLock              : global data, type: TSpinLock

const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
//const TLinAddr TheScheduler_iRescheduleNeededFlag = (TLinAddr)&TheScheduler.iRescheduleNeededFlag;
const TLinAddr NKern_FastCounter = (TLinAddr)&NKern::FastCounter;
const TLinAddr NKern_Lock = (TLinAddr)&NKern::Lock;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr addressof_TheScheduler = (TLinAddr)&TheScheduler;

// The right-hand side of this initialiser was partly lost in extraction; the
// expression below is reconstructed from the standard BTrace header layout
// (record size in byte 0, category in byte 2, subcategory in byte 3).
const TUint32 new_thread_trace_header = ((8<<BTrace::ESizeIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8));

// These declarations were also lost in extraction; the signatures are
// inferred from the call sites below (__fastcall passes its argument in ECX).
extern "C" NThreadBase* __fastcall select_next_thread(TSubScheduler* aS);
extern "C" void __fastcall queue_dfcs(TSubScheduler* aS);
extern "C" void __fastcall do_forced_exit(NThreadBase* aT);
extern "C" void send_resched_ipis(TUint32 aMask);
extern "C" void NewThreadTrace(NThread* aT);


// (Summary reconstructed from the call sites: enter with the kernel locked;
// return with the kernel unlocked, local interrupts disabled and
// ESI -> this CPU's TSubScheduler.)
__NAKED__ void TScheduler::Reschedule()
    {
    // (The entry sequence was lost in extraction. From the surviving code it
    // must establish ESI -> this CPU's TSubScheduler, EDI -> TheScheduler and
    // EBP -> original thread, and it defines the "start_resched" label
    // targeted below, before falling through to this point.)
    asm("mov eax, cr0");
    asm("push eax");
    asm("mov [ebp+%0], esp" : : "i" _FOFF(NThreadBase, iSavedSP));  // Save original thread stack pointer

    // We must move to a temporary stack before selecting the next thread.
    // This is because another CPU may begin executing this thread before the
    // select_next_thread() function returns and our stack would then be
    // corrupted. We use the stack belonging to this CPU's initial thread since
    // we are guaranteed that will never run on another CPU.
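    // Illustrative C-level sketch of the stack hop below (comment only, not
    // compiled; field names match the assembler, 'ss' stands for ESI):
    //     NThreadBase* init = ss->iInitialThread;
    //     esp = init->iSavedSP;   // safe: the initial thread is pinned to this CPU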
sl@0: asm("mov ecx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iInitialThread)); sl@0: asm("mov esp, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP)); sl@0: sl@0: asm("select_thread:"); sl@0: asm("mov ecx, esi "); sl@0: asm("call %a0" : : "i" (&select_next_thread)); sl@0: asm("mov ebx, eax "); sl@0: asm("cmp ebx, 0 "); sl@0: asm("jz no_thread "); sl@0: asm("mov esp, [ebx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP)); // move to new thread's stack sl@0: sl@0: #ifdef BTRACE_CPU_USAGE sl@0: asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4])); sl@0: asm("jz short no_trace "); sl@0: asm("push ebx "); sl@0: asm("call %a0" : : "i" (NewThreadTrace)); sl@0: asm("pop ebx "); sl@0: asm("no_trace: "); sl@0: #endif // BTRACE_CPU_USAGE sl@0: sl@0: asm("cmp ebp, ebx "); sl@0: asm("je same_thread "); sl@0: asm("mov eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackBase)); sl@0: asm("add eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackSize)); sl@0: asm("mov ecx, [esi+60+%0]" : : "i" _FOFF(TSubScheduler, iExtras)); // iExtras[15] points to TSS sl@0: asm("mov [ecx+%0], eax" : : "i" _FOFF(TX86Tss, iEsp0)); // set ESP0 to top of new thread supervisor stack sl@0: sl@0: asm("test byte ptr [ebx+%0], 2" : : "i" _FOFF(NThreadBase,i_ThrdAttr)); // test for address space switch sl@0: asm("jz short resched_no_as_switch "); sl@0: asm("call [edi+%0]" : : "i" _FOFF(TScheduler, iProcessHandler)); // call handler with sl@0: // EBX=pointer to new thread, EDI->scheduler, ESI->subscheduler sl@0: asm("resched_no_as_switch: "); sl@0: asm("same_thread: "); sl@0: asm("pop eax "); sl@0: asm("mov cr0, eax "); sl@0: asm("cli "); sl@0: // asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0 VC6 ignores the "dword ptr" sl@0: asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag)); sl@0: asm("cmp dword ptr [eax], 0 "); sl@0: asm("jnz start_resched "); sl@0: sl@0: asm("resched_not_needed: "); sl@0: asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread)); sl@0: asm("cmp dword ptr [edi+%0], -3" : : "i" _FOFF(NThreadBase, iCsFunction)); // ECSDivertPending sl@0: asm("je resched_thread_divert "); sl@0: asm("mov dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount)); sl@0: asm("pop eax "); sl@0: asm("ret "); sl@0: sl@0: asm("resched_thread_divert: "); sl@0: asm("push edi "); sl@0: asm("xor eax, eax "); sl@0: asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs)); sl@0: asm("test eax, eax "); sl@0: asm("jz short no_resched_ipis "); sl@0: asm("push eax "); sl@0: asm("call %a0" : : "i" (&send_resched_ipis)); sl@0: asm("add esp, 4 "); sl@0: asm("no_resched_ipis: "); sl@0: sl@0: asm("sti "); sl@0: asm("mov ecx, [esp+12] "); // SThreadReschedStack iReason 0 not run 1 unlock 2 IRQ sl@0: asm("cmp ecx, 2 "); sl@0: asm("ja short rtd_unknown "); // unknown - die sl@0: asm("shl ecx, 2 "); // reason * 4 sl@0: asm("mov eax, 0xa1a "); sl@0: asm("shr eax, cl "); sl@0: asm("and eax, 15 "); sl@0: asm("mov gs, [esp+eax*4+16] "); // restore GS sl@0: sl@0: asm("pop ecx "); // exiting thread pointer sl@0: asm("call %a0" : : "i" (&do_forced_exit)); sl@0: asm("int 0xff "); // should never get here sl@0: sl@0: asm("rtd_unknown: "); sl@0: asm("int 0xff "); // should never get here sl@0: sl@0: sl@0: // There is no thread ready to run sl@0: asm("no_thread: "); sl@0: asm("cli "); sl@0: asm("xor eax, eax "); sl@0: asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs)); sl@0: asm("test eax, eax "); sl@0: asm("jz short no_resched_ipis2 "); sl@0: asm("push eax "); sl@0: asm("call 
%a0" : : "i" (&send_resched_ipis)); sl@0: asm("add esp, 4 "); sl@0: asm("no_resched_ipis2: "); sl@0: asm("sti "); sl@0: asm("hlt "); sl@0: asm("no_thread2: "); sl@0: // _asm cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h VC6 ignores the "dword ptr" sl@0: asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag)); sl@0: asm("cmp dword ptr [eax], 0x10000 "); sl@0: asm("jb short no_thread "); sl@0: asm("mov ecx, esi "); sl@0: asm("call %a0" : : "i" (&queue_dfcs)); sl@0: asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag)); sl@0: asm("jz short no_thread2 "); sl@0: asm("jmp select_thread "); sl@0: } sl@0: sl@0: sl@0: /** Disable interrupts to the specified level sl@0: sl@0: If aLevel = 0 does not affect interrupt state sl@0: If aLevel <>0 disables all maskable interrupts. sl@0: sl@0: @param aLevel level to which to disable sl@0: @return Cookie to pass into RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/) sl@0: { sl@0: asm("pushfd"); sl@0: asm("mov ecx, [esp+4]"); sl@0: asm("pop eax"); sl@0: asm("and eax, 0x200"); sl@0: asm("test ecx, ecx"); sl@0: asm("jz disable_ints_0"); sl@0: asm("cli"); sl@0: asm("disable_ints_0:"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Disable all maskable interrupts sl@0: sl@0: @return Cookie to pass into RestoreInterrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts() sl@0: { sl@0: asm("pushfd"); sl@0: asm("pop eax"); sl@0: asm("and eax, 0x200"); sl@0: asm("cli"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Restore interrupt mask to state preceding a DisableInterrupts() call sl@0: sl@0: @param aLevel Cookie returned by Disable(All)Interrupts() sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt aLevel) sl@0: { sl@0: asm("test byte ptr [esp+5], 2"); // test saved I flag sl@0: asm("jz restore_irq_off"); // jump if clear sl@0: asm("sti"); // else reenable interrupts sl@0: asm("ret"); sl@0: asm("restore_irq_off:"); sl@0: asm("cli"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Enable all maskable interrupts sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::EnableAllInterrupts() sl@0: { sl@0: asm("sti"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: /** Unlocks the kernel sl@0: Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are sl@0: pending, calls the scheduler to process them. sl@0: sl@0: @pre Thread or IDFC context. Don't call from ISRs. 

/** Unlocks the kernel
Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
pending, calls the scheduler to process them.

@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ void NKern::Unlock()
    {
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));   // OK since kernel locked
    asm("shr eax, 24 ");
    asm("push esi ");
    asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
#ifdef _DEBUG
    asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jg short _dbg1 ");
    asm("int 0xff ");
    asm("_dbg1: ");
#endif
    asm("cli ");
    asm("dec dword ptr [esi+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz short still_locked ");
//  asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0");   // VC6 ignores the "dword ptr"
    asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
    asm("cmp dword ptr [eax], 0 ");
    asm("jz short no_resched ");

    asm("mov dword ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("push edi ");
    asm("push ebp ");
    asm("push ebx ");
    asm("push gs ");
    asm("push fs ");
    asm("sti ");

    // Reschedule - return with local interrupts disabled, iKernLockCount=0
    asm("push 1 ");
    asm("call %a0" : : "i" (TScheduler_Reschedule));
    asm("add esp, 4 ");

    asm("xor eax, eax ");
    asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
    asm("test eax, eax ");
    asm("jz short no_resched_ipis_ul ");

    asm("unlock_do_resched_ipis: ");
    asm("push eax ");
    asm("call %a0" : : "i" (&send_resched_ipis));
    asm("add esp, 4 ");

    asm("no_resched_ipis_ul: ");
    asm("pop fs ");
    asm("pop gs ");
    asm("pop ebx ");
    asm("pop ebp ");
    asm("pop edi ");

    asm("still_locked: ");
    asm("sti ");
    asm("pop esi ");
    asm("ret ");

    asm("no_resched: ");
    asm("xor eax, eax ");
    asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
    asm("test eax, eax ");
    asm("jz short still_locked ");
    asm("push edi ");
    asm("push ebp ");
    asm("push ebx ");
    asm("push gs ");
    asm("push fs ");
    asm("jmp short unlock_do_resched_ipis ");
    }


/** Locks the kernel
Defer IDFCs and preemption

@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ void NKern::Lock()
    {
    asm("cli");     // stop thread migration between reading APIC ID and subscheduler stuff
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("inc dword ptr [ecx+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("sti");
    asm("ret");
    }
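// Usage sketch (illustrative): kernel-lock nesting and where preemption is
// processed. Counts refer to this CPU's iKernLockCount.
//     NKern::Lock();      // 0 -> 1: IDFCs and preemption now deferred
//     NKern::Lock();      // 1 -> 2: nested
//     NKern::Unlock();    // 2 -> 1: no reschedule check yet
//     NKern::Unlock();    // 1 -> 0: pending IDFCs/reschedule run here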

/** Locks the kernel and returns a pointer to the current thread
Defer IDFCs and preemption

@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ NThread* NKern::LockC()
    {
    asm("cli");     // stop thread migration between reading APIC ID and subscheduler stuff
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("inc dword ptr [ecx+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("mov eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
    asm("sti");
    asm("ret");
    }


/** Allows IDFCs and rescheduling if they are pending.
If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1
calls the scheduler to process the IDFCs and possibly reschedule.

@return Nonzero if a reschedule actually occurred, zero if not.
@pre Thread or IDFC context. Don't call from ISRs.
*/
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
    {
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov ecx, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
#ifdef _DEBUG
    asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jg _dbg1_pp");
    asm("int 0xff");
    asm("_dbg1_pp:");
#endif
    asm("cmp dword ptr [ecx+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("jnz still_locked_pp");
//  asm("cmp dword ptr [ecx]TSubScheduler.iRescheduleNeededFlag, 0");   // VC6 ignores the "dword ptr"
    asm("lea eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
    asm("cmp dword ptr [eax], 0");
    asm("jnz do_resched");
    asm("cli");
    asm("xor eax, eax");    // clear EAX (it still holds a pointer) so the
                            // exchange below reads-and-zeroes iReschedIPIs
    asm("lock xchg eax, [ecx+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
    asm("test eax, eax");
    asm("jz pp_no_resched_ipis");
    asm("push eax");
    asm("call %a0" : : "i" (&send_resched_ipis));
    asm("add esp, 4");
    asm("pp_no_resched_ipis:");
    asm("sti");

    asm("still_locked_pp:");
    asm("xor eax, eax");
    asm("ret");

    asm("do_resched:");
    asm("call %a0" : : "i" (NKern_Unlock));
    asm("call %a0" : : "i" (NKern_Lock));
    asm("mov eax, 1");
    asm("ret");
    }


/** Complete the saving of a thread's context

This saves the FPU registers if necessary once we know that we are definitely
switching threads.

@internalComponent
*/
__NAKED__ void NThread::CompleteContextSave()
    {
    THISCALL_PROLOG0()
    asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));      // EDX points to saved state on thread stack
    asm("test byte ptr [edx], 8");      // test thread's saved TS flag
    asm("jnz no_fpu");                  // if set, thread did not use FPU
    asm("clts");
    asm("fnsave [ecx+%0]" : : "i" _FOFF(NThread, iCoprocessorState));   // else thread did use FPU - save its state
    asm("or byte ptr [edx], 8");        // set TS flag so thread aborts next time it uses FPU
    asm("fwait");

    asm("no_fpu:");
    THISCALL_EPILOG0()
    }
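// Illustrative outline of the lazy FPU save above (comment only, not
// compiled; mask 8 is CR0.TS in the saved flags word):
//     if (!(savedCR0 & 8))                // thread touched the FPU since last switch
//         {
//         __asm clts;                     // clear TS so FNSAVE doesn't fault
//         fnsave(iCoprocessorState);      // dump x87 state into the NThread
//         savedCR0 |= 8;                  // fault - and lazily restore - on next FPU use
//         }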

/** Check if the kernel is locked the specified number of times.

@param aCount The number of times the kernel should be locked.
              If zero, tests if it is locked at all.
@return TRUE if the tested condition is true.

@internalTechnology
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
    {
    asm("pushfd");
    asm("cli");
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("mov edx, [eax+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
    asm("popfd");
    asm("cmp edx, 0");
    asm("jz not_locked");
    asm("mov eax, [esp+4]");
    asm("cmp eax, 0");
    asm("jz locked");
    asm("cmp eax, edx");
    asm("jnz not_locked");
    asm("locked:");
    asm("mov eax, 1");
    asm("ret");
    asm("not_locked:");
    asm("xor eax, eax");
    asm("ret");
    }


// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
    {
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("ret");
    }

/** Returns the NThread control block for the currently scheduled thread.

Note that this is the calling thread if called from a thread context, or the
interrupted thread if called from an interrupt context.

@return A pointer to the NThread for the currently scheduled thread.

@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
    {
    asm("pushfd");
    asm("cli");     // stop thread migration between reading APIC ID and thread pointer
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp eax, 0");
    asm("jz done");         // no subscheduler yet - return NULL
    asm("test al, 3");      // low bits set => not yet a valid TSubScheduler pointer
    asm("jnz bad_ct");
    asm("mov eax, [eax+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
    asm("done:");
    asm("popfd");
    asm("ret");
    asm("bad_ct:");
    asm("popfd");
    asm("xor eax, eax");
    asm("ret");
    }


/** Returns the NThread control block for the currently scheduled thread.

Note that this is the calling thread if called from a thread context, or the
interrupted thread if called from an interrupt context.

@return A pointer to the NThread for the currently scheduled thread.

@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
disabled or with preemption disabled.
*/
extern "C" __NAKED__ NThread* NCurrentThreadL()
    {
    asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("shr eax, 24");
    asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("mov eax, [eax+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
    asm("ret");
    }
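// Illustrative C equivalent of the per-CPU lookup pattern used throughout
// this file (comment only, not compiled):
//     TUint32 id = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
//     TSubScheduler* ss = SubSchedulerLookupTable[id];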

/** Returns the CPU number of the calling CPU.

@return the CPU number of the calling CPU.

@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
    {
    asm("xor eax, eax");
    asm("str ax");          // task register selector = 0x28 + 8*CPU
    asm("sub al, 0x28");
    asm("shr al, 3");       // so (TR - 0x28) >> 3 = CPU number
    asm("ret");
    }


/** Return the current processor context type (thread, IDFC or interrupt)

@return A value from NKern::TContext enumeration (but never EEscaped)
@pre Any context

@see NKern::TContext
*/
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
    {
    asm("pushfd");
    asm("cli");     // stop thread migration between reading APIC ID and subscheduler stuff
    asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
    asm("xor eax, eax");
    asm("shr edx, 24");
    asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
    asm("cmp edx, eax");
    asm("jz bad_cc");
    asm("test dl, 3");
    asm("jnz bad_cc");
    asm("cmp eax, [edx+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));  // i_IrqNestCount (iExtras[13])
    asm("jle irq");
    asm("cmp al, [edx+%0]" : : "i" _FOFF(TSubScheduler, iInIDFC));
    asm("jz thread");
    asm("jmp idfc");

    asm("bad_cc:");     // no subscheduler yet [initialising] - return EInterrupt
    asm("irq:");        // return NKern::EInterrupt [=2]
    asm("inc eax");
    asm("idfc:");       // return NKern::EIDFC [=1]
    asm("inc eax");
    asm("thread:");     // return NKern::EThread [=0]
    asm("popfd");
    asm("ret");
    }


#ifdef __USE_LOGICAL_DEST_MODE__
extern "C" __NAKED__ void __fastcall do_send_resched_ipis(TUint32)
    {
    asm("shl ecx, 24 ");        // CPUs mask into bits 24-31
    asm("jz short sri0 ");      // no CPUs, so nothing to do
    asm("pushfd ");
    asm("cli ");
    asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
    asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4800));   // fixed delivery, logical destination, assert
    asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
    asm("popfd ");
    asm("sri0: ");
    asm("ret ");
    }
#endif

extern "C" __NAKED__ void __fastcall send_ipi(TUint32)
    {
    asm("pushfd ");
    asm("cli ");
    asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
    asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4000));   // fixed delivery, physical destination, assert
    asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
    asm("popfd ");
    asm("ret ");
    }

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
    {
    asm("pushfd ");
    asm("cli ");
    asm("xor ecx, ecx ");
    asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
    asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x44000));  // destination shorthand = self
    asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
    asm("popfd ");
    asm("ret ");
    }
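// All of the ICR writers above and below follow the same protocol: write the
// destination (APIC ID or logical mask, bits 24-31) to ICRH first, then the
// vector and control bits to ICRL - the ICRL write is what sends the IPI.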
asm("ret "); sl@0: } sl@0: