--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/nkernsmp/x86/ncsched.cia Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,624 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\nkernsmp\x86\ncsched.cia
+//
+//
+
+#include <x86.h>
+#include <apic.h>
+
+// SubSchedulerLookupTable : global data, type: TSubScheduler* [256];
+// BTraceLock : global data, type: TSpinLock
+
+const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
+//const TLinAddr TheScheduler_iRescheduleNeededFlag = (TLinAddr)&TheScheduler.iRescheduleNeededFlag;
+const TLinAddr NKern_FastCounter = (TLinAddr)&NKern::FastCounter;
+const TLinAddr NKern_Lock = (TLinAddr)&NKern::Lock;
+const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
+const TLinAddr addressof_TheScheduler = (TLinAddr)&TheScheduler;
+const TUint32 new_thread_trace_header = ((8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8));
+
+extern "C" void __fastcall queue_dfcs(TSubScheduler* aS);
+extern "C" NThreadBase* __fastcall select_next_thread(TSubScheduler* aS);
+extern "C" void send_resched_ipis(TUint32 aMask);
+extern "C" void __fastcall do_forced_exit(NThreadBase* aT);
+extern "C" void NewThreadTrace(NThread* a);
+
+
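+// All of the routines below locate the per-CPU TSubScheduler by reading the
+// local APIC ID register (ID in bits 24-31) and indexing SubSchedulerLookupTable
+// with it. A rough C++ equivalent of the idiom, for illustration only:
+//
+//     TUint32 apicIdReg = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID);
+//     TSubScheduler* ss = SubSchedulerLookupTable[apicIdReg >> 24];
+//
+// This is only valid while thread migration is prevented (interrupts disabled,
+// kernel locked, or the thread frozen to its CPU); otherwise the thread could
+// move to another CPU between the two reads.
+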
+/***************************************************************************
+* Reschedule
+* Enter with:
+* Kernel locked, interrupts enabled or disabled
+* Return with:
+* Kernel unlocked, interrupts disabled
+* EAX=0 if no reschedule occurred, 1 if it did
+* ESI pointing to TSubScheduler for current CPU
+* EDI pointing to current NThread
+***************************************************************************/
+__NAKED__ void TScheduler::Reschedule()
+ {
+ asm("push 0 ");
+ asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID)); // OK since kernel locked
+ asm("mov edi, %0" : : "i" (addressof_TheScheduler));
+ asm("shr eax, 24 ");
+ asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
+ asm("cli ");
+ asm("start_resched: ");
+// Would be: cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h - but VC6 ignores the "dword ptr",
+// so take the address first and do an explicit dword compare instead:
+ asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
+ asm("cmp dword ptr [eax], 0x10000 "); // IDFCs pending if the high word is nonzero
1.64 + asm("jb short resched_no_dfcs ");
1.65 + asm("mov ecx, esi ");
1.66 + asm("call %a0" : : "i" (&queue_dfcs));
1.67 + asm("resched_no_dfcs: ");
1.68 + asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.69 + asm("jz resched_not_needed ");
1.70 + asm("sti ");
1.71 + asm("mov dword ptr [esp], 1 ");
1.72 + asm("mov ebp, [esi+%0]" : : "i"_FOFF(TSubScheduler, iCurrentThread)); // EBP -> original thread
1.73 + asm("mov eax, cr0");
1.74 + asm("push eax");
1.75 + asm("mov [ebp+%0], esp" : : "i" _FOFF(NThreadBase, iSavedSP)); // Save original thread stack pointer
1.76 +
1.77 + // We must move to a temporary stack before selecting the next thread.
1.78 + // This is because another CPU may begin executing this thread before the
1.79 + // select_next_thread() function returns and our stack would then be
1.80 + // corrupted. We use the stack belonging to this CPU's initial thread since
1.81 + // we are guaranteed that will never run on another CPU.
1.82 + asm("mov ecx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iInitialThread));
1.83 + asm("mov esp, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));
1.84 +
1.85 + asm("select_thread:");
1.86 + asm("mov ecx, esi ");
1.87 + asm("call %a0" : : "i" (&select_next_thread));
1.88 + asm("mov ebx, eax ");
1.89 + asm("cmp ebx, 0 ");
1.90 + asm("jz no_thread ");
1.91 + asm("mov esp, [ebx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP)); // move to new thread's stack
1.92 +
1.93 +#ifdef BTRACE_CPU_USAGE
1.94 + asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4]));
1.95 + asm("jz short no_trace ");
1.96 + asm("push ebx ");
1.97 + asm("call %a0" : : "i" (NewThreadTrace));
1.98 + asm("pop ebx ");
1.99 + asm("no_trace: ");
1.100 +#endif // BTRACE_CPU_USAGE
1.101 +
1.102 + asm("cmp ebp, ebx ");
1.103 + asm("je same_thread ");
1.104 + asm("mov eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackBase));
1.105 + asm("add eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackSize));
1.106 + asm("mov ecx, [esi+60+%0]" : : "i" _FOFF(TSubScheduler, iExtras)); // iExtras[15] points to TSS
1.107 + asm("mov [ecx+%0], eax" : : "i" _FOFF(TX86Tss, iEsp0)); // set ESP0 to top of new thread supervisor stack
1.108 +
1.109 + asm("test byte ptr [ebx+%0], 2" : : "i" _FOFF(NThreadBase,i_ThrdAttr)); // test for address space switch
1.110 + asm("jz short resched_no_as_switch ");
1.111 + asm("call [edi+%0]" : : "i" _FOFF(TScheduler, iProcessHandler)); // call handler with
1.112 + // EBX=pointer to new thread, EDI->scheduler, ESI->subscheduler
1.113 + asm("resched_no_as_switch: ");
1.114 + asm("same_thread: ");
1.115 + asm("pop eax ");
1.116 + asm("mov cr0, eax ");
1.117 + asm("cli ");
+// Would be: cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0 - but VC6 ignores the "dword ptr":
+ asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
+ asm("cmp dword ptr [eax], 0 ");
1.121 + asm("jnz start_resched ");
1.122 +
1.123 + asm("resched_not_needed: ");
1.124 + asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
1.125 + asm("cmp dword ptr [edi+%0], -3" : : "i" _FOFF(NThreadBase, iCsFunction)); // ECSDivertPending
1.126 + asm("je resched_thread_divert ");
1.127 + asm("mov dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
1.128 + asm("pop eax ");
1.129 + asm("ret ");
1.130 +
1.131 + asm("resched_thread_divert: ");
1.132 + asm("push edi ");
1.133 + asm("xor eax, eax ");
1.134 + asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
1.135 + asm("test eax, eax ");
1.136 + asm("jz short no_resched_ipis ");
1.137 + asm("push eax ");
1.138 + asm("call %a0" : : "i" (&send_resched_ipis));
1.139 + asm("add esp, 4 ");
1.140 + asm("no_resched_ipis: ");
1.141 +
1.142 + asm("sti ");
1.143 + asm("mov ecx, [esp+12] "); // SThreadReschedStack iReason 0 not run 1 unlock 2 IRQ
1.144 + asm("cmp ecx, 2 ");
1.145 + asm("ja short rtd_unknown "); // unknown - die
1.146 + asm("shl ecx, 2 "); // reason * 4
1.147 + asm("mov eax, 0xa1a ");
1.148 + asm("shr eax, cl ");
1.149 + asm("and eax, 15 ");
1.150 + asm("mov gs, [esp+eax*4+16] "); // restore GS
+
+ asm("pop ecx "); // exiting thread pointer
+ asm("call %a0" : : "i" (&do_forced_exit));
+ asm("int 0xff "); // should never get here
+
+ asm("rtd_unknown: ");
+ asm("int 0xff "); // should never get here
+
+
+ // There is no thread ready to run
+ asm("no_thread: ");
+ asm("cli ");
+ asm("xor eax, eax ");
+ asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
+ asm("test eax, eax ");
+ asm("jz short no_resched_ipis2 ");
+ asm("push eax ");
+ asm("call %a0" : : "i" (&send_resched_ipis));
+ asm("add esp, 4 ");
+ asm("no_resched_ipis2: ");
+ asm("sti ");
+ asm("hlt ");
+ asm("no_thread2: ");
+// Would be: cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h - but VC6 ignores the "dword ptr":
+ asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
+ asm("cmp dword ptr [eax], 0x10000 ");
1.177 + asm("jb short no_thread ");
1.178 + asm("mov ecx, esi ");
1.179 + asm("call %a0" : : "i" (&queue_dfcs));
1.180 + asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
1.181 + asm("jz short no_thread2 ");
1.182 + asm("jmp select_thread ");
1.183 + }
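+
+// Broad shape of the loop above, as illustrative C++ pseudocode only (the real
+// code must also hop onto the initial thread's stack before selecting, as the
+// comment in the function body explains; SwitchTo/WaitForInterrupt are
+// hypothetical helpers standing in for the inline assembly):
+//
+//     do  {
+//         if (ss->iRescheduleNeededFlag >= 0x10000)    // IDFCs queued (high word)
+//             queue_dfcs(ss);
+//         if (!(TUint8)ss->iRescheduleNeededFlag)
+//             break;                                   // no reschedule needed
+//         NThreadBase* next = select_next_thread(ss);
+//         if (next)
+//             SwitchTo(next);                          // stacks, ESP0, address space
+//         else
+//             WaitForInterrupt();                      // the no_thread/hlt path
+//     } while (ss->iRescheduleNeededFlag);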
+
+
+/** Disable interrupts to the specified level
+
+If aLevel == 0, the interrupt state is left unchanged.
+If aLevel != 0, all maskable interrupts are disabled.
+
+@param aLevel Level to which to disable interrupts
+@return Cookie to pass into RestoreInterrupts()
+*/
+EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
+ {
+ asm("pushfd");
+ asm("mov ecx, [esp+4]");
+ asm("pop eax");
+ asm("and eax, 0x200");
+ asm("test ecx, ecx");
+ asm("jz disable_ints_0");
+ asm("cli");
+ asm("disable_ints_0:");
+ asm("ret");
+ }
+
+
+/** Disable all maskable interrupts
+
+@return Cookie to pass into RestoreInterrupts()
+*/
+EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
+ {
+ asm("pushfd");
+ asm("pop eax");
+ asm("and eax, 0x200");
+ asm("cli");
+ asm("ret");
+ }
+
+
+/** Restore interrupt mask to state preceding a DisableInterrupts() call
+
+@param aLevel Cookie returned by Disable(All)Interrupts()
+*/
+EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt aLevel)
+ {
+ asm("test byte ptr [esp+5], 2"); // test saved I flag
+ asm("jz restore_irq_off"); // jump if clear
+ asm("sti"); // else reenable interrupts
+ asm("ret");
+ asm("restore_irq_off:");
+ asm("cli");
+ asm("ret");
+ }
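+
+// Typical use of the cookie-based pattern above (illustrative sketch):
+//
+//     TInt irq = NKern::DisableAllInterrupts();
+//     // ... short critical section, no blocking calls ...
+//     NKern::RestoreInterrupts(irq);
+//
+// The cookie is simply the saved IF bit (bit 9, 0x200, of EFLAGS), so pairs
+// nest correctly: an inner Disable/Restore leaves the outer state untouched.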
+
+
+/** Enable all maskable interrupts
+
+@internalComponent
+*/
+EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
+ {
+ asm("sti");
+ asm("ret");
+ }
+
+
+/** Unlocks the kernel
+ Decrements the current CPU's kernel lock count (iKernLockCount); if it
+ becomes zero and IDFCs or a reschedule are pending, calls the scheduler to
+ process them.
+
+ @pre Thread or IDFC context. Don't call from ISRs.
+ */
+EXPORT_C __NAKED__ void NKern::Unlock()
+ {
+ asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID)); // OK since kernel locked
+ asm("shr eax, 24 ");
+ asm("push esi ");
+ asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
+#ifdef _DEBUG
+ asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
+ asm("jg short _dbg1 ");
+ asm("int 0xff ");
+ asm("_dbg1: ");
+#endif
+ asm("cli ");
+ asm("dec dword ptr [esi+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
+ asm("jnz short still_locked ");
+// Would be: cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0 - but VC6 ignores the "dword ptr":
+ asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
+ asm("cmp dword ptr [eax], 0 ");
1.273 + asm("jz short no_resched ");
1.274 +
1.275 + asm("mov dword ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
1.276 + asm("push edi ");
1.277 + asm("push ebp ");
1.278 + asm("push ebx ");
1.279 + asm("push gs ");
1.280 + asm("push fs ");
1.281 + asm("sti ");
1.282 +
1.283 + // Reschedule - return with local interrupts disabled, iKernLockCount=0
1.284 + asm("push 1 ");
1.285 + asm("call %a0" : : "i" (TScheduler_Reschedule));
1.286 + asm("add esp, 4 ");
1.287 +
1.288 + asm("xor eax, eax ");
1.289 + asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
1.290 + asm("test eax, eax ");
1.291 + asm("jz short no_resched_ipis_ul ");
1.292 +
1.293 + asm("unlock_do_resched_ipis: ");
1.294 + asm("push eax ");
1.295 + asm("call %a0" : : "i" (&send_resched_ipis));
1.296 + asm("add esp, 4 ");
1.297 +
1.298 + asm("no_resched_ipis_ul: ");
1.299 + asm("pop fs ");
1.300 + asm("pop gs ");
1.301 + asm("pop ebx ");
1.302 + asm("pop ebp ");
1.303 + asm("pop edi ");
1.304 +
1.305 + asm("still_locked: ");
1.306 + asm("sti ");
1.307 + asm("pop esi ");
1.308 + asm("ret ");
1.309 +
1.310 + asm("no_resched: ");
1.311 + asm("xor eax, eax ");
1.312 + asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
1.313 + asm("test eax, eax ");
1.314 + asm("jz short still_locked ");
1.315 + asm("push edi ");
1.316 + asm("push ebp ");
1.317 + asm("push ebx ");
1.318 + asm("push gs ");
1.319 + asm("push fs ");
1.320 + asm("jmp short unlock_do_resched_ipis ");
1.321 + }
+
+
+/** Locks the kernel
+ Defer IDFCs and preemption
+
+ @pre Thread or IDFC context. Don't call from ISRs.
+ */
+EXPORT_C __NAKED__ void NKern::Lock()
+ {
+ asm("cli"); // stop thread migration between reading APIC ID and subscheduler stuff
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
+ asm("inc dword ptr [ecx+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
+ asm("sti");
+ asm("ret");
+ }
+
+
+/** Locks the kernel and returns a pointer to the current thread
+ Defer IDFCs and preemption
+
+ @pre Thread or IDFC context. Don't call from ISRs.
+ */
+EXPORT_C __NAKED__ NThread* NKern::LockC()
+ {
+ asm("cli"); // stop thread migration between reading APIC ID and subscheduler stuff
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
+ asm("inc dword ptr [ecx+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
+ asm("mov eax, [ecx+%0]" : : "i"_FOFF(TSubScheduler, iCurrentThread));
+ asm("sti");
+ asm("ret");
+ }
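+
+// Illustrative use of the kernel lock as a preemption guard (sketch only):
+//
+//     NThread* me = NKern::LockC();   // lock the kernel, fetch current thread
+//     // ... manipulate scheduler-protected state ...
+//     NKern::Unlock();                // may run pending IDFCs / reschedule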
+
+
+/** Allows IDFCs and rescheduling if they are pending.
+ If IDFCs or a reschedule are pending and the kernel lock count is exactly 1,
+ calls the scheduler to process the IDFCs and possibly reschedule.
+
+ @return Nonzero if a reschedule actually occurred, zero if not.
+ @pre Thread or IDFC context. Don't call from ISRs.
+ */
+EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
+ {
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
+#ifdef _DEBUG
+ asm("cmp dword ptr [ecx+%0], 0": : "i"_FOFF(TSubScheduler, iKernLockCount));
+ asm("jg _dbg1_pp");
+ asm("int 0xff");
+ asm("_dbg1_pp:");
+#endif
+ asm("cmp dword ptr [ecx+%0], 1": : "i"_FOFF(TSubScheduler, iKernLockCount));
+ asm("jnz still_locked_pp");
+// Would be: cmp dword ptr [ecx]TSubScheduler.iRescheduleNeededFlag, 0 - but VC6 ignores the "dword ptr":
+ asm("lea eax, [ecx+%0]": : "i"_FOFF(TSubScheduler, iRescheduleNeededFlag));
+ asm("cmp dword ptr [eax], 0");
1.382 + asm("jnz do_resched");
1.383 + asm("cli");
1.384 + asm("lock xchg eax, [ecx+%0]": : "i"_FOFF(TSubScheduler, iReschedIPIs));
1.385 + asm("test eax, eax");
1.386 + asm("jz pp_no_resched_ipis");
1.387 + asm("push eax");
1.388 + asm("call %a0": :"i"(&send_resched_ipis));
1.389 + asm("add esp, 4");
1.390 + asm("pp_no_resched_ipis:");
1.391 + asm("sti");
1.392 +
1.393 + asm("still_locked_pp:");
1.394 + asm("xor eax, eax");
1.395 + asm("ret");
1.396 +
1.397 + asm("do_resched:");
1.398 + asm("call %a0" : : "i"(NKern_Unlock));
1.399 + asm("call %a0" : : "i"(NKern_Lock));
1.400 + asm("mov eax, 1");
1.401 + asm("ret");
1.402 + }
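+
+// Sketch of the intended use: a long operation under the kernel lock can bound
+// its latency by polling PreemptionPoint() (DoOneStep is a hypothetical unit
+// of work):
+//
+//     NKern::Lock();
+//     for (TInt i = 0; i < n; ++i)
+//         {
+//         DoOneStep(i);
+//         NKern::PreemptionPoint();   // briefly services IDFCs/reschedule if pending
+//         }
+//     NKern::Unlock();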
+
+
+/** Complete the saving of a thread's context
+
+This saves the FPU registers if necessary once we know that we are definitely
+switching threads.
+
+@internalComponent
+*/
+__NAKED__ void NThread::CompleteContextSave()
+ {
+ THISCALL_PROLOG0()
+ asm("mov edx, [ecx+%0]": : "i"_FOFF(NThreadBase,iSavedSP)); // EDX points to saved state on thread stack
+ asm("test byte ptr [edx], 8"); // test thread's saved TS flag
+ asm("jnz no_fpu"); // if set, thread did not use FPU
+ asm("clts");
+ asm("fnsave [ecx+%0]": : "i"_FOFF(NThread, iCoprocessorState)); // else thread did use FPU - save its state
+ asm("or byte ptr [edx], 8"); // set TS flag so thread aborts next time it uses FPU
+ asm("fwait");
+
+ asm("no_fpu:");
+ THISCALL_EPILOG0()
+ }
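+
+// The lazy FPU save above hinges on CR0.TS (bit 3, hence the 'test ... 8'):
+// while TS is set, the first FPU instruction a thread executes raises #NM,
+// letting the kernel restore that thread's FPU state on demand. In outline
+// (illustrative only; KCr0Ts = 0x8 is a hypothetical name for the bit):
+//
+//     if (!(savedCr0 & KCr0Ts))          // thread used the FPU since last switch
+//         {
+//         clts();                        // clear TS so fnsave itself doesn't fault
+//         fnsave(iCoprocessorState);     // save and reinitialise the FPU
+//         savedCr0 |= KCr0Ts;            // force #NM on the thread's next FPU use
+//         }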
+
+
+/** Check if the kernel is locked the specified number of times.
+
+ @param aCount The number of times the kernel should be locked;
+ if zero, tests whether it is locked at all.
+ @return TRUE if the tested condition is true.
+
+ @internalTechnology
+*/
+EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
+ {
+ asm("pushfd");
+ asm("cli");
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
+ asm("mov edx, [eax+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
+ asm("popfd");
+ asm("cmp edx, 0");
+ asm("jz not_locked");
+ asm("mov eax, [esp+4]");
+ asm("cmp eax, 0");
+ asm("jz locked");
+ asm("cmp eax, edx");
+ asm("jnz not_locked");
+ asm("locked:");
+ asm("mov eax, 1");
+ asm("ret");
+ asm("not_locked:");
+ asm("xor eax, eax");
+ asm("ret");
+ }
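+
+// KernelLocked() is intended for debug assertions, e.g. (sketch, assuming the
+// usual nkern assertion macro):
+//
+//     __NK_ASSERT_DEBUG(NKern::KernelLocked(1));  // lock held exactly once here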
+
+
+// Only call this if thread migration is disabled, i.e.
+// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
+extern "C" __NAKED__ TSubScheduler& SubScheduler()
+ {
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
+ asm("ret");
+ }
+
+/** Returns the NThread control block for the currently scheduled thread.
+
+ Note that this is the calling thread if called from a thread context, or the
+ interrupted thread if called from an interrupt context.
+
+ @return A pointer to the NThread for the currently scheduled thread.
+
+ @pre Call in any context.
+*/
+EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
+ {
+ asm("pushfd");
+ asm("cli"); // stop thread migration between reading APIC ID and thread pointer
+ asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("shr eax, 24");
+ asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
1.487 + asm("cmp eax, 0");
1.488 + asm("jz done");
1.489 + asm("test al, 3");
1.490 + asm("jnz bad_ct");
1.491 + asm("mov eax, [eax+%0]": : "i"_FOFF(TSubScheduler, iCurrentThread));
1.492 + asm("done:");
1.493 + asm("popfd");
1.494 + asm("ret");
1.495 + asm("bad_ct:");
1.496 + asm("popfd");
1.497 + asm("xor eax, eax");
1.498 + asm("ret");
1.499 + }
1.500 +
1.501 +
1.502 +/** Returns the NThread control block for the currently scheduled thread.
1.503 +
1.504 + Note that this is the calling thread if called from a thread context, or the
1.505 + interrupted thread if called from an interrupt context.
1.506 +
1.507 + @return A pointer to the NThread for the currently scheduled thread.
1.508 +
1.509 + @pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
1.510 + disabled or with preemption disabled.
1.511 +*/
1.512 +extern "C" __NAKED__ NThread* NCurrentThreadL()
1.513 + {
1.514 + asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
1.515 + asm("shr eax, 24");
1.516 + asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
1.517 + asm("mov eax, [eax+%0]": : "i"_FOFF(TSubScheduler, iCurrentThread));
1.518 + asm("ret");
1.519 + }
1.520 +
1.521 +
1.522 +/** Returns the CPU number of the calling CPU.
1.523 +
1.524 + @return the CPU number of the calling CPU.
1.525 +
1.526 + @pre Call in any context.
1.527 +*/
1.528 +EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
1.529 + {
1.530 + asm("xor eax, eax");
1.531 + asm("str ax");
1.532 + asm("sub al, 0x28");
1.533 + asm("shr al, 3");
1.534 + asm("ret");
1.535 + }
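+
+// The arithmetic above assumes one TSS selector per CPU in the GDT, starting at
+// selector 0x28 and spaced 8 bytes apart:
+//
+//     cpu = (TR - 0x28) >> 3;    // e.g. TR = 0x38 on CPU 2: (0x38 - 0x28) >> 3 = 2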
+
+
+/** Return the current processor context type (thread, IDFC or interrupt)
+
+ @return A value from NKern::TContext enumeration (but never EEscaped)
+ @pre Any context
+
+ @see NKern::TContext
+ */
+EXPORT_C __NAKED__ TInt NKern::CurrentContext()
+ {
+ asm("pushfd");
+ asm("cli"); // stop thread migration between reading APIC ID and subscheduler stuff
+ asm("mov edx, ds:[%0]": :"i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
+ asm("xor eax, eax");
+ asm("shr edx, 24");
+ asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
+ asm("cmp edx, eax");
+ asm("jz bad_cc");
+ asm("test dl, 3");
+ asm("jnz bad_cc");
+ asm("cmp eax, [edx+52+%0]": : "i"_FOFF(TSubScheduler,iExtras)); // i_IrqNestCount
+ asm("jle irq");
+ asm("cmp al, [edx+%0]": : "i"_FOFF(TSubScheduler, iInIDFC));
+ asm("jz thread");
+ asm("jmp idfc");
+
+ asm("bad_cc:"); // no subscheduler yet [initialising] - return EInterrupt
+ asm("irq:"); // return NKern::EInterrupt [=2]
+ asm("inc eax");
+ asm("idfc:"); // return NKern::EIDFC [=1]
+ asm("inc eax");
+ asm("thread:"); // return NKern::EThread [=0]
+ asm("popfd");
+ asm("ret");
+ }
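+
+// Callers typically branch on the result (illustrative sketch):
+//
+//     switch (NKern::CurrentContext())
+//         {
+//         case NKern::EThread:    /* may block */                  break;
+//         case NKern::EIDFC:      /* must not block or wait */     break;
+//         case NKern::EInterrupt: /* ISR-safe operations only */   break;
+//         }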
+
+
+#ifdef __USE_LOGICAL_DEST_MODE__
+extern "C" __NAKED__ void __fastcall do_send_resched_ipis(TUint32)
+ {
+ asm("shl ecx, 24 "); // CPUs mask into bits 24-31
+ asm("jz short sri0 "); // no CPUs, so nothing to do
+ asm("pushfd ");
+ asm("cli ");
+ asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
1.582 + asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4800));
1.583 + asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
1.584 + asm("popfd ");
1.585 + asm("sri0: ");
1.586 + asm("ret ");
1.587 + }
1.588 +#endif
1.589 +
1.590 +extern "C" __NAKED__ void __fastcall send_ipi(TUint32)
1.591 + {
1.592 + asm("pushfd ");
1.593 + asm("cli ");
1.594 + asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
1.595 + asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4000));
1.596 + asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
1.597 + asm("popfd ");
1.598 + asm("ret ");
1.599 + }
1.600 +
1.601 +// Send a reschedule IPI to the current processor
1.602 +// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
1.603 +extern "C" __NAKED__ void send_self_resched_ipi()
1.604 + {
1.605 + asm("pushfd ");
1.606 + asm("cli ");
1.607 + asm("xor ecx, ecx ");
1.608 + asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
1.609 + asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x44000)); // destination shorthand = self
1.610 + asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
1.611 + asm("popfd ");
1.612 + asm("ret ");
1.613 + }
1.614 +
1.615 +extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
1.616 + {
1.617 + asm("mov ecx, [esp+4] ");
1.618 + asm("pushfd ");
1.619 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(TSubScheduler, i_APICID));
1.620 + asm("cli ");
1.621 + asm("mov ds:[%0], edx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
1.622 + asm("mov eax, %0" : : "i" (TRANSFERRED_IRQ_VECTOR | 0x4000));
1.623 + asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
1.624 + asm("popfd ");
1.625 + asm("ret ");
1.626 + }
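+
+// The ICR low-dword values used above decompose as follows (standard local
+// APIC ICR layout; the low byte is the vector):
+//
+//     0x4000  : bit 14 set       - fixed delivery, physical destination, level assert
+//     0x4800  : 0x4000 | bit 11  - as above but logical destination mode
+//     0x44000 : 0x4000 | bit 18  - destination shorthand 01 = self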
+