1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/nkernsmp/x86/ncthrd.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,799 @@
1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\nkernsmp\x86\ncthrd.cia
1.18 +//
1.19 +//
1.20 +
1.21 +#include <x86.h>
1.22 +#include <apic.h>
1.23 +
1.24 +const TLinAddr NKern_Exit = (TLinAddr)NKern::Exit; // tail-called by __StartThread when a thread's entry point returns
1.25 +//const TLinAddr NKern_Lock = (TLinAddr)NKern::Lock;
1.26 +
1.27 +extern "C" void send_resched_ipis(TUint32 aMask); // send reschedule IPIs to the CPUs in aMask
1.28 +extern "C" void __fastcall add_dfc(TDfc* aDfc); // queue a DFC (__fastcall: aDfc passed in ecx)
1.29 +
1.30 +
1.31 +__NAKED__ void __StartThread()
1.32 + {
1.33 + // First entry point of a new nanothread. On entry interrupts disabled, SThreadExcStack on stack
1.34 + asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID)); // eax = local APIC ID register
1.35 + asm("add esp, 4 "); // get rid of iReason
1.36 + asm("shr eax, 24 "); // eax = this CPU's APIC ID
1.37 + asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable)); // esi = this CPU's TSubScheduler
1.38 + asm("xor eax, eax ");
1.39 + asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs)); // atomically claim any pending resched IPI mask
1.40 + asm("test eax, eax ");
1.41 + asm("jz short no_resched_ipis "); // skip if no IPIs outstanding
1.42 + asm("push eax ");
1.43 + asm("call %a0" : : "i" (&send_resched_ipis)); // send the deferred reschedule IPIs
1.44 + asm("add esp, 4 ");
1.45 + asm("no_resched_ipis: ");
1.46 + asm("pop ecx "); // pop initial register state from SThreadExcStack ...
1.47 + asm("pop edx ");
1.48 + asm("pop ebx "); // ebx = argument for the entry point (pushed below)
1.49 + asm("pop esi ");
1.50 + asm("pop edi ");
1.51 + asm("pop ebp ");
1.52 + asm("pop eax "); // eax = entry point function (called below)
1.53 + asm("pop ds "); // ... including segment selectors
1.54 + asm("pop es ");
1.55 + asm("pop fs ");
1.56 + asm("pop gs ");
1.57 + asm("sti "); // enable interrupts before entering the thread
1.58 + asm("push ebx "); // single argument for the entry point
1.59 + asm("call eax "); // call the thread's entry point function
1.60 + asm("add esp, 4 ");
1.61 + asm("call %a0" : : "i" (NKern_Exit)); // entry point returned - exit the thread
1.62 + }
1.63 +
1.64 +extern "C" __NAKED__ TUint __tr() // return the task register (TSS selector) of the current CPU
1.65 + {
1.66 + asm("xor eax, eax"); // clear upper bits of return value
1.67 + asm("str ax"); // ax = task register selector
1.68 + asm("ret");
1.69 + }
1.70 +
1.71 +__NAKED__ TUint32 X86::GetCR0() // return the value of control register CR0
1.72 + {
1.73 + asm("mov eax, cr0");
1.74 + asm("ret");
1.75 + }
1.76 +
1.77 +__NAKED__ void X86::SetCR0(TUint32) // write the argument to control register CR0
1.78 + {
1.79 + asm("mov eax, [esp+4]"); // eax = new CR0 value (cdecl stack argument)
1.80 + asm("mov cr0, eax");
1.81 + asm("ret");
1.82 + }
1.83 +
1.84 +__NAKED__ TUint32 X86::ModifyCR0(TUint32 /*clear*/, TUint32 /*set*/) // CR0 = (CR0 &~ clear) | set; return original CR0
1.85 + {
1.86 + asm("mov ecx, [esp+4]"); // ecx = bits to clear
1.87 + asm("mov edx, [esp+8]"); // edx = bits to set
1.88 + asm("mov eax, cr0"); // eax = original CR0 (return value)
1.89 + asm("not ecx");
1.90 + asm("and ecx, eax"); // ecx = CR0 &~ clear
1.91 + asm("or ecx, edx"); // ecx = (CR0 &~ clear) | set
1.92 + asm("mov cr0, ecx");
1.93 + asm("ret");
1.94 + }
1.95 +
1.96 +/** Mark the beginning of an event handler tied to a thread or thread group
1.97 +
1.98 + Return the number of the CPU on which the event handler should run
1.99 +*/
1.100 +__NAKED__ TInt NSchedulable::BeginTiedEvent()
1.101 + {
1.102 + THISCALL_PROLOG0()
1.103 + asm("mov eax, 0x10000 "); // EEventCountInc
1.104 + asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState)); // atomically ++event count; eax = old iEventState
1.105 + asm("test eax, 0x8000 "); // EEventParent
1.106 + asm("jz short bte0 "); // not set so don't look at group
1.107 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));
1.108 + asm("cmp edx, 0 ");
1.109 + asm("jz short bte_bad "); // no parent - shouldn't happen
1.110 + asm("cmp edx, ecx "); // iParent == this means ...
1.111 + asm("jz short bte2 "); // parent not yet updated, use iNewParent
1.112 + asm("bte1: ");
1.113 + asm("mov eax, 0x10000 "); // EEventCountInc
1.114 + asm("lock xadd [edx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState)); // ++parent's event count; eax = old parent state
1.115 + asm("bte0: ");
1.116 + asm("and eax, 0x1f "); // EEventCpuMask - return the event CPU number
1.117 + THISCALL_EPILOG0()
1.118 +
1.119 + asm("bte2: ");
1.120 + asm("lock add dword ptr [esp], 0 "); // full barrier: make sure iNewParent is read after iParent
1.121 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent));
1.122 + asm("cmp edx, 0 ");
1.123 + asm("jnz short bte1 ");
1.124 + asm("lock add dword ptr [esp], 0 "); // full barrier: make sure iParent is read after iNewParent
1.125 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); // iNewParent has been cleared, so iParent must now have been set
1.126 + asm("cmp edx, ecx ");
1.127 + asm("jnz short bte1 "); // if iParent still not set, something is wrong
1.128 +
1.129 + asm("bte_bad: ");
1.130 + asm("int 0xff "); // crash - inconsistent parent state
1.131 + }
1.132 +
1.133 +
1.134 +/** Mark the end of an event handler tied to a thread or thread group
1.135 + Decrement the tied event count (on the parent too, if any); if the count reaches zero and a deferred ready is pending, queue the i_IDfcMem IDFC to action it.
1.136 +*/
1.137 +__NAKED__ void NSchedulable::EndTiedEvent()
1.138 + {
1.139 + THISCALL_PROLOG0()
1.140 + asm("test dword ptr [ecx+%0], 0x8000" : : "i" _FOFF(NSchedulable,iEventState)); // EEventParent (bit 15, matching BeginTiedEvent/TiedEventJoinInterlock; was 0x800 which tested the wrong bit)
1.141 + asm("jnz short etep0 ");
1.142 + asm("ete1: ");
1.143 + asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
1.144 + asm("ete2: ");
1.145 + asm("mov edx, eax ");
1.146 + asm("sub edx, 0x10000 "); // EEventCountInc - decrement event count
1.147 + asm("cmp edx, 0x10000 "); // EEventCountInc
1.148 + asm("jae short ete3 "); // skip unless count is now zero
1.149 + asm("mov dl, dh ");
1.150 + asm("and dl, 0x1f "); // event cpu = thread cpu
1.151 + asm("ete3: ");
1.152 + asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState)); // commit; on failure eax = current state
1.153 + asm("jne short ete2 "); // retry if the state changed under us
1.154 + asm("cmp edx, 0x10000 "); // EEventCountInc
1.155 + asm("jae short ete4 "); // If this wasn't last tied event, finish
1.156 + asm("test edx, 0x4000 "); // test deferred ready flag
1.157 + asm("jz short ete4 ");
1.158 + asm("push ecx ");
1.159 + asm("lea ecx, [ecx+%0]" : : "i" _FOFF(NSchedulable,i_IDfcMem)); // ecx = IDFC used to action the deferred ready
1.160 + asm("call %a0" : : "i" (add_dfc)); // __fastcall - takes the DFC in ecx
1.161 + asm("pop ecx ");
1.162 + asm("ete4: ");
1.163 + THISCALL_EPILOG0()
1.164 +
1.165 + asm("etep0: ");
1.166 + asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after seeing parent flag set
1.167 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));
1.168 + asm("cmp edx, 0 ");
1.169 + asm("jz short ete_bad "); // no parent - shouldn't happen
1.170 + asm("cmp edx, ecx ");
1.171 + asm("jz short etep1 "); // parent not yet updated, use iNewParent
1.172 + asm("etep2: ");
1.173 + asm("push ecx ");
1.174 + asm("mov ecx, edx ");
1.175 + asm("call ete1 "); // operate on parent state
1.176 + asm("pop ecx "); // restore this
1.177 +// mb();
1.178 + asm("mov eax, 0xffff0000 "); // -EEventCountInc
1.179 + asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState)); // decrement thread's event count
1.180 + THISCALL_EPILOG0()
1.181 +
1.182 + asm("etep1: ");
1.183 + asm("lock add dword ptr [esp], 0 "); // make sure iNewParent is read after iParent
1.184 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent));
1.185 + asm("cmp edx, 0 ");
1.186 + asm("jnz short etep2 ");
1.187 + asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after iNewParent
1.188 + asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); // iNewParent has been cleared, so iParent must now have been set
1.189 + asm("cmp edx, ecx ");
1.190 + asm("jnz short etep2 "); // if iParent still not set, something is wrong
1.191 +
1.192 + asm("ete_bad: ");
1.193 + asm("int 0xff "); // crash - inconsistent parent state
1.194 + }
1.195 +
1.196 +
1.197 +/** Check for concurrent tied events when a thread/group becomes ready
1.198 +
1.199 + This is only ever called on a lone thread or a group, not on a thread
1.200 + which is part of a group.
1.201 +
1.202 + Update the thread CPU field in iEventState
1.203 + If thread CPU != event CPU and event count nonzero, atomically
1.204 + set the ready deferred flag and return TRUE, else return FALSE.
1.205 + If event count zero, set event CPU = thread CPU atomically.
1.206 +
1.207 + @param aCpu the CPU on which the thread/group is to become ready
1.208 + @return TRUE if the ready must be deferred.
1.209 +*/
1.210 +__NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt aCpu)
1.211 + {
1.212 + THISCALL_PROLOG1()
1.213 + asm("push ebx "); // save ebx
1.214 + asm("mov ebx, [esp+8] "); // ebx = aCpu ([esp+8] skips saved ebx + return address)
1.215 + asm("and ebx, 0x1f "); // restrict to EEventCpuMask
1.216 + asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
1.217 + asm("teri1: ");
1.218 + asm("mov edx, eax ");
1.219 + asm("and dh, 0xe0 "); // clear thread CPU field
1.220 + asm("or dh, bl "); // set thread CPU field
1.221 + asm("cmp edx, 0x10000 "); // EEventCountInc
1.222 + asm("jb short teri2 "); // skip if event count zero
1.223 + asm("cmp dl, bl "); // thread CPU = event CPU?
1.224 + asm("je short teri3 "); // skip if same
1.225 + asm("or edx, 0x4000 "); // EDeferredReady
1.226 + asm("jmp short teri3 ");
1.227 + asm("teri2: ");
1.228 + asm("mov dl, dh ");
1.229 + asm("and dl, 0x1f "); // event CPU = thread CPU
1.230 + asm("teri3: ");
1.231 + asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState)); // commit; on failure eax = current state
1.232 + asm("jne short teri1 "); // retry if the state changed under us
1.233 + asm("xor eax, edx "); // old iEventState ^ new iEventState
1.234 + asm("pop ebx ");
1.235 + asm("and eax, 0x4000 "); // return TRUE if EDeferredReady was set
1.236 + THISCALL_EPILOG1()
1.237 + }
1.238 +
1.239 +
1.240 +/** Check for concurrent tied events when a thread leaves a group
1.241 +
1.242 + If event count zero, atomically set the event and thread CPUs to the
1.243 + current CPU, clear the parent flag and return TRUE, else return FALSE.
1.244 +
1.245 + @return TRUE if the parent flag has been cleared
1.246 +*/
1.247 +__NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
1.248 + {
1.249 + THISCALL_PROLOG0()
1.250 + asm("push ebx "); // save ebx
1.251 + asm("xor ebx, ebx ");
1.252 + asm("str bx "); // bx = TSS selector = 0x28 + 8*cpu
1.253 + asm("sub bl, 0x28 ");
1.254 + asm("shr bl, 3 "); // bl = current CPU number (event CPU field)
1.255 + asm("mov bh, bl "); // bh = current CPU number (thread CPU field); all other bits zero
1.256 + asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
1.257 + asm("teli1: ");
1.258 + asm("cmp eax, 0x10000 "); // EEventCountInc
1.259 + asm("jae short teli0 "); // if count >=1, finish and return FALSE
1.260 + asm("mov edx, ebx "); // update CPUs, clear parent flag
1.261 + // NOTE: Deferred ready flag must have been clear since thread is running
1.262 + asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState)); // commit; on failure eax = current state
1.263 + asm("jne short teli1 "); // retry if the state changed under us
1.264 + asm("pop ebx ");
1.265 + asm("mov eax, 1 "); // return TRUE
1.266 + THISCALL_EPILOG0()
1.267 + asm("teli0: ");
1.268 + asm("pop ebx ");
1.269 + asm("xor eax, eax "); // return FALSE
1.270 + THISCALL_EPILOG0()
1.271 + }
1.272 +
1.273 +
1.274 +/** Check for concurrent tied events when a thread joins a group
1.275 +
1.276 + If event count zero, atomically set the parent flag and return TRUE,
1.277 + else return FALSE.
1.278 +
1.279 + @return TRUE if the parent flag has been set
1.280 +*/
1.281 +__NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
1.282 + {
1.283 + THISCALL_PROLOG0()
1.284 + asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));
1.285 + asm("teji1: ");
1.286 + asm("cmp eax, 0x10000 "); // EEventCountInc
1.287 + asm("jae short teji0 "); // if count >=1, finish and return FALSE
1.288 + asm("mov edx, eax ");
1.289 + asm("or edx, 0x8000 "); // set parent flag (EEventParent)
1.290 + asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState)); // commit; on failure eax = current state
1.291 + asm("jne short teji1 "); // retry if the state changed under us
1.292 + asm("mov eax, 1 "); // return TRUE
1.293 + THISCALL_EPILOG0()
1.294 + asm("teji0: ");
1.295 + asm("xor eax, eax "); // return FALSE
1.296 + THISCALL_EPILOG0()
1.297 + }
1.298 +
1.299 +
1.300 +/** Decrement a fast semaphore count
1.301 +
1.302 + If count > 0, decrement and do memory barrier
1.303 + If count = 0, set equal to (thread>>2)|0x80000000
1.304 + Return original count
1.305 +*/
1.306 +__NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
1.307 + {
1.308 + THISCALL_PROLOG1()
1.309 + asm("mov eax, [ecx]"); // eax = iCount
1.310 + asm("fsdec:");
1.311 + asm("mov edx, eax");
1.312 + asm("dec edx");
1.313 + asm("jns short fsdec1"); // count was > 0: just store count-1
1.314 + asm("mov edx, [esp+4]"); // count was 0: store (thread>>2)|0x80000000 to record the waiting thread
1.315 + asm("shr edx, 2");
1.316 + asm("or edx, 0x80000000");
1.317 + asm("fsdec1:");
1.318 + asm("lock cmpxchg [ecx], edx"); // commit; on failure eax = current count
1.319 + asm("jne short fsdec"); // retry on contention
1.320 + THISCALL_EPILOG1() // return original count (eax)
1.321 + }
1.322 +
1.323 +/** Increment a fast semaphore count
1.324 +
1.325 + Do memory barrier
1.326 + If iCount >= 0, increment by aCount and return 0
1.327 + If iCount < 0, set count equal to aCount-1 and return (original count << 2)
1.328 +*/
1.329 +__NAKED__ NThreadBase* NFastSemaphore::Inc(TInt)
1.330 + {
1.331 + THISCALL_PROLOG1()
1.332 + asm("mov eax, [ecx]"); // eax = iCount
1.333 + asm("fsinc:");
1.334 + asm("mov edx, [esp+4]"); // edx = aCount
1.335 + asm("test eax, eax");
1.336 + asm("js short fsinc1"); // negative count - new count = aCount-1 (after dec below)
1.337 + asm("lea edx, [edx+eax+1]"); // non-negative - new count = iCount+aCount (after dec below)
1.338 + asm("fsinc1:");
1.339 + asm("dec edx");
1.340 + asm("lock cmpxchg [ecx], edx"); // commit; on failure eax = current count
1.341 + asm("jne short fsinc"); // retry on contention
1.342 + asm("add eax, eax"); // CF = sign bit of original count
1.343 + asm("jc short fsinc2"); // was negative - return original count << 2
1.344 + asm("xor eax, eax"); // was >= 0 - return 0
1.345 + asm("fsinc2:");
1.346 + asm("add eax, eax");
1.347 + THISCALL_EPILOG1()
1.348 + }
1.349 +
1.350 +/** Reset a fast semaphore count
1.351 +
1.352 + Do memory barrier
1.353 + If iCount >= 0, set iCount=0 and return 0
1.354 + If iCount < 0, set iCount=0 and return (original count << 2)
1.355 +*/
1.356 +__NAKED__ NThreadBase* NFastSemaphore::DoReset()
1.357 + {
1.358 + THISCALL_PROLOG0()
1.359 + asm("xor eax, eax");
1.360 + asm("lock xchg eax, [ecx]"); // atomically set iCount=0; eax = original count
1.361 + asm("add eax, eax"); // CF = sign bit of original count
1.362 + asm("jc short fsrst0"); // was negative - return original count << 2
1.363 + asm("xor eax, eax"); // was >= 0 - return 0
1.364 + asm("fsrst0:");
1.365 + asm("add eax, eax");
1.366 + THISCALL_EPILOG0()
1.367 + }
1.368 +
1.369 +/** Check whether a thread holds a fast mutex.
1.370 + If so set the mutex contention flag and return TRUE, else return FALSE.
1.371 +
1.372 + Called with kernel lock held
1.373 +
1.374 + @internalComponent
1.375 + */
1.376 +__NAKED__ TBool NThreadBase::CheckFastMutexDefer()
1.377 + {
1.378 + THISCALL_PROLOG0()
1.379 + asm("mov eax, [ecx+%0]": :"i"_FOFF(NThreadBase, iHeldFastMutex));
1.380 + asm("mov edx, 0xfffffffc");
1.381 + asm("and edx, eax"); // edx points to mutex if any, eax bit 0 = flag
1.382 + asm("jnz short checkfmd1");
1.383 + asm("xor eax, eax"); // no mutex - return FALSE
1.384 + THISCALL_EPILOG0()
1.385 +
1.386 + // iHeldFastMutex points to a mutex
1.387 + asm("checkfmd1:");
1.388 + asm("test al, 1"); // bit 0 set = mutex in the process of being released
1.389 + asm("jz short checkfmd2");
1.390 +
1.391 + // mutex being released
1.392 + asm("mov eax, ecx"); // eax = this (expected iHoldingThread)
1.393 + asm("inc ecx"); // ecx = this+1 (holding-thread value with contention flag)
1.394 + asm("lock cmpxchg [edx], ecx"); // if m->iHoldingThread==this, set m->iHoldingThread = this+1 ...
1.395 + asm("jz short checkfmd3"); // ... and return TRUE
1.396 + asm("cmp eax, ecx"); // otherwise check if contention flag already set (iHoldingThread == this+1)
1.397 + asm("jz short checkfmd3"); // if so return TRUE
1.398 + asm("xor eax, eax");
1.399 + asm("dec ecx"); // restore ecx = this
1.400 + asm("mov [ecx+%0], eax": :"i"_FOFF(NThreadBase, iHeldFastMutex)); // else already released, so set iHeldFastMutex=0
1.401 + THISCALL_EPILOG0() // and return FALSE
1.402 +
1.403 + // mutex being acquired or has been acquired
1.404 + // if it has been acquired set the contention flag and return TRUE, else return FALSE
1.405 + asm("checkfmd2:");
1.406 + asm("mov eax, ecx"); // eax = this (expected iHoldingThread)
1.407 + asm("inc ecx"); // ecx = this+1 (holding-thread value with contention flag)
1.408 + asm("lock cmpxchg [edx], ecx"); // if m->iHoldingThread==this, set m->iHoldingThread = this+1
1.409 + asm("jz short checkfmd3"); // ... and return TRUE
1.410 + asm("cmp eax, ecx"); // otherwise check if contention flag already set
1.411 + asm("jz short checkfmd3"); // if so return TRUE
1.412 + asm("xor eax, eax");
1.413 + THISCALL_EPILOG0() // else return FALSE
1.414 +
1.415 + asm("checkfmd3:");
1.416 + asm("mov eax, 1"); // return TRUE
1.417 + THISCALL_EPILOG0()
1.418 + }
1.419 +
1.420 +
1.421 +/** Transition the state of an IDFC or DFC when Add() is called
1.422 +
1.423 + 0000->008n, 00Cn->00En, all other states unchanged
1.424 + Return original state.
1.425 +
1.426 + Enter and return with interrupts disabled.
1.427 +*/
1.428 +__NAKED__ TUint32 TDfc::AddStateChange()
1.429 + {
1.430 + THISCALL_PROLOG0()
1.431 + asm("xor eax, eax ");
1.432 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword (offset 10 into TDfc, as used throughout this file - TODO confirm against TDfc layout)
1.433 + asm("ascr: ");
1.434 + asm("mov edx, eax ");
1.435 + asm("test eax, eax ");
1.436 + asm("jne short asc1 ");
1.437 + asm("str dx "); // dx = TSS selector = 0x28 + 8*cpu
1.438 + asm("shr dl, 3 "); // dl = current CPU number + 5
1.439 + asm("add dl, 0x7b "); // 0000->008n
1.440 + asm("jmp short asc0 ");
1.441 + asm("asc1: ");
1.442 + asm("cmp eax, 0xE0 ");
1.443 + asm("jae short asc0 "); // if outside range 00C0-00DF leave alone
1.444 + asm("cmp eax, 0xC0 ");
1.445 + asm("jb short asc0 ");
1.446 + asm("add dl, 0x20 "); // 00Cn->00En
1.447 + asm("asc0: ");
1.448 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.449 + asm("jne short ascr "); // retry if the state changed under us
1.450 + THISCALL_EPILOG0()
1.451 + }
1.452 +
1.453 +/** Transition the state of an IDFC just before running it.
1.454 +
1.455 + 002g->00Cn, 008n->00Cn, 00An->00Cn, XXYY->XX00, XX00->0000
1.456 + other initial states invalid
1.457 + Return original state
1.458 +
1.459 + Enter and return with interrupts disabled.
1.460 +*/
1.461 +__NAKED__ TUint32 TDfc::RunIDFCStateChange()
1.462 + {
1.463 + THISCALL_PROLOG0()
1.464 + asm("xor eax, eax ");
1.465 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.466 + asm("risr: ");
1.467 + asm("cmp ah, 0 ");
1.468 + asm("jne short ris1 "); // high byte nonzero = per-CPU cancel bits set (see CancelInitialStateChange)
1.469 + asm("mov edx, eax ");
1.470 + asm("and dl, 0xfe ");
1.471 + asm("cmp dl, 0x20 ");
1.472 + asm("je short ris2 "); // 002g
1.473 + asm("mov edx, eax ");
1.474 + asm("cmp dl, 0xc0 ");
1.475 + asm("jge short ris_bad "); // not 80-BF
1.476 + asm("and dl, 0x1f ");
1.477 +
1.478 +asm("push ebx "); // debug check: CPU field of the state must match the current CPU
1.479 +asm("str bx ");
1.480 +asm("sub bl, 0x28 ");
1.481 +asm("shr bl, 3 "); // bl = current CPU number (TSS selector = 0x28 + 8*cpu)
1.482 +asm("cmp bl, dl ");
1.483 +asm("pop ebx ");
1.484 +asm("jne short ris_bad ");
1.485 +
1.486 + asm("or dl, 0xc0 "); // 008n->00Cn, 00An->00Cn
1.487 + asm("jmp short ris0 ");
1.488 + asm("ris_bad: ");
1.489 + asm("int 0xff "); // DIE
1.490 + asm("ris2: ");
1.491 +asm("mov edx, eax "); // debug check: idle generation g must be the complement of the current one
1.492 +asm("xor dl, 0x21 ");
1.493 +asm("cmp dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));
1.494 +asm("jne short ris_bad ");
1.495 + asm("str dx ");
1.496 + asm("shr dl, 3 "); // dl = current CPU number + 5
1.497 + asm("add dl, 0xbb "); // 002g->00Cn
1.498 + asm("jmp short ris0 ");
1.499 + asm("ris1: ");
1.500 + asm("xor edx, edx ");
1.501 + asm("cmp al, 0 ");
1.502 + asm("je short ris0 "); // XX00->0000
1.503 +asm("str dx "); // debug check: CPU field of the state must match the current CPU
1.504 +asm("sub dl, 0x28 ");
1.505 +asm("shr dl, 3 ");
1.506 +asm("xor dl, al ");
1.507 +asm("and dl, 0x1f ");
1.508 +asm("jne short ris_bad ");
1.509 +asm("xor edx, edx ");
1.510 + asm("mov dh, ah "); // XXYY->XX00
1.511 + asm("ris0: ");
1.512 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.513 + asm("jne short risr "); // retry if the state changed under us
1.514 + THISCALL_EPILOG0()
1.515 + }
1.516 +
1.517 +/** Transition the state of an IDFC just after running it.
1.518 +
1.519 + First swap aS->iCurrentIDFC with 0
1.520 + If original value != this, return 0xFFFFFFFF and don't touch *this
1.521 + Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
1.522 + other initial states invalid
1.523 + Return original state
1.524 +
1.525 + Enter and return with interrupts disabled.
1.526 +*/
1.527 +__NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
1.528 + {
1.529 + THISCALL_PROLOG1()
1.530 + asm("mov edx, [esp+4] "); // edx = aS
1.531 + asm("xor eax, eax ");
1.532 + asm("lock xchg eax, [edx+%0]" : : "i" _FOFF(TSubScheduler,iCurrentIDFC)); // swap aS->iCurrentIDFC with 0
1.533 + asm("xor eax, ecx "); // if aS->iCurrentIDFC==this originally, eax=0
1.534 + asm("jne short eis9 "); // else bail out
1.535 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword (upper bits of eax already zero here)
1.536 + asm("eisr: ");
1.537 + asm("xor edx, edx ");
1.538 + asm("cmp al, 0 ");
1.539 + asm("je short eis0 "); // XX00->0000
1.540 + asm("cmp al, 0x60 ");
1.541 + asm("jb short eis_bad "); // bad if < 60
1.542 + asm("cmp al, 0xC0 ");
1.543 + asm("jl short eis_bad "); // bad if 80-BF (signed compare lets 60-7F and C0-FF through)
1.544 +asm("str dx "); // debug check: CPU field of the state must match the current CPU
1.545 +asm("sub dl, 0x28 ");
1.546 +asm("shr dl, 3 ");
1.547 +asm("xor dl, al ");
1.548 +asm("and dl, 0x1f ");
1.549 +asm("jne short eis_bad ");
1.550 +asm("xor edx, edx ");
1.551 + asm("cmp ah, 0 ");
1.552 + asm("je short eis1 ");
1.553 + asm("mov dh, ah "); // XX6n->XX00, XXCn->XX00, XXEn->XX00
1.554 + asm("jmp short eis0 ");
1.555 + asm("eis1: ");
1.556 + asm("cmp al, 0xE0 ");
1.557 + asm("jl short eis0 "); // 00Cn->0000 (signed: C0-DF < E0)
1.558 + asm("mov dl, al ");
1.559 + asm("jb short eis0 "); // 006n->006n (unsigned: 60-7F below E0)
1.560 + asm("sub dl, 0x60 "); // 00En->008n
1.561 + asm("eis0: ");
1.562 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.563 + asm("jne short eisr "); // retry if the state changed under us
1.564 + THISCALL_EPILOG1()
1.565 + asm("eis9: ");
1.566 + asm("mov eax, 0xffffffff "); // wasn't our IDFC - return 0xFFFFFFFF, *this untouched
1.567 + THISCALL_EPILOG1()
1.568 + asm("eis_bad: ");
1.569 + asm("int 0xff "); // DIE
1.570 + }
1.571 +
1.572 +/** Transition the state of an idle-queued IDFC just after running it.
1.573 +
1.574 + 006n->002g where g = TheScheduler.iIdleGeneration
1.575 + XX6n->XX00
1.576 + other initial states invalid
1.577 + Return original state
1.578 +
1.579 + Enter and return with interrupts disabled.
1.580 +*/
1.581 +__NAKED__ TUint32 TDfc::EndIDFCStateChange2()
1.582 + {
1.583 + THISCALL_PROLOG0()
1.584 + asm("xor eax, eax ");
1.585 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.586 + asm("eis2r: ");
1.587 + asm("xor edx, edx ");
1.588 + asm("cmp al, 0x60 ");
1.589 + asm("jl short eis2_bad "); // if not 006n or XX6n, invalid (signed compare rejects 00-5F and 80-FF)
1.590 +asm("str dx "); // debug check: CPU field of the state must match the current CPU
1.591 +asm("sub dl, 0x28 ");
1.592 +asm("shr dl, 3 ");
1.593 +asm("xor dl, al ");
1.594 +asm("and dl, 0x1f ");
1.595 +asm("jne short eis2_bad ");
1.596 +asm("xor edx, edx ");
1.597 + asm("or dh, ah ");
1.598 + asm("jne short eis20 "); // XX6n->XX00
1.599 + asm("mov edx, 0x20 ");
1.600 + asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration)); // 006n->002g
1.601 + asm("eis20: ");
1.602 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.603 + asm("jne short eis2r "); // retry if the state changed under us
1.604 + THISCALL_EPILOG0()
1.605 + asm("eis2_bad: ");
1.606 + asm("int 0xff "); // DIE
1.607 + }
1.608 +
1.609 +/** Transition the state of a DFC just before moving it from the IDFC queue to
1.610 + its final queue.
1.611 +
1.612 + 002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
1.613 + other initial states invalid
1.614 + Return original state
1.615 +*/
1.616 +__NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
1.617 + {
1.618 + THISCALL_PROLOG0()
1.619 + asm("xor eax, eax ");
1.620 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.621 + asm("mfqr: ");
1.622 + asm("xor edx, edx ");
1.623 + asm("cmp al, 0xa0 ");
1.624 + asm("jl short mfq1a "); // 80-9F ok (signed compare)
1.625 + asm("cmp al, 0x20 ");
1.626 + asm("je short mfq1 "); // 20 ok
1.627 + asm("cmp al, 0x21 ");
1.628 + asm("je short mfq1 "); // 21 ok
1.629 + asm("cmp eax, 0 ");
1.630 + asm("je short mfq_bad "); // 0000 -> bad
1.631 + asm("cmp al, 0 "); // XX00 ok
1.632 + asm("je short mfq0 "); // XX00->0000
1.633 + asm("jmp short mfq_bad "); // not 002g, 008n, XX2g, XX8n, XX00
1.634 +asm("mfq1a: "); // debug check: CPU field of the state must match the current CPU
1.635 +asm("str dx ");
1.636 +asm("sub dl, 0x28 ");
1.637 +asm("shr dl, 3 ");
1.638 +asm("xor dl, al ");
1.639 +asm("and dl, 0x1f ");
1.640 +asm("jne short mfq_bad ");
1.641 +asm("xor edx, edx ");
1.642 + asm("mfq1: ");
1.643 + asm("cmp ah, 0 ");
1.644 + asm("jne short mfq2 ");
1.645 + asm("mov dl, 1 ");
1.646 + asm("jmp short mfq0 "); // 002g->0001, 008n->0001
1.647 + asm("mfq2: ");
1.648 + asm("mov dh, ah "); // XXYY->XX00
1.649 + asm("mfq0: ");
1.650 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.651 + asm("jne short mfqr "); // retry if the state changed under us
1.652 + THISCALL_EPILOG0()
1.653 + asm("mfq_bad: ");
1.654 + asm("int 0xff "); // DIE
1.655 + }
1.656 +
1.657 +/** Transition the state of an IDFC when transferring it to another CPU
1.658 +
1.659 + 002g->00Am, 008n->00Am, XXYY->XX00, XX00->0000
1.660 + other initial states invalid
1.661 + Return original state
1.662 +
1.663 + Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
1.664 +*/
1.665 +__NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
1.666 + {
1.667 + THISCALL_PROLOG1()
1.668 + asm("xor eax, eax ");
1.669 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.670 + asm("tisr: ");
1.671 + asm("xor edx, edx ");
1.672 + asm("cmp al, 0xa0 ");
1.673 + asm("jl short tis1a "); // 80-9F ok (signed compare)
1.674 + asm("cmp al, 0x20 ");
1.675 + asm("je short tis1 "); // 20 ok
1.676 + asm("cmp al, 0x21 ");
1.677 +asm("je short tis1 "); // 21 ok
1.678 + asm("jne short tis_bad "); // always taken here: not 002g or 008n -> bad. NOTE(review): an XX00 state dies here although the header lists XX00->0000 - confirm callers never pass XX00
1.679 +asm("tis1a: "); // debug check: CPU field of the state must match the current CPU
1.680 +asm("str dx ");
1.681 +asm("sub dl, 0x28 ");
1.682 +asm("shr dl, 3 ");
1.683 +asm("xor dl, al ");
1.684 +asm("and dl, 0x1f ");
1.685 +asm("jne short tis_bad ");
1.686 +asm("xor edx, edx ");
1.687 + asm("tis1: ");
1.688 + asm("cmp ah, 0 ");
1.689 + asm("jne short tis2 ");
1.690 + asm("mov dl, [esp+4] "); // dl = aCpu
1.691 + asm("or dl, 0xA0 ");
1.692 + asm("jmp short tis0 "); // 002g->00Am, 008n->00Am
1.693 + asm("tis2: ");
1.694 + asm("cmp al, 0 ");
1.695 + asm("je short tis0 "); // XX00->0000 (appears unreachable - tis1 is only entered with al in {20,21,8n}; see NOTE above)
1.696 + asm("mov dh, ah "); // XXYY->XX00
1.697 + asm("tis0: ");
1.698 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.699 + asm("jne short tisr "); // retry if the state changed under us
1.700 + THISCALL_EPILOG1()
1.701 + asm("tis_bad: ");
1.702 + asm("int 0xff "); // DIE
1.703 + }
1.704 +
1.705 +/** Transition the state of an IDFC/DFC just before cancelling it.
1.706 +
1.707 + 0000->0000, XX00->ZZ00, xxYY->zzYY
1.708 + Return original state
1.709 +
1.710 + Enter and return with interrupts disabled.
1.711 +*/
1.712 +__NAKED__ TUint32 TDfc::CancelInitialStateChange()
1.713 + {
1.714 + THISCALL_PROLOG0()
1.715 + asm("push ebx ");
1.716 + asm("str bx "); // bx = TSS selector = 0x28 + 8*cpu, so bl>>3 = cpu+5
1.717 + asm("shr bl, 3 ");
1.718 + asm("add bl, 3 "); // bl = current cpu number + 8
1.719 + asm("xor eax, eax ");
1.720 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.721 + asm("cisr: ");
1.722 + asm("mov edx, eax ");
1.723 + asm("test eax, eax ");
1.724 + asm("je short cis0 "); // 0000->0000
1.725 + asm("bts edx, ebx "); // set this CPU's cancel bit in the high byte: XX00->ZZ00, xxYY->zzYY
1.726 + asm("cis0: ");
1.727 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.728 + asm("jne short cisr "); // retry if the state changed under us
1.729 + asm("pop ebx ");
1.730 + THISCALL_EPILOG0()
1.731 + }
1.732 +
1.733 +/** Transition the state of an IDFC/DFC at the end of a cancel operation
1.734 +
1.735 + XXYY->XX00, XX00->0000
1.736 + Return original state
1.737 +
1.738 + Enter and return with interrupts disabled.
1.739 +*/
1.740 +__NAKED__ TUint32 TDfc::CancelFinalStateChange()
1.741 + {
1.742 + THISCALL_PROLOG0()
1.743 + asm("xor eax, eax ");
1.744 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.745 + asm("cfsr: ");
1.746 + asm("xor edx, edx ");
1.747 + asm("cmp al, 0 ");
1.748 + asm("je short cfs0 "); // XX00->0000
1.749 + asm("mov dh, ah "); // XXYY->XX00
1.750 + asm("cfs0: ");
1.751 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.752 + asm("jne short cfsr "); // retry if the state changed under us
1.753 + THISCALL_EPILOG0()
1.754 + }
1.755 +
1.756 +/** Transition the state of an IDFC or DFC when QueueOnIdle() is called
1.757 +
1.758 + 0000->002g where g = TheScheduler.iIdleGeneration,
1.759 + 00Cn->006n, all other states unchanged
1.760 + Return original state.
1.761 +
1.762 + Enter and return with interrupts disabled and IdleSpinLock held.
1.763 +*/
1.764 +__NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
1.765 + {
1.766 + THISCALL_PROLOG0()
1.767 + asm("xor eax, eax ");
1.768 + asm("mov ax, [ecx+10] "); // ax = DFC state halfword
1.769 + asm("qisr: ");
1.770 + asm("mov edx, eax ");
1.771 + asm("test eax, eax ");
1.772 + asm("jne short qis1 ");
1.773 + asm("mov edx, 0x20 ");
1.774 + asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration)); // 0000->002g
1.775 + asm("jmp short qis0 ");
1.776 + asm("qis1: ");
1.777 + asm("cmp eax, 0xE0 ");
1.778 + asm("jae short qis0 "); // if outside range 00C0-00DF leave alone
1.779 + asm("cmp eax, 0xC0 ");
1.780 + asm("jb short qis0 ");
1.781 + asm("sub dl, 0x60 "); // 00Cn->006n
1.782 + asm("qis0: ");
1.783 + asm("lock cmpxchg [ecx+10], dx "); // commit; on failure ax = current state
1.784 + asm("jne short qisr "); // retry if the state changed under us
1.785 + THISCALL_EPILOG0()
1.786 + }
1.787 +
1.788 +
1.789 +__NAKED__ void TDfc::ResetState() // atomically clear the DFC state; the state must not already be zero
1.790 + {
1.791 + THISCALL_PROLOG0()
1.792 + asm("xor eax, eax ");
1.793 + asm("lock xchg ax, [ecx+10] "); // atomically clear state halfword; ax = old state (lock prefix redundant: xchg with memory locks implicitly)
1.794 + asm("cmp eax, 0 ");
1.795 + asm("je short rst_bad "); // state was already zero - shouldn't happen
1.796 + THISCALL_EPILOG0()
1.797 + asm("rst_bad: ");
1.798 + asm("int 0xf8 "); // DIE
1.799 + }
1.800 +
1.801 +
1.802 +