Update contrib.
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\nkernsmp\x86\ncthrd.cia
21 const TLinAddr NKern_Exit = (TLinAddr)NKern::Exit;	// address of NKern::Exit, taken so the naked asm below can "call" it
22 //const TLinAddr NKern_Lock = (TLinAddr)NKern::Lock;
24 extern "C" void send_resched_ipis(TUint32 aMask);	// sends reschedule IPIs for the CPU mask in aMask (defined elsewhere)
25 extern "C" void __fastcall add_dfc(TDfc* aDfc);	// queues an IDFC/DFC; __fastcall: aDfc passed in ecx
28 __NAKED__ void __StartThread()
// NOTE(review): interior lines of this routine are not visible in this chunk;
// the comments below cover only the instructions shown.
30 // On entry interrupts disabled, SThreadExcStack on stack
31 asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));	// eax = this CPU's local APIC ID register
32 asm("add esp, 4 "); // get rid of iReason
34 asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));	// esi = this CPU's TSubScheduler (indexed by APIC ID)
36 asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));	// atomically fetch and clear pending resched IPI mask
37 asm("test eax, eax ");
38 asm("jz short no_resched_ipis ");	// no pending IPIs - skip the send
40 asm("call %a0" : : "i" (&send_resched_ipis));	// deliver the deferred reschedule IPIs
42 asm("no_resched_ipis: ");
58 asm("call %a0" : : "i" (NKern_Exit));	// thread function returned - terminate via NKern::Exit
// Presumably returns the task register selector (x86 'str' instruction) -
// TODO confirm: the function body is not visible in this chunk.
61 extern "C" __NAKED__ TUint __tr()
// Presumably reads control register CR0 - TODO confirm: the function body is
// not visible in this chunk.
68 __NAKED__ TUint32 X86::GetCR0()
74 __NAKED__ void X86::SetCR0(TUint32)
76 asm("mov eax, [esp+4]");	// eax = new CR0 value (first stack argument); remainder of body not visible in this chunk
81 __NAKED__ TUint32 X86::ModifyCR0(TUint32 /*clear*/, TUint32 /*set*/)
83 asm("mov ecx, [esp+4]");	// ecx = bits to clear
84 asm("mov edx, [esp+8]");	// edx = bits to set; remainder of body not visible in this chunk
93 /** Mark the beginning of an event handler tied to a thread or thread group
95 Return the number of the CPU on which the event handler should run
97 __NAKED__ TInt NSchedulable::BeginTiedEvent()
// NOTE(review): prolog, labels and several lines are not visible in this
// chunk; comments cover only the instructions shown. 'this' is in ecx.
100 asm("mov eax, 0x10000 "); // EEventCountInc
101 asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState));	// atomically bump tied-event count; eax = previous state
102 asm("test eax, 0x8000 "); // EEventParent
103 asm("jz short bte0 "); // not set so don't look at group
104 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));	// edx = parent group pointer
106 asm("jz short bte_bad "); // no parent - shouldn't happen
107 asm("cmp edx, ecx ");	// iParent == this?
108 asm("jz short bte2 "); // parent not yet updated, use iNewParent
110 asm("mov eax, 0x10000 "); // EEventCountInc
111 asm("lock xadd [edx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState));	// bump the parent group's event count as well
113 asm("and eax, 0x1f "); // EEventCpuMask
117 asm("lock add dword ptr [esp], 0 "); // make sure iNewParent is read after iParent (locked no-op = full memory barrier)
118 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent));
120 asm("jnz short bte1 ");
121 asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after iNewParent
122 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); // iNewParent has been cleared, so iParent must now have been set
123 asm("cmp edx, ecx ");
124 asm("jnz short bte1 "); // if iParent still not set, something is wrong
131 /** Mark the end of an event handler tied to a thread or thread group
134 __NAKED__ void NSchedulable::EndTiedEvent()
// NOTE(review): prolog, labels and several lines are not visible in this chunk.
// FIX: the parent-flag test used 0x800, but EEventParent is 0x8000 - see the
// matching "test eax, 0x8000 // EEventParent" in BeginTiedEvent and
// "or edx, 0x8000 // set parent flag" in TiedEventJoinInterlock. 0x800 lies
// inside the thread-CPU field and would misroute the parent handling.
137 asm("test dword ptr [ecx+%0], 0x8000" : : "i" _FOFF(NSchedulable,iEventState)); // EEventParent
138 asm("jnz short etep0 ");	// tied to a group - handle parent state first
140 asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));	// eax = current state (cmpxchg comparand)
142 asm("mov edx, eax ");
143 asm("sub edx, 0x10000 "); // EEventCountInc
144 asm("cmp edx, 0x10000 "); // EEventCountInc
145 asm("jae short ete3 ");	// more tied events outstanding after this one
147 asm("and dl, 0x1f "); // event cpu = thread cpu
149 asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));	// commit new state if unchanged since read
150 asm("jne short ete2 ");	// state changed under us - retry
151 asm("cmp edx, 0x10000 "); // EEventCountInc
152 asm("jae short ete4 "); // If this wasn't last tied event, finish
153 asm("test edx, 0x4000 "); // test deferred ready flag
154 asm("jz short ete4 ");
156 asm("lea ecx, [ecx+%0]" : : "i" _FOFF(NSchedulable,i_IDfcMem));	// ecx = embedded IDFC (fastcall argument)
157 asm("call %a0" : : "i" (add_dfc));	// queue it to complete the deferred ready
163 asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after seeing parent flag set (locked no-op = memory barrier)
164 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent));
166 asm("jz short ete_bad "); // no parent - shouldn't happen
167 asm("cmp edx, ecx ");
168 asm("jz short etep1 "); // parent not yet updated, use iNewParent
171 asm("mov ecx, edx ");	// switch 'this' to the parent group
172 asm("call ete1 "); // operate on parent state
173 asm("pop ecx "); // restore this
175 asm("mov eax, 0xffff0000 "); // -EEventCountInc
176 asm("lock xadd [ecx+%0], eax" : : "i" _FOFF(NSchedulable,iEventState)); // decrement thread's event count
180 asm("lock add dword ptr [esp], 0 "); // make sure iNewParent is read after iParent
181 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NThreadBase,iNewParent));
183 asm("jnz short etep2 ");
184 asm("lock add dword ptr [esp], 0 "); // make sure iParent is read after iNewParent
185 asm("mov edx, [ecx+%0]" : : "i" _FOFF(NSchedulable,iParent)); // iNewParent has been cleared, so iParent must now have been set
186 asm("cmp edx, ecx ");
187 asm("jnz short etep2 "); // if iParent still not set, something is wrong
194 /** Check for concurrent tied events when a thread/group becomes ready
196 This is only ever called on a lone thread or a group, not on a thread
197 which is part of a group.
199 Update the thread CPU field in iEventState
200 If thread CPU != event CPU and event count nonzero, atomically
201 set the ready deferred flag and return TRUE, else return FALSE.
202 If event count zero, set event CPU = thread CPU atomically.
204 @param aCpu the CPU on which the thread/group is to become ready
205 @return TRUE if the ready must be deferred.
207 __NAKED__ TBool NSchedulable::TiedEventReadyInterlock(TInt aCpu)
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
211 asm("mov ebx, [esp+8] "); // ebx = aCpu
212 asm("and ebx, 0x1f ");	// clamp to the 5-bit CPU field width
213 asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));	// eax = current state (cmpxchg comparand)
215 asm("mov edx, eax ");
216 asm("and dh, 0xe0 ");	// clear thread CPU field (state bits 8-12)
217 asm("or dh, bl "); // set thread CPU field
218 asm("cmp edx, 0x10000 "); // EEventCountInc
219 asm("jb short teri2 "); // skip if event count zero
220 asm("cmp dl, bl "); // thread CPU = event CPU?
221 asm("je short teri3 "); // skip if same
222 asm("or edx, 0x4000 "); // EDeferredReady
223 asm("jmp short teri3 ");
226 asm("and dl, 0x1f "); // event CPU = thread CPU
228 asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));	// commit new state if unchanged since read
229 asm("jne short teri1 ");	// state changed under us - retry
230 asm("xor eax, edx "); // old iEventState ^ new iEventState
232 asm("and eax, 0x4000 "); // return TRUE if EDeferredReady was set
237 /** Check for concurrent tied events when a thread leaves a group
239 If event count zero, atomically set the event and thread CPUs to the
240 current CPU, clear the parent flag and return TRUE, else return FALSE.
242 @return TRUE if the parent flag has been cleared
244 __NAKED__ TBool NThreadBase::TiedEventLeaveInterlock()
// NOTE(review): several lines (prolog, the current-CPU load feeding bl,
// labels) are not visible in this chunk.
248 asm("xor ebx, ebx ");
250 asm("sub bl, 0x28 ");	// bl = current CPU number - derived from a load not visible here, TODO confirm
253 asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));	// eax = current state (cmpxchg comparand)
255 asm("cmp eax, 0x10000 "); // EEventCountInc
256 asm("jae short teli0 "); // if count >=1, finish and return FALSE
257 asm("mov edx, ebx "); // update CPUs, clear parent flag
258 // NOTE: Deferred ready flag must have been clear since thread is running
259 asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));	// commit if unchanged since read
260 asm("jne short teli1 ");	// state changed under us - retry
262 asm("mov eax, 1 "); // return TRUE
266 asm("xor eax, eax "); // return FALSE
271 /** Check for concurrent tied events when a thread joins a group
273 If event count zero, atomically set the parent flag and return TRUE,
276 @return TRUE if the parent flag has been set
278 __NAKED__ TBool NThreadBase::TiedEventJoinInterlock()
281 asm("mov eax, [ecx+%0]" : : "i" _FOFF(NSchedulable,iEventState));	// eax = current state (cmpxchg comparand)
283 asm("cmp eax, 0x10000 "); // EEventCountInc
284 asm("jae short teji0 "); // if count >=1, finish and return FALSE
285 asm("mov edx, eax ");
286 asm("or edx, 0x8000 "); // set parent flag
287 asm("lock cmpxchg [ecx+%0], edx" : : "i" _FOFF(NSchedulable,iEventState));	// commit if unchanged since read
288 asm("jne short teji1 ");	// state changed under us - retry
289 asm("mov eax, 1 "); // return TRUE
292 asm("xor eax, eax "); // return FALSE
297 /** Decrement a fast semaphore count
299 If count > 0, decrement and do memory barrier
300 If count = 0, set equal to (thread>>2)|0x80000000
301 Return original count
303 __NAKED__ TInt NFastSemaphore::Dec(NThreadBase*)
// NOTE(review): several lines (prolog, the compare feeding 'jns', the
// thread-pointer shift, labels) are not visible in this chunk.
306 asm("mov eax, [ecx]");	// eax = iCount (cmpxchg comparand)
310 asm("jns short fsdec1");
311 asm("mov edx, [esp+4]");	// edx = blocking thread pointer (stack argument)
313 asm("or edx, 0x80000000");	// per contract above: count becomes (thread>>2)|0x80000000
315 asm("lock cmpxchg [ecx], edx");	// commit if iCount unchanged since read
316 asm("jne short fsdec");	// lost the race - retry from the top
320 /** Increment a fast semaphore count
323 If iCount >= 0, increment by aCount and return 0
324 If iCount < 0, set count equal to aCount-1 and return (original count << 2)
326 __NAKED__ NThreadBase* NFastSemaphore::Inc(TInt)
// NOTE(review): several lines (prolog, labels) are not visible in this chunk.
329 asm("mov eax, [ecx]");	// eax = iCount (cmpxchg comparand)
331 asm("mov edx, [esp+4]");	// edx = aCount (stack argument)
332 asm("test eax, eax");
333 asm("js short fsinc1");	// negative count - a thread pointer is stored there
334 asm("lea edx, [edx+eax+1]");
337 asm("lock cmpxchg [ecx], edx");	// commit if iCount unchanged since read
338 asm("jne short fsinc");	// lost the race - retry from the top
340 asm("jc short fsinc2");
347 /** Reset a fast semaphore count
350 If iCount >= 0, set iCount=0 and return 0
351 If iCount < 0, set iCount=0 and return (original count << 2)
353 __NAKED__ NThreadBase* NFastSemaphore::DoReset()
// NOTE(review): the lines setting up eax and the epilog are not visible here.
357 asm("lock xchg eax, [ecx]");	// atomically swap iCount with eax, fetching the original count
359 asm("jc short fsrst0");
366 /** Check whether a thread holds a fast mutex.
367 If so set the mutex contention flag and return TRUE, else return FALSE.
369 Called with kernel lock held
373 __NAKED__ TBool NThreadBase::CheckFastMutexDefer()
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
376 asm("mov eax, [ecx+%0]": :"i"_FOFF(NThreadBase, iHeldFastMutex));
377 asm("mov edx, 0xfffffffc");	// mask for the mutex pointer (low 2 bits are flags)
378 asm("and edx, eax"); // edx points to mutex if any, eax bit 0 = flag
379 asm("jnz short checkfmd1");
380 asm("xor eax, eax"); // no mutex - return FALSE
383 // iHeldFastMutex points to a mutex
386 asm("jz short checkfmd2");
388 // mutex being released
391 asm("lock cmpxchg [edx], ecx"); // if m->iHoldingThread==this, set m->iHoldingThread = this+1 ...
392 asm("jz short checkfmd3"); // ... and return TRUE
393 asm("cmp eax, ecx"); // otherwise check if contention flag already set
394 asm("jz short checkfmd3"); // if so return TRUE
397 asm("mov [ecx+%0], eax": :"i"_FOFF(NThreadBase, iHeldFastMutex)); // else already released, so set iHeldFastMutex=0
398 THISCALL_EPILOG0() // and return FALSE
400 // mutex being acquired or has been acquired
401 // if it has been acquired set the contention flag and return TRUE, else return FALSE
405 asm("lock cmpxchg [edx], ecx"); // if m->iHoldingThread==this, set m->iHoldingThread = this+1
406 asm("jz short checkfmd3"); // ... and return TRUE
407 asm("cmp eax, ecx"); // otherwise check if contention flag already set
408 asm("jz short checkfmd3"); // if so return TRUE
410 THISCALL_EPILOG0() // else return FALSE
413 asm("mov eax, 1"); // return TRUE
418 /** Transition the state of an IDFC or DFC when Add() is called
420 0000->008n, 00Cn->00En, all other states unchanged
421 Return original state.
423 Enter and return with interrupts disabled.
425 __NAKED__ TUint32 TDfc::AddStateChange()
// NOTE(review): prolog, labels and the line feeding dl before the shift are
// not visible in this chunk.
428 asm("xor eax, eax ");
429 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word at offset 10; eax = cmpxchg comparand
431 asm("mov edx, eax ");
432 asm("test eax, eax ");
433 asm("jne short asc1 ");	// nonzero - check for the 00Cn->00En case
435 asm("shr dl, 3 "); // dl = current CPU number + 5
436 asm("add dl, 0x7b "); // 0000->008n
437 asm("jmp short asc0 ");
439 asm("cmp eax, 0xE0 ");
440 asm("jae short asc0 "); // if outside range 00C0-00DF leave alone
441 asm("cmp eax, 0xC0 ");
442 asm("jb short asc0 ");
443 asm("add dl, 0x20 "); // 00Cn->00En
445 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
446 asm("jne short ascr ");	// state changed under us - retry
450 /** Transition the state of an IDFC just before running it.
452 002g->00Cn, 008n->00Cn, XXYY->XX00, XX00->0000
453 other initial states invalid
454 Return original state
456 Enter and return with interrupts disabled.
458 __NAKED__ TUint32 TDfc::RunIDFCStateChange()
// NOTE(review): prolog, labels and the current-CPU load feeding bl are not
// visible in this chunk.
461 asm("xor eax, eax ");
462 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
465 asm("jne short ris1 ");
466 asm("mov edx, eax ");
467 asm("and dl, 0xfe ");	// ignore the generation bit g
468 asm("cmp dl, 0x20 ");
469 asm("je short ris2 "); // 002g
470 asm("mov edx, eax ");
471 asm("cmp dl, 0xc0 ");
472 asm("jge short ris_bad "); // not 80-BF	(signed compare: only 80-BF fall through)
473 asm("and dl, 0x1f ");
477 asm("sub bl, 0x28 ");
481 asm("jne short ris_bad ");	// CPU number mismatch - invalid
483 asm("or dl, 0xc0 "); // 008n->00Cn, 00An->00Cn
484 asm("jmp short ris0 ");
486 asm("int 0xff "); // DIE
488 asm("mov edx, eax ");
489 asm("xor dl, 0x21 ");
490 asm("cmp dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));	// generation bit must match the current idle generation
491 asm("jne short ris_bad ");
493 asm("shr dl, 3 "); // dl = current CPU number + 5
494 asm("add dl, 0xbb "); // 002g->00Cn
495 asm("jmp short ris0 ");
497 asm("xor edx, edx ");
499 asm("je short ris0 "); // XX00->0000
501 asm("sub dl, 0x28 ");
504 asm("and dl, 0x1f ");
505 asm("jne short ris_bad ");
506 asm("xor edx, edx ");
507 asm("mov dh, ah "); // XXYY->XX00
509 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
510 asm("jne short risr ");	// state changed under us - retry
514 /** Transition the state of an IDFC just after running it.
516 First swap aS->iCurrentIDFC with 0
517 If original value != this, return 0xFFFFFFFF and don't touch *this
518 Else 00Cn->0000, 00En->008n, 006n->006n, XXCn->XX00, XXEn->XX00, XX6n->XX00, XX00->0000
519 other initial states invalid
520 Return original state
522 Enter and return with interrupts disabled.
524 __NAKED__ TUint32 TDfc::EndIDFCStateChange(TSubScheduler* /*aS*/)
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
527 asm("mov edx, [esp+4] "); // edx = aS
528 asm("xor eax, eax ");
529 asm("lock xchg eax, [edx+%0]" : : "i" _FOFF(TSubScheduler,iCurrentIDFC)); // swap aS->iCurrentIDFC with 0
530 asm("xor eax, ecx "); // if aS->iCurrentIDFC==this originally, eax=0
531 asm("jne short eis9 "); // else bail out
532 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word (high bits of eax already zero)
534 asm("xor edx, edx ");
536 asm("je short eis0 "); // XX00->0000
537 asm("cmp al, 0x60 ");
538 asm("jb short eis_bad "); // bad if < 60
539 asm("cmp al, 0xC0 ");
540 asm("jl short eis_bad "); // bad if 80-BF	(signed compare: 60-7F and C0-FF pass)
542 asm("sub dl, 0x28 ");
545 asm("and dl, 0x1f ");
546 asm("jne short eis_bad ");	// CPU number mismatch - invalid
547 asm("xor edx, edx ");
549 asm("je short eis1 ");
550 asm("mov dh, ah "); // XX6n->XX00, XXCn->XX00, XXEn->XX00
551 asm("jmp short eis0 ");
553 asm("cmp al, 0xE0 ");
554 asm("jl short eis0 "); // 00Cn->0000
556 asm("jb short eis0 "); // 006n->006n
557 asm("sub dl, 0x60 "); // 00En->008n
559 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
560 asm("jne short eisr ");	// state changed under us - retry
563 asm("mov eax, 0xffffffff ");	// iCurrentIDFC was not this - return 0xFFFFFFFF
569 /** Transition the state of an IDFC just after running it.
571 006n->002g where g = TheScheduler.iIdleGeneration
573 other initial states invalid
574 Return original state
576 Enter and return with interrupts disabled.
578 __NAKED__ TUint32 TDfc::EndIDFCStateChange2()
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
581 asm("xor eax, eax ");
582 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
584 asm("xor edx, edx ");
585 asm("cmp al, 0x60 ");
586 asm("jl short eis2_bad "); // if not 006n or XX6n, invalid
588 asm("sub dl, 0x28 ");
591 asm("and dl, 0x1f ");
592 asm("jne short eis2_bad ");	// CPU number mismatch - invalid
593 asm("xor edx, edx ");
595 asm("jne short eis20 "); // XX6n->XX00
596 asm("mov edx, 0x20 ");
597 asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));	// 006n->002g
599 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
600 asm("jne short eis2r ");	// state changed under us - retry
606 /** Transition the state of a DFC just before moving it from the IDFC queue to
609 002g->0001, 008n->0001, XX2g->XX00, XX8n->XX00, XX00->0000
610 other initial states invalid
611 Return original state
613 __NAKED__ TUint32 TDfc::MoveToFinalQStateChange()
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
616 asm("xor eax, eax ");
617 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
619 asm("xor edx, edx ");
620 asm("cmp al, 0xa0 ");
621 asm("jl short mfq1a "); // 80-9F ok	(signed compare)
622 asm("cmp al, 0x20 ");
623 asm("je short mfq1 "); // 20 ok
624 asm("cmp al, 0x21 ");
625 asm("je short mfq1 "); // 21 ok
627 asm("je short mfq_bad "); // 0000 -> bad
628 asm("cmp al, 0 "); // XX00 ok
629 asm("je short mfq0 "); // XX00->0000
630 asm("jmp short mfq_bad "); // not 002g, 008n, XX2g, XX8n, XX00
633 asm("sub dl, 0x28 ");
636 asm("and dl, 0x1f ");
637 asm("jne short mfq_bad ");	// CPU number mismatch - invalid
638 asm("xor edx, edx ");
641 asm("jne short mfq2 ");
643 asm("jmp short mfq0 "); // 002g->0001, 008n->0001
645 asm("mov dh, ah "); // XXYY->XX00
647 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
648 asm("jne short mfqr ");	// state changed under us - retry
654 /** Transition the state of an IDFC when transferring it to another CPU
656 002g->00Am, 008n->00Am, XXYY->XX00, XX00->0000
657 other initial states invalid
658 Return original state
660 Enter and return with interrupts disabled and target CPU's ExIDfcLock held.
662 __NAKED__ TUint32 TDfc::TransferIDFCStateChange(TInt /*aCpu*/)
// NOTE(review): prolog, labels and a few lines are not visible in this chunk.
665 asm("xor eax, eax ");
666 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
668 asm("xor edx, edx ");
669 asm("cmp al, 0xa0 ");
670 asm("jl short tis1a "); // 80-9F ok	(signed compare)
671 asm("cmp al, 0x20 ");
672 asm("je short tis1 "); // 20 ok
673 asm("cmp al, 0x21 ");
674 asm("je short tis1 "); // 21 ok
675 asm("jne short tis_bad "); // not 002g or 008n -> bad
678 asm("sub dl, 0x28 ");
681 asm("and dl, 0x1f ");
682 asm("jne short tis_bad ");	// CPU number mismatch - invalid
683 asm("xor edx, edx ");
686 asm("jne short tis2 ");
687 asm("mov dl, [esp+4] ");	// dl = aCpu (target CPU m, stack argument)
689 asm("jmp short tis0 "); // 002g->00Am, 008n->00Am
692 asm("je short tis0 "); // XX00->0000
693 asm("mov dh, ah "); // XXYY->XX00
695 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
696 asm("jne short tisr ");	// state changed under us - retry
702 /** Transition the state of an IDFC/DFC just before cancelling it.
704 0000->0000, XX00->ZZ00, xxYY->zzYY
705 Return original state
707 Enter and return with interrupts disabled.
709 __NAKED__ TUint32 TDfc::CancelInitialStateChange()
// NOTE(review): prolog, the load feeding bl, and labels are not visible here.
715 asm("add bl, 3 "); // bl = current cpu number + 8
716 asm("xor eax, eax ");
717 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
719 asm("mov edx, eax ");
720 asm("test eax, eax ");
721 asm("je short cis0 "); // 0000->0000
722 asm("bts edx, ebx "); // XX00->ZZ00, xxYY->zzYY	(sets this CPU's bit in the high byte, since ebx = cpu+8)
724 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
725 asm("jne short cisr ");	// state changed under us - retry
730 /** Transition the state of an IDFC/DFC at the end of a cancel operation
732 XXYY->XX00, XX00->0000
733 Return original state
735 Enter and return with interrupts disabled.
737 __NAKED__ TUint32 TDfc::CancelFinalStateChange()
// NOTE(review): prolog, labels and a compare line are not visible here.
740 asm("xor eax, eax ");
741 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
743 asm("xor edx, edx ");
745 asm("je short cfs0 "); // XX00->0000
746 asm("mov dh, ah "); // XXYY->XX00
748 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
749 asm("jne short cfsr ");	// state changed under us - retry
753 /** Transition the state of an IDFC or DFC when QueueOnIdle() is called
755 0000->002g where g = TheScheduler.iIdleGeneration,
756 00Cn->006n, all other states unchanged
757 Return original state.
759 Enter and return with interrupts disabled and IdleSpinLock held.
761 __NAKED__ TUint32 TDfc::QueueOnIdleStateChange()
// NOTE(review): prolog and labels are not visible in this chunk.
764 asm("xor eax, eax ");
765 asm("mov ax, [ecx+10] ");	// ax = 16-bit state word; eax = cmpxchg comparand
767 asm("mov edx, eax ");
768 asm("test eax, eax ");
769 asm("jne short qis1 ");	// nonzero - check for the 00Cn->006n case
770 asm("mov edx, 0x20 ");
771 asm("or dl, [%a0]" : : "i" (&TheScheduler.iIdleGeneration));	// 0000->002g
772 asm("jmp short qis0 ");
774 asm("cmp eax, 0xE0 ");
775 asm("jae short qis0 "); // if outside range 00C0-00DF leave alone
776 asm("cmp eax, 0xC0 ");
777 asm("jb short qis0 ");
778 asm("sub dl, 0x60 "); // 00Cn->006n
780 asm("lock cmpxchg [ecx+10], dx ");	// commit new state if unchanged since read
781 asm("jne short qisr ");	// state changed under us - retry
786 __NAKED__ void TDfc::ResetState()
789 asm("xor eax, eax ");
790 asm("lock xchg ax, [ecx+10] ");
792 asm("je short rst_bad ");