// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// NOTE: the header names on the next three lines were lost in this copy of the file.
#include
#include
#include

// Called by a thread when it first runs
void __StartThread();

void NThreadBase::OnKill()
    {
    }

void NThreadBase::OnExit()
    {
    }

extern void __ltr(TInt /*aSelector*/);

extern "C" TUint __tr();
extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);

TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    if (!aInfo.iStackBase || aInfo.iStackSize<0x100)
        return KErrArgument;
    new (this) NThread;
    TInt cpu = -1;
    if (aInitial)
        {
        cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
        if (cpu==0)
            memset(SubSchedulerLookupTable, 0x9a, sizeof(SubSchedulerLookupTable));
        aInfo.iCpuAffinity = cpu;
        // OK since we can't migrate yet
        TUint32 apicid = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
        TSubScheduler& ss = TheSubSchedulers[cpu];
        ss.i_APICID = (TAny*)(apicid<<24);
        ss.iCurrentThread = this;
        SubSchedulerLookupTable[apicid] = &ss;
        ss.iLastTimestamp64 = NKern::Timestamp();
        iRunCount64 = UI64LIT(1);
        __KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, &ss));
        if (cpu)
            {
            __ltr(TSS_SELECTOR(cpu));
            NIrq::HwInit2AP();
            // NOTE: the original text from the middle of the next statement down to the
            // "tss->iVector" assignment below was damaged in this copy; the intervening
            // lines are reconstructed from the surrounding context.
            __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
            }
        }
    TInt r = NThreadBase::Create(aInfo, aInitial);
    if (r != KErrNone)
        return r;
    if (!aInitial)
        {
        TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
        TLinAddr sp = stack_top;
        TUint32 pb = (TUint32)aInfo.iParameterBlock;
        SThreadStackStub* tss = 0;
        if (aInfo.iParameterBlockSize)
            {
            tss = (SThreadStackStub*)stack_top;
            --tss;
            tss->iVector = SThreadStackStub::EVector;
            tss->iError = 0;
            tss->iEip = 0;
            tss->iCs = 0;
            tss->iEflags = 0;
            sp = (TLinAddr)tss;
            sp -= (TLinAddr)aInfo.iParameterBlockSize;
            wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
            pb = (TUint32)sp;
            tss->iPBlock = sp;
            }
        SThreadInitStack* tis = (SThreadInitStack*)sp;
        --tis;
        tis->iR.iCR0 = X86::DefaultCR0 | KX86CR0_TS;
        tis->iR.iReschedFlag = 1;
        tis->iR.iEip = (TUint32)&__StartThread;
        tis->iR.iReason = 0;
        tis->iX.iEcx = 0;
        tis->iX.iEdx = 0;
        tis->iX.iEbx = pb;                  // parameter block pointer
        tis->iX.iEsi = 0;
        tis->iX.iEdi = 0;
        tis->iX.iEbp = stack_top;
        tis->iX.iEax = (TUint32)aInfo.iFunction;
        tis->iX.iDs = KRing0DS;
        tis->iX.iEs = KRing0DS;
        tis->iX.iFs = 0;
        tis->iX.iGs = KRing0DS;
        tis->iX.iVector = SThreadInitStack::EVector;
        tis->iX.iError = 0;
        tis->iX.iEip = (TUint32)aInfo.iFunction;
        tis->iX.iCs = KRing0CS;
        tis->iX.iEflags = (TUint32)(EX86FlagIF|EX86FlagAC|0x1002);
        tis->iX.iEsp3 = 0xFFFFFFFFu;
        tis->iX.iSs3 = 0xFFFFFFFFu;
        wordmove(&iCoprocessorState, DefaultCoprocessorState, sizeof(iCoprocessorState));
        iSavedSP = (TLinAddr)tis;
        }
    else
        {
        NKern::EnableAllInterrupts();

        // synchronize AP's timestamp with BP's
        if (cpu>0)
            InitAPTimestamp(aInfo);
        }
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
    return KErrNone;
    }

void DumpExcInfo(TX86ExcInfo& a)
    {
    DEBUGPRINT("Exc %02x EFLAGS=%08x FAR=%08x ErrCode=%08x",a.iExcId,a.iEflags,a.iFaultAddress,a.iExcErrorCode);
    DEBUGPRINT("EAX=%08x EBX=%08x ECX=%08x EDX=%08x",a.iEax,a.iEbx,a.iEcx,a.iEdx);
    DEBUGPRINT("ESP=%08x EBP=%08x ESI=%08x EDI=%08x",a.iEsp,a.iEbp,a.iEsi,a.iEdi);
    DEBUGPRINT(" CS=%08x EIP=%08x DS=%08x SS=%08x",a.iCs,a.iEip,a.iDs,a.iSs);
    DEBUGPRINT(" ES=%08x FS=%08x GS=%08x",a.iEs,a.iFs,a.iGs);
    if (a.iCs&3)
        {
        DEBUGPRINT("SS3=%08x ESP3=%08x",a.iSs3,a.iEsp3);
        }
    TScheduler& s = TheScheduler;
    TInt irq = NKern::DisableAllInterrupts();
    TSubScheduler& ss = SubScheduler();
    NThreadBase* ct = ss.iCurrentThread;
    TInt inc = TInt(ss.i_IrqNestCount);
    TInt cpu = ss.iCpuNum;
    NKern::RestoreInterrupts(irq);
    DEBUGPRINT("Thread %T, CPU %d, KLCount=%08x, IrqNest=%d",ct,cpu,ss.iKernLockCount,inc);
    }


// Build a TX86RegSet from the exception/interrupt frame at the top of a thread's
// supervisor stack. aSystem=TRUE requests the supervisor-mode ESP/SS rather than
// the user-mode values.
void GetContextAfterExc(TX86RegSet& aContext, SThreadExcStack* txs, TUint32& aAvailRegistersMask, TBool aSystem)
    {
    TInt cpl = txs->iCs & 3;
    aAvailRegistersMask = 0xffffu;      // EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
    aContext.iEax = txs->iEax;
    aContext.iEbx = txs->iEbx;
    aContext.iEcx = txs->iEcx;
    aContext.iEdx = txs->iEdx;
    if (aSystem)
        {
        aContext.iEsp = TUint32(txs+1);
        if (cpl==0)
            aContext.iEsp -= 8;         // two fewer words pushed if interrupt taken while CPL=0
        aContext.iSs = KRing0DS;
        aAvailRegistersMask &= ~0x2000u;    // SS assumed not read
        }
    else if (cpl==3)
        {
        aContext.iEsp = txs->iEsp3;
        aContext.iSs = txs->iSs3;
        }
    else
        {
        __crash();
        }
    aContext.iEbp = txs->iEbp;
    aContext.iEsi = txs->iEsi;
    aContext.iEdi = txs->iEdi;
    aContext.iCs = txs->iCs;
    aContext.iDs = txs->iDs;
    aContext.iEs = txs->iEs;
    aContext.iFs = txs->iFs;
    aContext.iGs = txs->iGs;
    aContext.iEflags = txs->iEflags;
    aContext.iEip = txs->iEip;
    }

// As above, but for the frame saved on entry to a slow exec (system call), which
// can only have come from user mode (CPL=3).
void GetContextAfterSlowExec(TX86RegSet& aContext, SThreadSlowExecStack* tsxs, TUint32& aAvailRegistersMask)
    {
    TInt cpl = tsxs->iCs & 3;
    if (cpl!=3)
        {
        __crash();
        }
    aAvailRegistersMask = 0xffffu;      // EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
    aContext.iEax = tsxs->iEax;
    aContext.iEbx = tsxs->iEbx;
    aContext.iEcx = tsxs->iEcx;
    aContext.iEdx = tsxs->iEdx;
    aContext.iEsp = tsxs->iEsp3;
    aContext.iSs = tsxs->iSs3;
    aContext.iEbp = tsxs->iEbp;
    aContext.iEsi = tsxs->iEsi;
    aContext.iEdi = tsxs->iEdi;
    aContext.iCs = tsxs->iCs;
    aContext.iDs = tsxs->iDs;
    aContext.iEs = tsxs->iEs;
    aContext.iFs = tsxs->iFs;
    aContext.iGs = tsxs->iGs;
    aContext.iEflags = tsxs->iEflags;
    aContext.iEip = tsxs->iEip;
    }


// Enter and return with kernel locked
void NThread::GetUserContext(TX86RegSet& aContext, TUint32& aAvailRegistersMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aAvailRegistersMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
    TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)   // if not, thread never entered user mode
        {
        if (stack[-7] == 0x21)  // slow exec
            GetContextAfterSlowExec(aContext, ((SThreadSlowExecStack*)stack)-1, aAvailRegistersMask);
        else
            GetContextAfterExc(aContext, ((SThreadExcStack*)stack)-1, aAvailRegistersMask, FALSE);
        }
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }

// IPI used to capture the register state of a thread currently running on another CPU.
class TGetContextIPI : public TGenericIPI
    {
public:
    void Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegistersMask);
    static void Isr(TGenericIPI*);
public:
    TX86RegSet* iContext;
    TUint32* iAvailRegsMask;
    };

void TGetContextIPI::Isr(TGenericIPI* aPtr)
    {
    TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
    TX86RegSet& a = *ipi.iContext;
    TSubScheduler& ss = SubScheduler();
    TUint32* irqstack = (TUint32*)ss.i_IrqStackTop;
    SThreadExcStack* txs = (SThreadExcStack*)irqstack[-1];  // first word pushed on IRQ stack points to thread supervisor stack
    GetContextAfterExc(a, txs, *ipi.iAvailRegsMask, TRUE);
    }

void TGetContextIPI::Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegsMask)
    {
    iContext = &aContext;
    iAvailRegsMask = &aAvailRegsMask;
    // NOTE: the original text from the middle of the next statement down to the
    // "if (iReady && iParent->iReady)" test in GetSystemContext() below was damaged in this
    // copy; the intervening lines are reconstructed from the matching GetUserContext()/
    // SetUserContext() code above and from the caller NKern::ThreadGetSystemContext().
    Queue(&Isr, 1u<<aCpu);
    WaitCompletion();
    }

// Enter and return with kernel locked
void NThread::GetSystemContext(TX86RegSet& aContext, TUint32& aAvailRegsMask)
    {
    aAvailRegsMask = 0;
    TSubScheduler* ss = 0;
    AcqSLock();
    if (iWaitState.ThreadIsDead())
        {
        RelSLock();
        return;
        }
    if (iReady && iParent->iReady)
        {
        ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
        ss->iReadyListLock.LockOnly();
        }
    if (iCurrent)
        {
        // thread is actually running on another CPU
        // use an interprocessor interrupt to get a snapshot of the state
        TGetContextIPI ipi;
        ipi.Get(iLastCpu, aContext, aAvailRegsMask);
        }
    else
        {
        // thread is not running and can't start
        SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
        TUint32 kct = trs->iReason;
        TLinAddr sp = TLinAddr(trs+1);
        TUint32* stack = (TUint32*)sp;
        switch (kct)
            {
            case 0:     // thread not yet started
                {
                aContext.iEcx = stack[0];
                aContext.iEdx = stack[1];
                aContext.iEbx = stack[2];
                aContext.iEsi = stack[3];
                aContext.iEdi = stack[4];
                aContext.iEbp = stack[5];
                aContext.iEax = stack[6];
                aContext.iDs = stack[7];
                aContext.iEs = stack[8];
                aContext.iFs = stack[9];
                aContext.iGs = stack[10];
                aContext.iEsp = sp + 40 - 8;    // entry to initial function
                aContext.iEip = aContext.iEax;
                aContext.iEflags = 0x41202;     // guess
                aContext.iCs = KRing0CS;
                aContext.iSs = KRing0DS;
                aAvailRegsMask = 0x9effu;
                break;
                }
            case 1:     // unlock
                {
                aContext.iFs = stack[0];
                aContext.iGs = stack[1];
                aContext.iEbx = stack[2];
                aContext.iEbp = stack[3];
                aContext.iEdi = stack[4];
                aContext.iEsi = stack[5];
                aContext.iEip = stack[6];       // return address from NKern::Unlock()
                aContext.iCs = KRing0CS;
                aContext.iDs = KRing0DS;
                aContext.iEs = KRing0DS;
                aContext.iSs = KRing0DS;
                aContext.iEsp = sp + 28;        // ESP after return from NKern::Unlock()
                aContext.iEax = 0;              // unknown
                aContext.iEcx = 0;              // unknown
                aContext.iEdx = 0;              // unknown
                aContext.iEflags = 0x41202;     // guess
                aAvailRegsMask = 0x98f2u;       // EIP,GS,FS,EDI,ESI,EBP,ESP,EBX available, others guessed or unavailable
                break;
                }
            case 2:     // IRQ
                {
                GetContextAfterExc(aContext, (SThreadExcStack*)sp, aAvailRegsMask, TRUE);
                break;
                }
            default:    // unknown reschedule reason
                __NK_ASSERT_ALWAYS(0);
            }
        }
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    RelSLock();
    }

// Enter and return with kernel locked
void NThread::SetUserContext(const TX86RegSet& aContext, TUint32& aRegMask)
    {
    NThread* pC = NCurrentThreadL();
    TSubScheduler* ss = 0;
    if (pC != this)
        {
        AcqSLock();
        if (iWaitState.ThreadIsDead())
            {
            RelSLock();
            aRegMask = 0;
            return;
            }
        if (iReady && iParent->iReady)
            {
            ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
            ss->iReadyListLock.LockOnly();
            }
        if (iCurrent)
            {
            // thread is actually running on another CPU
            // interrupt that CPU and wait for it to enter interrupt mode
            // this allows a snapshot of the thread user state to be observed
            // and ensures the thread cannot return to user mode
            send_resched_ipi_and_wait(iLastCpu);
            }
        }
    TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
    SThreadExcStack* txs = 0;
    SThreadSlowExecStack* tsxs = 0;
    aRegMask &= 0xffffu;
    if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)   // if not, thread never entered user mode
        {
        if (stack[-7] == 0x21)  // slow exec
            tsxs = ((SThreadSlowExecStack*)stack)-1;
        else
            txs = ((SThreadExcStack*)stack)-1;

#define WRITE_REG(reg, value) \
        { if (tsxs) tsxs->reg=(value); else txs->reg=(value); }

        if (aRegMask & 0x0001u)
            WRITE_REG(iEax, aContext.iEax);
        if (aRegMask & 0x0002u)
            WRITE_REG(iEbx, aContext.iEbx);
        if (aRegMask & 0x0004u)
            {
            // don't allow write to iEcx if in slow exec since this may conflict
            // with handle preprocessing
            if (tsxs)
                aRegMask &= ~0x0004u;
            else
                txs->iEcx = aContext.iEcx;
            }
        if (aRegMask & 0x0008u)
            WRITE_REG(iEdx, aContext.iEdx);
        if (aRegMask & 0x0010u)
            WRITE_REG(iEsp3, aContext.iEsp);
        if (aRegMask & 0x0020u)
            WRITE_REG(iEbp, aContext.iEbp);
        if (aRegMask & 0x0040u)
            WRITE_REG(iEsi, aContext.iEsi);
        if (aRegMask & 0x0080u)
            WRITE_REG(iEdi, aContext.iEdi);
        if (aRegMask & 0x0100u)
            WRITE_REG(iCs, aContext.iCs|3);
        if (aRegMask & 0x0200u)
            WRITE_REG(iDs, aContext.iDs|3);
        if (aRegMask & 0x0400u)
            WRITE_REG(iEs, aContext.iEs|3);
        if (aRegMask & 0x0800u)
            WRITE_REG(iFs, aContext.iFs|3);
        if (aRegMask & 0x1000u)
            WRITE_REG(iGs, aContext.iGs|3);
        if (aRegMask & 0x2000u)
            WRITE_REG(iSs3, aContext.iSs|3);
        if (aRegMask & 0x4000u)
            WRITE_REG(iEflags, aContext.iEflags);
        if (aRegMask & 0x8000u)
            WRITE_REG(iEip, aContext.iEip);
        }
    else
        aRegMask = 0;
    if (pC != this)
        {
        if (ss)
            ss->iReadyListLock.UnlockOnly();
        RelSLock();
        }
    }


/** Get (subset of) user context of specified thread.

The nanokernel does not systematically save all registers on the supervisor
stack on entry into privileged mode, and the exact subset depends on why the
switch to privileged mode occurred. So, in general, only a subset of the
register set is available.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TX86RegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bits represent fields in TX86RegSet, i.e.
0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

@see TX86RegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    memclr(aContext, sizeof(TX86RegSet));
    NKern::Lock();
    aThread->GetUserContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }


/** Get (subset of) system context of specified thread.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TX86RegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bits represent fields in TX86RegSet, i.e.
0:EAX 1:EBX 2:ECX 3:EDX 4:ESP 5:EBP 6:ESI 7:EDI
8:CS 9:DS 10:ES 11:FS 12:GS 13:SS 14:EFLAGS 15:EIP

@see TX86RegSet
@see ThreadGetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    memclr(aContext, sizeof(TX86RegSet));
    NKern::Lock();
    aThread->GetSystemContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }
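
// Illustrative sketch, not part of the original file: how a caller might use
// NKern::ThreadGetUserContext() together with the aAvailRegistersMask bit layout
// documented above, trusting only registers reported as valid. The helper name
// and the use of DEBUGPRINT for output are assumptions made for the example.
static void ExampleDumpUserEipEsp(NThread* aThread)
    {
    TX86RegSet regs;
    TUint32 avail = 0;
    NKern::ThreadGetUserContext(aThread, &regs, avail);
    if (avail & 0x8000u)        // bit 15: EIP was captured
        DEBUGPRINT("user EIP=%08x", regs.iEip);
    if (avail & 0x0010u)        // bit 4: ESP was captured
        DEBUGPRINT("user ESP=%08x", regs.iEsp);
    }
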
/** Set (subset of) user context of specified thread.

@param aThread Thread to modify. It can be the current thread or a
non-current one.

@param aContext Pointer to TX86RegSet structure containing the context
to set. The values of registers which aren't part of the context saved
on the supervisor stack are ignored.

@see TX86RegSet
@see ThreadGetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
    TX86RegSet& a = *(TX86RegSet*)aContext;
    TUint32 mask = 0xffffu;
    NKern::Lock();
    aThread->SetUserContext(a, mask);
    NKern::Unlock();
    }


/** Return the total CPU time so far used by the specified thread.

@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
    {
    TSubScheduler* ss = 0;
    NKern::Lock();
    aThread->AcqSLock();
    if (aThread->i_NThread_Initial)
        ss = &TheSubSchedulers[aThread->iLastCpu];
    else if (aThread->iReady && aThread->iParent->iReady)
        ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
    if (ss)
        ss->iReadyListLock.LockOnly();
    TUint64 t = aThread->iTotalCpuTime64;
    if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
        t += (NKern::Timestamp() - ss->iLastTimestamp64);
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    aThread->RelSLock();
    NKern::Unlock();
    return t;
    }
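
// Illustrative sketch, not part of the original file: converting the tick count
// returned by NKern::ThreadCpuTime() into microseconds, assuming
// NKern::CpuTimeMeasFreq() (referenced in the doc comment above) returns the
// measurement frequency in Hz. The helper name is an assumption; the intermediate
// multiplication can overflow for extremely large tick counts.
static TUint64 ExampleThreadCpuTimeUs(NThread* aThread)
    {
    TUint64 ticks = NKern::ThreadCpuTime(aThread);
    TUint64 freqHz = NKern::CpuTimeMeasFreq();          // ticks per second
    return (ticks * UI64LIT(1000000)) / freqHz;
    }
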

extern "C" void __fastcall add_dfc(TDfc* aDfc)
    {
    aDfc->Add();
    }


TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
    {
    __e32_memory_barrier();
    if (aCallback->iNext != KUserModeCallbackUnqueued)
        return KErrInUse;
    TInt result = KErrDied;
    NKern::Lock();
    TUserModeCallback* listHead = aThread->iUserModeCallbacks;
    do  {
        if (TLinAddr(listHead) & 3)
            goto done;  // thread exiting
        aCallback->iNext = listHead;
        } while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
    result = KErrNone;

    if (!listHead)  // if this isn't the first callback, someone else will have done this bit
        {
        /*
         * If aThread is currently running on another CPU we need to send an IPI so
         * that it will enter kernel mode and run the callback.
         * The synchronization is tricky here. We want to check if the thread is
         * running and if so on which core. We need to avoid any possibility of
         * the thread entering user mode without having seen the callback,
         * either because we thought it wasn't running so didn't send an IPI or
         * because the thread migrated after we looked and we sent the IPI to
         * the wrong processor. Sending a redundant IPI is not a problem (e.g.
         * because the thread is running in kernel mode - which we can't tell -
         * or because the thread stopped running after we looked).
         * The following events are significant:
         * Event A: Target thread writes to iCurrent when it starts running
         * Event B: Target thread reads iUserModeCallbacks before entering user
         *          mode
         * Event C: This thread writes to iUserModeCallbacks
         * Event D: This thread reads iCurrent to check if aThread is running
         * There is a barrier between A and B since A occurs with the ready
         * list lock for the CPU involved or the thread lock for aThread held
         * and this lock is released before B occurs.
         * There is a barrier between C and D (__e32_atomic_cas_ord_ptr).
         * Any observer which observes B must also have observed A.
         * Any observer which observes D must also have observed C.
         * If aThread observes B before C (i.e. enters user mode without running
         * the callback) it must observe A before C and so it must also observe
         * A before D (i.e. D reads the correct value for iCurrent).
         */
        TInt current = aThread->iCurrent;
        if (current)
            {
            TInt cpu = current & NSchedulable::EReadyCpuMask;
            if (cpu != NKern::CurrentCpu())
                send_resched_ipi(cpu);
            }
        }
done:
    NKern::Unlock();
    return result;
    }
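
// Illustrative sketch, not part of the original file: how a kernel-side caller
// might interpret the result codes produced by NKern::QueueUserModeCallback()
// above. The wrapper name is hypothetical; the callback object must have been
// set up elsewhere and must not already be queued on any thread.
static TInt ExampleQueueCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
    {
    TInt r = NKern::QueueUserModeCallback(aThread, aCallback);
    if (r == KErrInUse)
        {
        // aCallback is already on some thread's callback list - don't reuse it yet
        }
    else if (r == KErrDied)
        {
        // aThread is exiting; the callback was not queued and will never run
        }
    return r;   // KErrNone: queued, the thread will run it before returning to user mode
    }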