os/kernelhwsrv/kernel/eka/nkernsmp/x86/ncthrd.cpp
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <x86.h>
#include <apic.h>
#include <nk_irq.h>

// Called by a thread when it first runs
void __StartThread();

void NThreadBase::OnKill()
	{
	}

void NThreadBase::OnExit()
	{
	}

extern void __ltr(TInt /*aSelector*/);

extern "C" TUint __tr();
extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);

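// NThread::Create
//
// For the initial thread on each CPU (aInitial true) the thread is already
// running: this path numbers the CPU, points SubSchedulerLookupTable[APIC ID]
// at the CPU's TSubScheduler and, on application processors, loads the TSS
// and marks the CPU active. For an ordinary thread, a supervisor stack image
// is fabricated instead so that the first reschedule onto the thread enters
// __StartThread (via iR.iEip) with the entry point in EAX/EIP and the copied
// parameter block pointer in EBX. Sketch of the image, growing down from the
// stack top:
//
//		SThreadStackStub	dummy exception frame, present only if there is
//							a parameter block; iPBlock records where the
//							block was copied
//		parameter block		copy of aInfo.iParameterBlock
//		SThreadInitStack	iR: reschedule frame, iX: register image for the
//							new thread, with iEsp3/iSs3 seeded to 0xFFFFFFFF
//
// The context-get/set functions below rely on that 0xFFFFFFFF sentinel to
// detect a thread which has never entered user mode.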
TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
	{
	if (!aInfo.iStackBase || aInfo.iStackSize<0x100)
		return KErrArgument;
	new (this) NThread;
	TInt cpu = -1;
	if (aInitial)
		{
		cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
		if (cpu==0)
			memset(SubSchedulerLookupTable, 0x9a, sizeof(SubSchedulerLookupTable));
		aInfo.iCpuAffinity = cpu;
		// OK since we can't migrate yet
		TUint32 apicid = *(volatile TUint32*)(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID) >> 24;
		TSubScheduler& ss = TheSubSchedulers[cpu];
		ss.i_APICID = (TAny*)(apicid<<24);
		ss.iCurrentThread = this;
		SubSchedulerLookupTable[apicid] = &ss;
		ss.iLastTimestamp64 = NKern::Timestamp();
		iRunCount64 = UI64LIT(1);
		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d APICID=%08x ss=%08x", cpu, apicid, &ss));
		if (cpu)
			{
			__ltr(TSS_SELECTOR(cpu));
			NIrq::HwInit2AP();
			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
			__KTRACE_OPT(KBOOT,DEBUGPRINT("AP TR=%x",__tr()));
			}
		}
	TInt r=NThreadBase::Create(aInfo,aInitial);
	if (r!=KErrNone)
		return r;
	if (!aInitial)
		{
		TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
		TLinAddr sp = stack_top;
		TUint32 pb = (TUint32)aInfo.iParameterBlock;
		SThreadStackStub* tss = 0;
		if (aInfo.iParameterBlockSize)
			{
			tss = (SThreadStackStub*)stack_top;
			--tss;
			tss->iVector = SThreadStackStub::EVector;
			tss->iError = 0;
			tss->iEip = 0;
			tss->iCs = 0;
			tss->iEflags = 0;
			sp = (TLinAddr)tss;
			sp -= (TLinAddr)aInfo.iParameterBlockSize;
			wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
			pb = (TUint32)sp;
			tss->iPBlock = sp;
			}
		SThreadInitStack* tis = (SThreadInitStack*)sp;
		--tis;
		tis->iR.iCR0 = X86::DefaultCR0 | KX86CR0_TS;
		tis->iR.iReschedFlag = 1;
		tis->iR.iEip = (TUint32)&__StartThread;
		tis->iR.iReason = 0;
		tis->iX.iEcx = 0;
		tis->iX.iEdx = 0;
		tis->iX.iEbx = pb;		// parameter block pointer
		tis->iX.iEsi = 0;
		tis->iX.iEdi = 0;
		tis->iX.iEbp = stack_top;
		tis->iX.iEax = (TUint32)aInfo.iFunction;
		tis->iX.iDs = KRing0DS;
		tis->iX.iEs = KRing0DS;
		tis->iX.iFs = 0;
		tis->iX.iGs = KRing0DS;
		tis->iX.iVector = SThreadInitStack::EVector;
		tis->iX.iError = 0;
		tis->iX.iEip = (TUint32)aInfo.iFunction;
		tis->iX.iCs = KRing0CS;
		tis->iX.iEflags = (TUint32)(EX86FlagIF|EX86FlagAC|0x1002);
		tis->iX.iEsp3 = 0xFFFFFFFFu;
		tis->iX.iSs3 = 0xFFFFFFFFu;
		wordmove(&iCoprocessorState, DefaultCoprocessorState, sizeof(iCoprocessorState));
		iSavedSP = (TLinAddr)tis;
		}
	else
		{
		NKern::EnableAllInterrupts();

		// synchronize AP's timestamp with BP's
		if (cpu>0)
			InitAPTimestamp(aInfo);
		}
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
	return KErrNone;
	}

void DumpExcInfo(TX86ExcInfo& a)
	{
	DEBUGPRINT("Exc %02x EFLAGS=%08x FAR=%08x ErrCode=%08x",a.iExcId,a.iEflags,a.iFaultAddress,a.iExcErrorCode);
	DEBUGPRINT("EAX=%08x EBX=%08x ECX=%08x EDX=%08x",a.iEax,a.iEbx,a.iEcx,a.iEdx);
	DEBUGPRINT("ESP=%08x EBP=%08x ESI=%08x EDI=%08x",a.iEsp,a.iEbp,a.iEsi,a.iEdi);
	DEBUGPRINT(" CS=%08x EIP=%08x  DS=%08x  SS=%08x",a.iCs,a.iEip,a.iDs,a.iSs);
	DEBUGPRINT(" ES=%08x  FS=%08x  GS=%08x",a.iEs,a.iFs,a.iGs);
	if (a.iCs&3)
		{
		DEBUGPRINT("SS3=%08x ESP3=%08x",a.iSs3,a.iEsp3);
		}
	TScheduler& s = TheScheduler;
	TInt irq = NKern::DisableAllInterrupts();
	TSubScheduler& ss = SubScheduler();
	NThreadBase* ct = ss.iCurrentThread;
	TInt inc = TInt(ss.i_IrqNestCount);
	TInt cpu = ss.iCpuNum;
	NKern::RestoreInterrupts(irq);
	DEBUGPRINT("Thread %T, CPU %d, KLCount=%08x, IrqNest=%d",ct,cpu,ss.iKernLockCount,inc);
	}


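// Recover a thread's register state from the SThreadExcStack frame pushed
// when it entered supervisor mode. One x86 detail matters here: the CPU
// pushes SS:ESP only on a privilege change, so a frame created at CPL=0
// lacks iEsp3/iSs3 and the true pre-exception ESP is the end of the frame
// minus those two missing words; SS is then assumed to be KRing0DS and bit
// 13 is cleared in the availability mask. Mask bits follow TX86RegSet order:
//	0:EAX  1:EBX  2:ECX  3:EDX  4:ESP  5:EBP  6:ESI  7:EDI
//	8:CS   9:DS  10:ES  11:FS  12:GS  13:SS  14:EFLAGS  15:EIP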
void GetContextAfterExc(TX86RegSet& aContext, SThreadExcStack* txs, TUint32& aAvailRegistersMask, TBool aSystem)
	{
	TInt cpl = txs->iCs & 3;
	aAvailRegistersMask = 0xffffu;	// EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
	aContext.iEax = txs->iEax;
	aContext.iEbx = txs->iEbx;
	aContext.iEcx = txs->iEcx;
	aContext.iEdx = txs->iEdx;
	if (aSystem)
		{
		aContext.iEsp = TUint32(txs+1);
		if (cpl==0)
			aContext.iEsp -= 8;		// two fewer words pushed if interrupt taken while CPL=0
		aContext.iSs = KRing0DS;
		aAvailRegistersMask &= ~0x2000u;	// SS assumed, not read
		}
	else if (cpl==3)
		{
		aContext.iEsp = txs->iEsp3;
		aContext.iSs = txs->iSs3;
		}
	else
		{
		__crash();
		}
	aContext.iEbp = txs->iEbp;
	aContext.iEsi = txs->iEsi;
	aContext.iEdi = txs->iEdi;
	aContext.iCs = txs->iCs;
	aContext.iDs = txs->iDs;
	aContext.iEs = txs->iEs;
	aContext.iFs = txs->iFs;
	aContext.iGs = txs->iGs;
	aContext.iEflags = txs->iEflags;
	aContext.iEip = txs->iEip;
	}

void GetContextAfterSlowExec(TX86RegSet& aContext, SThreadSlowExecStack* tsxs, TUint32& aAvailRegistersMask)
	{
	TInt cpl = tsxs->iCs & 3;
	if (cpl!=3)
		{
		__crash();
		}
	aAvailRegistersMask = 0xffffu;	// EAX,EBX,ECX,EDX,ESP,EBP,ESI,EDI,CS,DS,ES,FS,GS,SS,EFLAGS,EIP all valid
	aContext.iEax = tsxs->iEax;
	aContext.iEbx = tsxs->iEbx;
	aContext.iEcx = tsxs->iEcx;
	aContext.iEdx = tsxs->iEdx;
	aContext.iEsp = tsxs->iEsp3;
	aContext.iSs = tsxs->iSs3;
	aContext.iEbp = tsxs->iEbp;
	aContext.iEsi = tsxs->iEsi;
	aContext.iEdi = tsxs->iEdi;
	aContext.iCs = tsxs->iCs;
	aContext.iDs = tsxs->iDs;
	aContext.iEs = tsxs->iEs;
	aContext.iFs = tsxs->iFs;
	aContext.iGs = tsxs->iGs;
	aContext.iEflags = tsxs->iEflags;
	aContext.iEip = tsxs->iEip;
	}


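// User-side context of a thread: the last frame pushed on the supervisor
// stack before leaving user mode still sits at the stack top. The checks on
// stack[-1]/stack[-2] below test the iSs3/iEsp3 sentinels (0xFFFFFFFF, set
// up by NThread::Create) to detect a thread which has never been to user
// mode, and stack[-7] is the saved vector number, so vector 0x21 selects
// the larger slow-exec frame layout.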
// Enter and return with kernel locked
void NThread::GetUserContext(TX86RegSet& aContext, TUint32& aAvailRegistersMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		AcqSLock();
		if (iWaitState.ThreadIsDead())
			{
			RelSLock();
			aAvailRegistersMask = 0;
			return;
			}
		if (iReady && iParent->iReady)
			{
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)	// if not, thread never entered user mode
		{
		if (stack[-7] == 0x21)	// slow exec
			GetContextAfterSlowExec(aContext, ((SThreadSlowExecStack*)stack)-1, aAvailRegistersMask);
		else
			GetContextAfterExc(aContext, ((SThreadExcStack*)stack)-1, aAvailRegistersMask, FALSE);
		}
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}

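// TGetContextIPI captures the register state of a thread currently executing
// on another CPU: Get() queues Isr() on that CPU via a generic IPI and waits
// in WaitCompletion() until it has run. The ISR recovers the interrupted
// thread's SThreadExcStack frame from the first word pushed on that CPU's
// IRQ stack.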
class TGetContextIPI : public TGenericIPI
	{
public:
	void Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegistersMask);
	static void Isr(TGenericIPI*);
public:
	TX86RegSet* iContext;
	TUint32* iAvailRegsMask;
	};

void TGetContextIPI::Isr(TGenericIPI* aPtr)
	{
	TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
	TX86RegSet& a = *ipi.iContext;
	TSubScheduler& ss = SubScheduler();
	TUint32* irqstack = (TUint32*)ss.i_IrqStackTop;
	SThreadExcStack* txs = (SThreadExcStack*)irqstack[-1];	// first word pushed on IRQ stack points to thread supervisor stack
	GetContextAfterExc(a, txs, *ipi.iAvailRegsMask, TRUE);
	}

void TGetContextIPI::Get(TInt aCpu, TX86RegSet& aContext, TUint32& aAvailRegsMask)
	{
	iContext = &aContext;
	iAvailRegsMask = &aAvailRegsMask;
	Queue(&Isr, 1u<<aCpu);
	WaitCompletion();
	}

// Enter and return with kernel locked
void NThread::GetSystemContext(TX86RegSet& aContext, TUint32& aAvailRegsMask)
	{
	aAvailRegsMask = 0;
	NThread* pC = NCurrentThreadL();
	__NK_ASSERT_ALWAYS(pC!=this);
	TSubScheduler* ss = 0;
	AcqSLock();
	if (iWaitState.ThreadIsDead())
		{
		RelSLock();
		return;
		}
	if (iReady && iParent->iReady)
		{
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	if (iCurrent)
		{
		// thread is actually running on another CPU
		// use an interprocessor interrupt to get a snapshot of the state
		TGetContextIPI ipi;
		ipi.Get(iLastCpu, aContext, aAvailRegsMask);
		}
	else
		{
		// thread is not running and can't start
		SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
		TUint32 kct = trs->iReason;
		TLinAddr sp = TLinAddr(trs+1);
		TUint32* stack = (TUint32*)sp;
		switch (kct)
			{
			case 0:	// thread not yet started
				{
				aContext.iEcx = stack[0];
				aContext.iEdx = stack[1];
				aContext.iEbx = stack[2];
				aContext.iEsi = stack[3];
				aContext.iEdi = stack[4];
				aContext.iEbp = stack[5];
				aContext.iEax = stack[6];
				aContext.iDs = stack[7];
				aContext.iEs = stack[8];
				aContext.iFs = stack[9];
				aContext.iGs = stack[10];
				aContext.iEsp = sp + 40 - 8;	// entry to initial function
				aContext.iEip = aContext.iEax;
				aContext.iEflags = 0x41202;		// guess
				aContext.iCs = KRing0CS;
				aContext.iSs = KRing0DS;
				aAvailRegsMask = 0x9effu;
				break;
				}
			case 1:	// unlock
				{
				aContext.iFs = stack[0];
				aContext.iGs = stack[1];
				aContext.iEbx = stack[2];
				aContext.iEbp = stack[3];
				aContext.iEdi = stack[4];
				aContext.iEsi = stack[5];
				aContext.iEip = stack[6];	// return address from NKern::Unlock()
				aContext.iCs = KRing0CS;
				aContext.iDs = KRing0DS;
				aContext.iEs = KRing0DS;
				aContext.iSs = KRing0DS;
				aContext.iEsp = sp + 28;	// ESP after return from NKern::Unlock()
				aContext.iEax = 0;	// unknown
				aContext.iEcx = 0;	// unknown
				aContext.iEdx = 0;	// unknown
				aContext.iEflags = 0x41202;	// guess
				aAvailRegsMask = 0x98f2u;	// EIP,GS,FS,EDI,ESI,EBP,ESP,EBX available, others guessed or unavailable
				break;
				}
			case 2:	// IRQ
				{
				GetContextAfterExc(aContext, (SThreadExcStack*)sp, aAvailRegsMask, TRUE);
				break;
				}
			default:	// unknown reschedule reason
				__NK_ASSERT_ALWAYS(0);
			}
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	RelSLock();
	}

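// Writing a user-side context back is the mirror of GetUserContext: the same
// sentinel test finds the user-mode entry frame at the supervisor stack top,
// then only the registers selected by aRegMask are patched. Two safety rules
// are applied below: segment selectors are ORed with 3 so a caller can never
// hand the thread ring-0 selectors, and ECX is not writable in a slow-exec
// frame because it may conflict with handle preprocessing.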
// Enter and return with kernel locked
void NThread::SetUserContext(const TX86RegSet& aContext, TUint32& aRegMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		AcqSLock();
		if (iWaitState.ThreadIsDead())
			{
			RelSLock();
			aRegMask = 0;
			return;
			}
		if (iReady && iParent->iReady)
			{
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	TUint32* stack = (TUint32*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	SThreadExcStack* txs = 0;
	SThreadSlowExecStack* tsxs = 0;
	aRegMask &= 0xffffu;
	if (stack[-1]!=0xFFFFFFFFu && stack[-2]!=0xFFFFFFFFu && stack[-7]<0x100u)	// if not, thread never entered user mode
		{
		if (stack[-7] == 0x21)	// slow exec
			tsxs = ((SThreadSlowExecStack*)stack)-1;
		else
			txs = ((SThreadExcStack*)stack)-1;

#define WRITE_REG(reg, value)	\
			{ if (tsxs) tsxs->reg=(value); else txs->reg=(value); }

		if (aRegMask & 0x0001u)
			WRITE_REG(iEax, aContext.iEax);
		if (aRegMask & 0x0002u)
			WRITE_REG(iEbx, aContext.iEbx);
		if (aRegMask & 0x0004u)
			{
			// don't allow write to iEcx if in slow exec since this may conflict
			// with handle preprocessing
			if (tsxs)
				aRegMask &= ~0x0004u;
			else
				txs->iEcx = aContext.iEcx;
			}
		if (aRegMask & 0x0008u)
			WRITE_REG(iEdx, aContext.iEdx);
		if (aRegMask & 0x0010u)
			WRITE_REG(iEsp3, aContext.iEsp);
		if (aRegMask & 0x0020u)
			WRITE_REG(iEbp, aContext.iEbp);
		if (aRegMask & 0x0040u)
			WRITE_REG(iEsi, aContext.iEsi);
		if (aRegMask & 0x0080u)
			WRITE_REG(iEdi, aContext.iEdi);
		if (aRegMask & 0x0100u)
			WRITE_REG(iCs, aContext.iCs|3);
		if (aRegMask & 0x0200u)
			WRITE_REG(iDs, aContext.iDs|3);
		if (aRegMask & 0x0400u)
			WRITE_REG(iEs, aContext.iEs|3);
		if (aRegMask & 0x0800u)
			WRITE_REG(iFs, aContext.iFs|3);
		if (aRegMask & 0x1000u)
			WRITE_REG(iGs, aContext.iGs|3);
		if (aRegMask & 0x2000u)
			WRITE_REG(iSs3, aContext.iSs|3);
		if (aRegMask & 0x4000u)
			WRITE_REG(iEflags, aContext.iEflags);
		if (aRegMask & 0x8000u)
			WRITE_REG(iEip, aContext.iEip);
		}
	else
		aRegMask = 0;
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}

/** Get (subset of) user context of specified thread.

	The nanokernel does not systematically save all registers in the supervisor
	stack on entry into privileged mode and the exact subset depends on why the
	switch to privileged mode occurred.  So in general only a subset of the
	register set is available.

	@param aThread	Thread to inspect.  It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure where the context is
	copied.

	@param aAvailRegistersMask Bit mask telling which subset of the context is
	available and has been copied to aContext (1: register available / 0: not
	available). Bits represent fields in TX86RegSet, i.e.
	0:EAX	1:EBX	2:ECX	3:EDX	4:ESP	5:EBP	6:ESI	7:EDI
	8:CS	9:DS	10:ES	11:FS	12:GS	13:SS	14:EFLAGS 15:EIP

	@see TX86RegSet
	@see ThreadSetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
 */
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	memclr(aContext, sizeof(TX86RegSet));
	NKern::Lock();
	aThread->GetUserContext(a, aAvailRegistersMask);
	NKern::Unlock();
	}

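// A minimal usage sketch (hypothetical kernel-side caller, not part of this
// file):
//
//	TX86RegSet regs;
//	TUint32 avail = 0;
//	NKern::ThreadGetUserContext(aThread, &regs, avail);
//	if (avail & 0x8000u)		// bit 15: EIP was recoverable
//		DEBUGPRINT("EIP=%08x", regs.iEip);
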
/** Get (subset of) system context of specified thread.

	@param aThread	Thread to inspect.  It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure where the context is
	copied.

	@param aAvailRegistersMask Bit mask telling which subset of the context is
	available and has been copied to aContext (1: register available / 0: not
	available). Bits represent fields in TX86RegSet, i.e.
	0:EAX	1:EBX	2:ECX	3:EDX	4:ESP	5:EBP	6:ESI	7:EDI
	8:CS	9:DS	10:ES	11:FS	12:GS	13:SS	14:EFLAGS 15:EIP

	@see TX86RegSet
	@see ThreadGetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
 */
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	memclr(aContext, sizeof(TX86RegSet));
	NKern::Lock();
	aThread->GetSystemContext(a, aAvailRegistersMask);
	NKern::Unlock();
	}

/** Set (subset of) user context of specified thread.

	@param aThread	Thread to modify.  It can be the current thread or a
	non-current one.

	@param aContext	Pointer to TX86RegSet structure containing the context
	to set.  The values of registers which aren't part of the context saved
	on the supervisor stack are ignored.

	@see TX86RegSet
	@see ThreadGetUserContext

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
 */
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
	TX86RegSet& a = *(TX86RegSet*)aContext;
	TUint32 mask = 0xffffu;
	NKern::Lock();
	aThread->SetUserContext(a, mask);
	NKern::Unlock();
	}

/** Return the total CPU time so far used by the specified thread.

	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
	{
	TSubScheduler* ss = 0;
	NKern::Lock();
	aThread->AcqSLock();
	if (aThread->i_NThread_Initial)
		ss = &TheSubSchedulers[aThread->iLastCpu];
	else if (aThread->iReady && aThread->iParent->iReady)
		ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
	if (ss)
		ss->iReadyListLock.LockOnly();
	TUint64 t = aThread->iTotalCpuTime64;
	if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
		t += (NKern::Timestamp() - ss->iLastTimestamp64);
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	aThread->RelSLock();
	NKern::Unlock();
	return t;
	}

extern "C" void __fastcall add_dfc(TDfc* aDfc)
	{
	aDfc->Add();
	}


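// Queue a callback to run when aThread next returns to user mode.
// iUserModeCallbacks is a lock-free singly linked stack: new entries are
// pushed with a compare-and-swap, and a list head with either of its bottom
// two bits set marks an exiting thread, in which case the push is abandoned
// and KErrDied returned.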
TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
	{
	__e32_memory_barrier();
	if (aCallback->iNext != KUserModeCallbackUnqueued)
		return KErrInUse;
	TInt result = KErrDied;
	NKern::Lock();
	TUserModeCallback* listHead = aThread->iUserModeCallbacks;
	do	{
		if (TLinAddr(listHead) & 3)
			goto done;	// thread exiting
		aCallback->iNext = listHead;
		} while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
	result = KErrNone;

	if (!listHead)	// if this isn't the first callback, someone else will have done this bit
		{
		/*
		 * If aThread is currently running on another CPU we need to send an IPI so
		 * that it will enter kernel mode and run the callback.
		 * The synchronization is tricky here. We want to check if the thread is
		 * running and, if so, on which core. We need to avoid any possibility of
		 * the thread entering user mode without having seen the callback,
		 * either because we thought it wasn't running so didn't send an IPI, or
		 * because the thread migrated after we looked and we sent the IPI to
		 * the wrong processor. Sending a redundant IPI is not a problem (e.g.
		 * because the thread is running in kernel mode - which we can't tell -
		 * or because the thread stopped running after we looked).
		 * The following events are significant:
		 * Event A:	Target thread writes to iCurrent when it starts running
		 * Event B: Target thread reads iUserModeCallbacks before entering user
		 *			mode
		 * Event C: This thread writes to iUserModeCallbacks
		 * Event D: This thread reads iCurrent to check if aThread is running
		 * There is a barrier between A and B since A occurs with the ready
		 * list lock for the CPU involved or the thread lock for aThread held,
		 * and this lock is released before B occurs.
		 * There is a barrier between C and D (__e32_atomic_cas_ord_ptr).
		 * Any observer which observes B must also have observed A.
		 * Any observer which observes D must also have observed C.
		 * If aThread observes B before C (i.e. enters user mode without running
		 * the callback) it must observe A before C, and so it must also observe
		 * A before D (i.e. D reads the correct value for iCurrent).
		 */
		TInt current = aThread->iCurrent;
		if (current)
			{
			TInt cpu = current & NSchedulable::EReadyCpuMask;
			if (cpu != NKern::CurrentCpu())
				send_resched_ipi(cpu);
			}
		}
done:
	NKern::Unlock();
	return result;
	}