os/kernelhwsrv/kernel/eka/nkernsmp/arm/ncthrd.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncthrd.cpp
//
//
sl@0
    18
// NThreadBase member data
sl@0
    19
#define __INCLUDE_NTHREADBASE_DEFINES__
sl@0
    20
sl@0
    21
#define __INCLUDE_REG_OFFSETS__
sl@0
    22
#include <arm.h>
sl@0
    23
#include <arm_gic.h>
sl@0
    24
#include <arm_scu.h>
sl@0
    25
#include <arm_tmr.h>
sl@0
    26
#include <nk_irq.h>
sl@0
    27
sl@0
    28
const TInt KNThreadMinStackSize = 0x100;	// needs to be enough for interrupt + reschedule stack
sl@0
    29
sl@0
    30
// Called by a thread when it first runs
sl@0
    31
extern "C" void __StartThread();
sl@0
    32
sl@0
    33
// Initialise CPU registers
sl@0
    34
extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
sl@0
    35
sl@0
    36
extern "C" void ExcFault(TAny*);
sl@0
    37
sl@0
    38
extern TUint32 __mpid();
sl@0
    39
extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);
sl@0
    40
sl@0
    41
/** Create a nanokernel thread, or adopt the currently executing flow of
	control as the initial thread of a CPU.

	@param aInfo    Creation parameters: stack base/size, entry point,
	                parameter block, priority, CPU affinity.
	@param aInitial ETrue during boot when the caller itself becomes the
	                first thread on a CPU; EFalse for ordinary creation.
	@return KErrNone on success, otherwise the error from NThreadBase::Create().
 */
TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
	{
	// Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
	__NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);
	// Stack must exist and leave at least KNThreadMinStackSize beyond the
	// parameter block for interrupt + reschedule frames.
	__NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
	TInt cpu = -1;
	new (this) NThread;
	if (aInitial)
		{
		// Claim the next CPU number and pin this (the boot) thread to it.
		cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
		aInfo.iCpuAffinity = cpu;
		// OK since we can't migrate yet
		TSubScheduler& ss = TheSubSchedulers[cpu];
		ss.iCurrentThread = this;
		iRunCount64 = UI64LIT(1);
		__KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d ss=%08x", cpu, &ss));
		if (cpu)
			{
			// Application processor (cpu != 0): bring up its CPU state,
			// then quiesce the local timer/watchdog before enabling them.
			initialiseState(cpu,&ss);

			ArmLocalTimer& T = LOCAL_TIMER;
			// Two-write sequence is the documented way to disable the
			// watchdog on the ARM MPCore private timer block.
			T.iWatchdogDisable = E_ArmTmrWDD_1;
			T.iWatchdogDisable = E_ArmTmrWDD_2;
			T.iTimerCtrl = 0;
			T.iTimerIntStatus = E_ArmTmrIntStatus_Event;	// clear pending event
			T.iWatchdogCtrl = 0;
			T.iWatchdogIntStatus = E_ArmTmrIntStatus_Event;

			// Per-AP interrupt controller initialisation, then start the
			// local timer in auto-reload mode with its interrupt enabled.
			NIrq::HwInit2AP();
			T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;

			// Mark this CPU active and not idle in the scheduler masks.
			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
			__e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
			__KTRACE_OPT(KBOOT,DEBUGPRINT("AP MPID=%08x",__mpid()));
			}
		else
			{
			// Boot processor: record the default domain/coprocessor access
			// values that all subsequently created threads will inherit.
			Arm::DefaultDomainAccess = Arm::Dacr();
			Arm::ModifyCar(0, 0x00f00000);		// full access to CP10, CP11
			Arm::DefaultCoprocessorAccess = Arm::Car();
			}
		}
	TInt r=NThreadBase::Create(aInfo,aInitial);
	if (r!=KErrNone)
		return r;
	if (!aInitial)
		{
		// Ordinary thread: build its initial kernel stack frames by hand so
		// that the first reschedule "returns" into __StartThread.
		aInfo.iPriority = 0;
		TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
		TLinAddr sp = stack_top;
		TUint32 pb = (TUint32)aInfo.iParameterBlock;
		SThreadStackStub* tss = 0;
		if (aInfo.iParameterBlockSize)
			{
			// Copy the parameter block onto the top of the new stack and
			// leave a stub frame above it so stack walkers terminate cleanly.
			tss = (SThreadStackStub*)stack_top;
			--tss;
			tss->iExcCode = SThreadExcStack::EStub;
			tss->iR15 = 0;
			tss->iCPSR = 0;
			sp = (TLinAddr)tss;
			sp -= (TLinAddr)aInfo.iParameterBlockSize;
			wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
			pb = (TUint32)sp;	// thread receives the copied block's address
			tss->iPBlock = sp;
			}
		// Initial reschedule + exception frame: registers restored when the
		// thread first runs.
		SThreadInitStack* tis = (SThreadInitStack*)sp;
		--tis;
		memclr(tis, sizeof(SThreadInitStack));
		iSavedSP = (TLinAddr)tis;
#ifdef __CPU_HAS_VFP
		tis->iR.iFpExc = VFP_FPEXC_THRD_INIT;
#endif
		tis->iR.iCar = Arm::DefaultCoprocessorAccess;
		tis->iR.iDacr = Arm::DefaultDomainAccess;
		tis->iR.iSpsrSvc = MODE_SVC;
		// Low bit set flags this as an initial frame (see reschedule code).
		tis->iR.iSPRschdFlg = TLinAddr(&tis->iX) | 1;
		tis->iR.iR15 = (TUint32)&__StartThread;

		tis->iX.iR0 = pb;				// argument to the entry function
		tis->iX.iR4 = (TUint32)this;
		tis->iX.iR11 = stack_top;
		tis->iX.iExcCode = SThreadExcStack::EInit;
		tis->iX.iR15 = (TUint32)aInfo.iFunction;	// thread entry point
		tis->iX.iCPSR = MODE_SVC;
		}
	else
		{
		// Initial thread: creation complete, safe to take interrupts now.
		NKern::EnableAllInterrupts();

		// start local timer
		ArmLocalTimer& T = LOCAL_TIMER;
		T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;

		// synchronize AP's timestamp with BP's
		if (cpu>0)
			InitAPTimestamp(aInfo);
		}
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
	return KErrNone;
	}
sl@0
   144
sl@0
   145
sl@0
   146
/**	Called from generic layer when thread is killed asynchronously.

	For ARM, save reason for last user->kernel switch (if any) so that user
	context can be accessed from EDebugEventRemoveThread hook.  Must be done
	before forcing the thread to exit as this alters the saved return address
	which is used to figure out where the context is saved.

	@pre kernel locked
	@post kernel locked
 */
void NThreadBase::OnKill()
	{
	// Intentionally empty on SMP: the user context is saved in the same
	// stack location on every kernel entry (see NThread::UserContextType),
	// so there is nothing to record here.
	}
sl@0
   159
sl@0
   160
/**	Called from generic layer when thread exits.

	For ARM, save that if the thread terminates synchronously the last
	user->kernel switch was an exec call.  Do nothing if non-user thread or
	reason already saved in OnKill().

	@pre kernel locked
	@post kernel locked
	@see OnKill
 */
void NThreadBase::OnExit()
	{
	// Intentionally empty on SMP for the same reason as OnKill(): the user
	// context location does not depend on how the kernel was entered.
	}
sl@0
   173
sl@0
   174
sl@0
   175
// Dump an exception record (registers, fault address/status) plus the
// identity of the current thread/CPU to the debug port.  Interrupts are
// disabled briefly so the sub-scheduler fields are read as a consistent
// snapshot of a single CPU.
void DumpExcInfo(TArmExcInfo& a)
	{
	DEBUGPRINT("Exc %1d Cpsr=%08x FAR=%08x FSR=%08x",a.iExcCode,a.iCpsr,a.iFaultAddress,a.iFaultStatus);
	DEBUGPRINT(" R0=%08x  R1=%08x  R2=%08x  R3=%08x",a.iR0,a.iR1,a.iR2,a.iR3);
	DEBUGPRINT(" R4=%08x  R5=%08x  R6=%08x  R7=%08x",a.iR4,a.iR5,a.iR6,a.iR7);
	DEBUGPRINT(" R8=%08x  R9=%08x R10=%08x R11=%08x",a.iR8,a.iR9,a.iR10,a.iR11);
	DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x",a.iR12,a.iR13,a.iR14,a.iR15);
	DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);

	// Snapshot per-CPU state with interrupts off so all four values refer
	// to the same CPU/instant, then print after re-enabling.
	TInt irq = NKern::DisableAllInterrupts();
	TSubScheduler& ss = SubScheduler();
	NThreadBase* ct = ss.iCurrentThread;
	TInt inc = TInt(ss.i_IrqNestCount);
	TInt cpu = ss.iCpuNum;
	TInt klc = ss.iKernLockCount;
	NKern::RestoreInterrupts(irq);
	DEBUGPRINT("Thread %T, CPU %d, KLCount=%d, IrqNest=%d", ct, cpu, klc, inc);
	}
sl@0
   193
sl@0
   194
// Dump a complete ARM register set (all processor modes, auxiliary and
// banked CP15 registers) to the debug port.  Used by crash/debug paths.
// NOTE(review): only a.iB[0] is printed — presumably the non-secure bank;
// confirm against the SFullArmRegSet definition.
void DumpFullRegSet(SFullArmRegSet& a)
	{
	SNormalRegs& r = a.iN;
	DEBUGPRINT("MODE_USR:");
	DEBUGPRINT(" R0=%08x  R1=%08x  R2=%08x  R3=%08x", r.iR0,  r.iR1,  r.iR2,  r.iR3);
	DEBUGPRINT(" R4=%08x  R5=%08x  R6=%08x  R7=%08x", r.iR4,  r.iR5,  r.iR6,  r.iR7);
	DEBUGPRINT(" R8=%08x  R9=%08x R10=%08x R11=%08x", r.iR8,  r.iR9,  r.iR10, r.iR11);
	DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x", r.iR12, r.iR13, r.iR14, r.iR15);
	DEBUGPRINT("CPSR=%08x", r.iFlags);
	DEBUGPRINT("MODE_FIQ:");
	DEBUGPRINT(" R8=%08x  R9=%08x R10=%08x R11=%08x",  r.iR8Fiq,  r.iR9Fiq,  r.iR10Fiq, r.iR11Fiq);
	DEBUGPRINT("R12=%08x R13=%08x R14=%08x SPSR=%08x", r.iR12Fiq, r.iR13Fiq, r.iR14Fiq, r.iSpsrFiq);
	DEBUGPRINT("MODE_IRQ:");
	DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Irq, r.iR14Irq, r.iSpsrIrq);
	DEBUGPRINT("MODE_SVC:");
	DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Svc, r.iR14Svc, r.iSpsrSvc);
	DEBUGPRINT("MODE_ABT:");
	DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Abt, r.iR14Abt, r.iSpsrAbt);
	DEBUGPRINT("MODE_UND:");
	DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Und, r.iR14Und, r.iSpsrUnd);
//	DEBUGPRINT("MODE_MON:");
//	DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Mon, r.iR14Mon, r.iSpsrMon);

	SAuxiliaryRegs& aux = a.iA;
	DEBUGPRINT("TEEHBR=%08x  CPACR=%08x", aux.iTEEHBR, aux.iCPACR);

	SBankedRegs& b = a.iB[0];
	DEBUGPRINT(" SCTLR=%08x  ACTLR=%08x   PRRR=%08x   NMRR=%08x", b.iSCTLR, b.iACTLR, b.iPRRR, b.iNMRR);
	DEBUGPRINT("  DACR=%08x  TTBR0=%08x  TTBR1=%08x  TTBCR=%08x", b.iDACR, b.iTTBR0, b.iTTBR1, b.iTTBCR);
	DEBUGPRINT("  VBAR=%08x FCSEID=%08x CTXIDR=%08x", b.iVBAR, b.iFCSEIDR, b.iCTXIDR);
	DEBUGPRINT("Thread ID     RWRW=%08x   RWRO=%08x   RWNO=%08x", b.iRWRWTID, b.iRWROTID, b.iRWNOTID);
	DEBUGPRINT("  DFSR=%08x   DFAR=%08x   IFSR=%08x   IFAR=%08x", b.iDFSR, b.iDFAR, b.iIFSR, b.iIFAR);
	DEBUGPRINT(" ADFSR=%08x              AIFSR=%08x", b.iADFSR, b.iAIFSR);
#ifdef __CPU_HAS_VFP
	DEBUGPRINT("FPEXC %08x", a.iMore[0]);
#endif
	DEBUGPRINT("ExcCode %08x", a.iExcCode);
	}
sl@0
   232
sl@0
   233
sl@0
   234
// Each macro below expands to an aggregate initializer for one
// TArmContextElement: { how-to-locate, primary value/offset, secondary
// offset, 0 }.  Offsets are expressed in words (hence the >>2).  These
// describe where each ARM register of a thread's user-side context can be
// found relative to its kernel stack.

// Register value is not recoverable; report the constant 'val' instead.
#define CONTEXT_ELEMENT_UNDEFINED(val)							\
	{															\
	TArmContextElement::EUndefined,								\
	val,														\
	0,															\
	0															\
	}

// Register lives in the SThreadExcStack frame at the top of the stack;
// offset is measured down from the stack top.
#define CONTEXT_ELEMENT_EXCEPTION(reg)							\
	{															\
	TArmContextElement::EOffsetFromStackTop,					\
	((sizeof(SThreadExcStack)-_FOFF(SThreadExcStack,reg))>>2),	\
	0,															\
	0															\
	}

// Register lives in the SThreadReschedStack frame at the saved SP.
#define CONTEXT_ELEMENT_RESCHED(reg)							\
	{															\
	TArmContextElement::EOffsetFromSp,							\
	(_FOFF(SThreadReschedStack,reg)>>2),						\
	0,															\
	0															\
	}

// The pre-reschedule SP itself, recovered from iSPRschdFlg with its low
// flag bits masked off (Bic3 = clear bottom two bits).
#define CONTEXT_ELEMENT_RESCHED_SP()							\
	{															\
	TArmContextElement::EOffsetFromSpBic3,						\
	(_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),				\
	0,															\
	0															\
	}

// The recovered pre-reschedule SP plus a fixed byte offset.
#define CONTEXT_ELEMENT_RESCHED_SP_PLUS(offset)					\
	{															\
	TArmContextElement::EOffsetFromSpBic3_1,					\
	(_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),				\
	(offset),													\
	0															\
	}

// A word stored at a fixed byte offset from the recovered SP.
#define CONTEXT_ELEMENT_RESCHED_SP_OFFSET(offset)				\
	{															\
	TArmContextElement::EOffsetFromSpBic3_2,					\
	(_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),				\
	(offset),													\
	0															\
	}

// Register in an SThreadIrqStack frame stacked above the reschedule frame.
#define CONTEXT_ELEMENT_RESCHED_IRQ(reg)						\
	{															\
	TArmContextElement::EOffsetFromSpBic3_2,					\
	(_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),				\
	((_FOFF(SThreadIrqStack,reg)-sizeof(SThreadReschedStack))>>2),	\
	0															\
	}

// Register in an SThreadInitStack frame stacked above the reschedule frame.
#define CONTEXT_ELEMENT_RESCHED_INIT(reg)						\
	{															\
	TArmContextElement::EOffsetFromSpBic3_2,					\
	(_FOFF(SThreadReschedStack,iSPRschdFlg)>>2),				\
	((_FOFF(SThreadInitStack,reg)-sizeof(SThreadReschedStack))>>2),	\
	0															\
	}
sl@0
   297
sl@0
   298
sl@0
   299
// Each table below has one entry per slot of TArmRegisters, in order:
// R0..R12, R13(SP), R14(LR), R15(PC), CPSR, plus one trailing slot
// (presumably DACR — confirm against TArmRegisters).

// Table used when the thread entered the kernel via an exception; the full
// user context is in the SThreadExcStack frame at the top of the stack.
const TArmContextElement ContextTableException[] =
	{
	CONTEXT_ELEMENT_EXCEPTION(iR0),
	CONTEXT_ELEMENT_EXCEPTION(iR1),
	CONTEXT_ELEMENT_EXCEPTION(iR2),
	CONTEXT_ELEMENT_EXCEPTION(iR3),
	CONTEXT_ELEMENT_EXCEPTION(iR4),
	CONTEXT_ELEMENT_EXCEPTION(iR5),
	CONTEXT_ELEMENT_EXCEPTION(iR6),
	CONTEXT_ELEMENT_EXCEPTION(iR7),
	CONTEXT_ELEMENT_EXCEPTION(iR8),
	CONTEXT_ELEMENT_EXCEPTION(iR9),
	CONTEXT_ELEMENT_EXCEPTION(iR10),
	CONTEXT_ELEMENT_EXCEPTION(iR11),
	CONTEXT_ELEMENT_EXCEPTION(iR12),
	CONTEXT_ELEMENT_EXCEPTION(iR13usr),
	CONTEXT_ELEMENT_EXCEPTION(iR14usr),
	CONTEXT_ELEMENT_EXCEPTION(iR15),
	CONTEXT_ELEMENT_EXCEPTION(iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used when no register value can be recovered at all.
const TArmContextElement ContextTableUndefined[] =
	{
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(EUserMode),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used for non dying threads which have been preempted by an interrupt
// while in user mode.  
const TArmContextElement ContextTableUserInterrupt[] =
	{
	CONTEXT_ELEMENT_EXCEPTION(iR0),
	CONTEXT_ELEMENT_EXCEPTION(iR1),
	CONTEXT_ELEMENT_EXCEPTION(iR2),
	CONTEXT_ELEMENT_EXCEPTION(iR3),
	CONTEXT_ELEMENT_EXCEPTION(iR4),
	CONTEXT_ELEMENT_EXCEPTION(iR5),
	CONTEXT_ELEMENT_EXCEPTION(iR6),
	CONTEXT_ELEMENT_EXCEPTION(iR7),
	CONTEXT_ELEMENT_EXCEPTION(iR8),
	CONTEXT_ELEMENT_EXCEPTION(iR9),
	CONTEXT_ELEMENT_EXCEPTION(iR10),
	CONTEXT_ELEMENT_EXCEPTION(iR11),
	CONTEXT_ELEMENT_EXCEPTION(iR12),
	CONTEXT_ELEMENT_EXCEPTION(iR13usr),
	CONTEXT_ELEMENT_EXCEPTION(iR14usr),
	CONTEXT_ELEMENT_EXCEPTION(iR15),
	CONTEXT_ELEMENT_EXCEPTION(iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler either before the return address was
// saved or after the registers were restored.
const TArmContextElement ContextTableSvsrInterrupt1[] =
	{
	CONTEXT_ELEMENT_EXCEPTION(iR0),
	CONTEXT_ELEMENT_EXCEPTION(iR1),
	CONTEXT_ELEMENT_EXCEPTION(iR2),
	CONTEXT_ELEMENT_EXCEPTION(iR3),
	CONTEXT_ELEMENT_EXCEPTION(iR4),
	CONTEXT_ELEMENT_EXCEPTION(iR5),
	CONTEXT_ELEMENT_EXCEPTION(iR6),
	CONTEXT_ELEMENT_EXCEPTION(iR7),
	CONTEXT_ELEMENT_EXCEPTION(iR8),
	CONTEXT_ELEMENT_EXCEPTION(iR9),
	CONTEXT_ELEMENT_EXCEPTION(iR10),
	CONTEXT_ELEMENT_EXCEPTION(iR11),
	CONTEXT_ELEMENT_EXCEPTION(iR12),
	CONTEXT_ELEMENT_EXCEPTION(iR13usr),
	CONTEXT_ELEMENT_EXCEPTION(iR14usr),
	CONTEXT_ELEMENT_EXCEPTION(iR15),
	CONTEXT_ELEMENT_UNDEFINED(EUserMode),  // can't get flags so just use 'user mode'
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used for non-dying threads blocked on their request semaphore.
const TArmContextElement ContextTableWFAR[] =
	{
	CONTEXT_ELEMENT_EXCEPTION(iR0),
	CONTEXT_ELEMENT_EXCEPTION(iR1),
	CONTEXT_ELEMENT_EXCEPTION(iR2),
	CONTEXT_ELEMENT_EXCEPTION(iR3),
	CONTEXT_ELEMENT_EXCEPTION(iR4),
	CONTEXT_ELEMENT_EXCEPTION(iR5),
	CONTEXT_ELEMENT_EXCEPTION(iR6),
	CONTEXT_ELEMENT_EXCEPTION(iR7),
	CONTEXT_ELEMENT_EXCEPTION(iR8),
	CONTEXT_ELEMENT_EXCEPTION(iR9),
	CONTEXT_ELEMENT_EXCEPTION(iR10),
	CONTEXT_ELEMENT_EXCEPTION(iR11),
	CONTEXT_ELEMENT_EXCEPTION(iR12),
	CONTEXT_ELEMENT_EXCEPTION(iR13usr),
	CONTEXT_ELEMENT_EXCEPTION(iR14usr),
	CONTEXT_ELEMENT_EXCEPTION(iR15),
	CONTEXT_ELEMENT_EXCEPTION(iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used for threads which entered the kernel via an exec (SWI) call.
const TArmContextElement ContextTableExec[] =
	{
	CONTEXT_ELEMENT_EXCEPTION(iR0),
	CONTEXT_ELEMENT_EXCEPTION(iR1),
	CONTEXT_ELEMENT_EXCEPTION(iR2),
	CONTEXT_ELEMENT_EXCEPTION(iR3),
	CONTEXT_ELEMENT_EXCEPTION(iR4),
	CONTEXT_ELEMENT_EXCEPTION(iR5),
	CONTEXT_ELEMENT_EXCEPTION(iR6),
	CONTEXT_ELEMENT_EXCEPTION(iR7),
	CONTEXT_ELEMENT_EXCEPTION(iR8),
	CONTEXT_ELEMENT_EXCEPTION(iR9),
	CONTEXT_ELEMENT_EXCEPTION(iR10),
	CONTEXT_ELEMENT_EXCEPTION(iR11),
	CONTEXT_ELEMENT_EXCEPTION(iR12),
	CONTEXT_ELEMENT_EXCEPTION(iR13usr),
	CONTEXT_ELEMENT_EXCEPTION(iR14usr),
	CONTEXT_ELEMENT_EXCEPTION(iR15),
	CONTEXT_ELEMENT_EXCEPTION(iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used to retrieve a thread's kernel side context at the point where
// Reschedule() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel[] =
	{
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_RESCHED_SP(),			// supervisor stack pointer before reschedule
	CONTEXT_ELEMENT_UNDEFINED(0),			// supervisor lr is unknown
	CONTEXT_ELEMENT_RESCHED(iR15),			// return address from reschedule
	CONTEXT_ELEMENT_UNDEFINED(ESvcMode),	// can't get flags so just use 'user mode'
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used to retrieve a thread's kernel side context at the point where
// NKern::Unlock() or NKern::PreemptionPoint() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel1[] =
	{
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
	CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used to retrieve a thread's kernel side context at the point where
// NKern::FSWait() or NKern::WaitForAnyRequest() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel2[] =
	{
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
	CONTEXT_ELEMENT_UNDEFINED(0),
	CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
	CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
	CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used to retrieve a thread's kernel side context at the point where
// an interrupt taken in supervisor mode returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel3[] =
	{
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR0),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR1),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR2),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR3),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR4),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR5),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR6),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR7),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR8),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR9),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR10),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR11),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR12),
	CONTEXT_ELEMENT_RESCHED_SP_PLUS((sizeof(SThreadExcStack)+8)),
	CONTEXT_ELEMENT_RESCHED_IRQ(iR14svc),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR15),
	CONTEXT_ELEMENT_RESCHED_IRQ(iX.iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Table used to retrieve a thread's kernel side context at the point where
// Exec::WaitForAnyRequest() returns.
// Used for kernel threads.
const TArmContextElement ContextTableKernel4[] =
	{
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR0),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR1),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR2),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR3),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR4),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR5),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR6),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR7),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR8),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR9),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR10),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR11),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR12),
	CONTEXT_ELEMENT_RESCHED_SP_PLUS(sizeof(SThreadExcStack)),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),	// NOTE(review): R14 slot reads iR15 — looks intentional (true LR unknown); confirm
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),
	CONTEXT_ELEMENT_RESCHED_INIT(iX.iCPSR),
	CONTEXT_ELEMENT_UNDEFINED(0),
	};

// Master table, indexed by TUserContextType; null terminated.
const TArmContextElement* const ThreadUserContextTables[] =
	{
	ContextTableUndefined,					// EContextNone
	ContextTableException,					// EContextException
	ContextTableUndefined,					// EContextUndefined
	ContextTableUserInterrupt,				// EContextUserInterrupt
	ContextTableUndefined,					// EContextUserInterruptDied (not used)
	ContextTableSvsrInterrupt1,				// EContextSvsrInterrupt1
	ContextTableUndefined,					// EContextSvsrInterrupt1Died (not used)
	ContextTableUndefined,					// EContextSvsrInterrupt2 (not used)
	ContextTableUndefined,					// EContextSvsrInterrupt2Died (not used)
	ContextTableWFAR,						// EContextWFAR
	ContextTableUndefined,					// EContextWFARDied (not used)
	ContextTableExec,						// EContextExec
	ContextTableKernel,						// EContextKernel
	ContextTableKernel1,					// EContextKernel1
	ContextTableKernel2,					// EContextKernel2
	ContextTableKernel3,					// EContextKernel3
	ContextTableKernel4,					// EContextKernel4
	0 // Null terminated
	};
sl@0
   583
sl@0
   584
/**  Return table of pointers to user context tables.

	Each user context table is an array of TArmContextElement objects, one per
	ARM CPU register, in the order defined in TArmRegisters.

	The master table contains pointers to the user context tables in the order
	defined in TUserContextType.  There are as many user context tables as
	scenarios leading a user thread to switch to privileged mode.

	Stop-mode debug agents should use this function to store the address of the
	master table at a location known to the host debugger.  Run-mode debug
	agents are advised to use NKern::GetUserContext() and
	NKern::SetUserContext() instead.

	@return A pointer to the master table.  The master table is NULL
	terminated.  The master and user context tables are guaranteed to remain at
	the same location for the lifetime of the OS execution so it is safe to
	cache the returned address.

	@see UserContextType
	@see TArmContextElement
	@see TArmRegisters
	@see TUserContextType
	@see NKern::SetUserContext
	@see NKern::GetUserContext

	@publishedPartner
 */
EXPORT_C const TArmContextElement* const* NThread::UserContextTables()
	{
	return &ThreadUserContextTables[0];
	}
sl@0
   616
sl@0
   617
sl@0
   618
/** Get a value which indicates where a thread's user mode context is stored.

	@return A value that can be used as an index into the tables returned by
	NThread::UserContextTables().

	@pre any context
	@pre kernel locked
	@post kernel locked
 
	@see UserContextTables
	@publishedPartner
 */
EXPORT_C NThread::TUserContextType NThread::UserContextType()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThread::UserContextType");

	/*
	The SMP nanokernel always saves R0-R12,R13usr,R14usr,ExcCode,PC,CPSR on any
	entry to the kernel, so getting the user context is always the same.
	The only possible problem is an FIQ occurring immediately after any other
	exception, before the registers have been saved. In this case the registers
	saved by the FIQ will be the ones observed and they will be correct except
	that the CPSR value will indicate a mode other than USR, which can be used
	to detect the condition.
	*/
	return EContextException;
	}
sl@0
   645
sl@0
   646
sl@0
   647
// Enter and return with kernel locked
sl@0
   648
// Enter and return with kernel locked.
//
// Copy this thread's saved user-mode register state into aContext and set
// aAvailRegistersMask to a bitmask of valid entries (bit n = register n,
// bit 16 = CPSR).  If the target thread is running on another CPU it is
// interrupted first so a stable snapshot can be taken.
//
// NOTE(review): if the thread has never entered user mode (iExcCode check
// fails) aAvailRegistersMask is left unassigned on the pC==this path —
// callers presumably pre-initialise it; confirm.
void NThread::GetUserContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		AcqSLock();
		if (iWaitState.ThreadIsDead())
			{
			RelSLock();
			aAvailRegistersMask = 0;
			return;
			}
		if (iReady && iParent->iReady)
			{
			// Lock the ready list of the CPU the thread is queued on so it
			// cannot start running while we read its stack.
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	// The user context always lives in the SThreadExcStack frame at the very
	// top of the thread's stack (see UserContextType()).
	SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	--txs;
	if (txs->iExcCode <= SThreadExcStack::EInit)	// if not, thread never entered user mode
		{
		aContext.iR0 = txs->iR0;
		aContext.iR1 = txs->iR1;
		aContext.iR2 = txs->iR2;
		aContext.iR3 = txs->iR3;
		aContext.iR4 = txs->iR4;
		aContext.iR5 = txs->iR5;
		aContext.iR6 = txs->iR6;
		aContext.iR7 = txs->iR7;
		aContext.iR8 = txs->iR8;
		aContext.iR9 = txs->iR9;
		aContext.iR10 = txs->iR10;
		aContext.iR11 = txs->iR11;
		aContext.iR12 = txs->iR12;
		aContext.iR13 = txs->iR13usr;
		aContext.iR14 = txs->iR14usr;
		aContext.iR15 = txs->iR15;
		aContext.iFlags = txs->iCPSR;
		// Mode bits 0x10 == USR: frame was saved from user mode, so the CPSR
		// is genuine.  Otherwise an FIQ preempted the save (see
		// UserContextType()); registers are correct but CPSR is not.
		if ((aContext.iFlags & 0x1f) == 0x10)
			aAvailRegistersMask = 0x1ffffu;	// R0-R15,CPSR all valid
		else
			{
			aContext.iFlags = 0x10;			// account for FIQ in SVC case
			aAvailRegistersMask = 0x0ffffu;	// CPSR not valid
			}
		}
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}
sl@0
   711
sl@0
   712
// Generic IPI used to capture the register state of a thread that is
// currently executing on another CPU.  The ISR runs on the target CPU and
// copies the SVC-mode exception stack frame into *iContext.
class TGetContextIPI : public TGenericIPI
	{
public:
	// Queue the IPI to aCpu and block until the ISR has filled in
	// aContext and aAvailRegistersMask.
	void Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegistersMask);
	// Runs in interrupt context on the target CPU.
	static void Isr(TGenericIPI*);
public:
	TArmRegSet* iContext;		// where the ISR writes the captured registers
	TUint32* iAvailRegsMask;	// where the ISR writes the valid-register bit mask
	};
sl@0
   721
sl@0
   722
extern "C" TLinAddr get_sp_svc();
sl@0
   723
extern "C" TLinAddr get_lr_svc();
sl@0
   724
extern "C" TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/);
sl@0
   725
sl@0
   726
void TGetContextIPI::Isr(TGenericIPI* aPtr)
	{
	// Runs on the CPU where the target thread is currently executing.
	// Interrupt entry has pushed a full SThreadExcStack frame onto the SVC
	// stack, so a snapshot of the thread's state can be read straight from
	// there into the requester's output structure.
	TGetContextIPI* self = (TGetContextIPI*)aPtr;
	TArmRegSet& regs = *self->iContext;
	SThreadExcStack* frame = (SThreadExcStack*)get_sp_svc();
	regs.iR0 = frame->iR0;
	regs.iR1 = frame->iR1;
	regs.iR2 = frame->iR2;
	regs.iR3 = frame->iR3;
	regs.iR4 = frame->iR4;
	regs.iR5 = frame->iR5;
	regs.iR6 = frame->iR6;
	regs.iR7 = frame->iR7;
	regs.iR8 = frame->iR8;
	regs.iR9 = frame->iR9;
	regs.iR10 = frame->iR10;
	regs.iR11 = frame->iR11;
	regs.iR12 = frame->iR12;
	regs.iR13 = TUint32(frame) + sizeof(SThreadExcStack);	// SP value before exception entry
	regs.iR14 = get_lr_svc();
	regs.iR15 = frame->iR15;
	regs.iFlags = frame->iCPSR;
	*self->iAvailRegsMask = 0x1ffffu;	// R0-R15 and CPSR all captured
	}
sl@0
   750
sl@0
   751
void TGetContextIPI::Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegsMask)
	{
	// Record where the ISR should deposit its results, then interrupt the
	// target CPU and spin until the capture has completed.
	iContext = &aContext;
	iAvailRegsMask = &aAvailRegsMask;
	TUint32 targetCpuMask = 1u << aCpu;
	Queue(&Isr, targetCpuMask);
	WaitCompletion();
	}
sl@0
   758
sl@0
   759
// Copy a selection of words from a saved stack frame at aStart into
// aContext.  aMask selects destination slots: bit n set means the n'th
// word of aContext is filled from the next sequential source word.
void GetRegs(TArmRegSet& aContext, TLinAddr aStart, TUint32 aMask)
	{
	const TUint32* src = (const TUint32*)aStart;
	TUint32* dest = (TUint32*)&aContext;
	while (aMask)
		{
		if (aMask & 1u)
			*dest = *src++;	// source words are packed - only advance when consumed
		++dest;
		aMask >>= 1;
		}
	}
sl@0
   769
sl@0
   770
// Enter and return with kernel locked
//
// Capture the supervisor-mode (kernel) register state of a thread other
// than the current one.  If the thread is running on another CPU the state
// is snapshotted via an IPI; otherwise it is reconstructed from the saved
// reschedule stack frame, whose layout depends on how the thread was
// suspended.  On return aAvailRegsMask has bit n set for each valid word
// of aContext (bit 0 = R0 ... bit 15 = R15, bit 16 = CPSR).
void NThread::GetSystemContext(TArmRegSet& aContext, TUint32& aAvailRegsMask)
	{
	aAvailRegsMask = 0;
	NThread* pC = NCurrentThreadL();
	__NK_ASSERT_ALWAYS(pC!=this);	// must not be used on the current thread
	TSubScheduler* ss = 0;
	AcqSLock();
	if (iWaitState.ThreadIsDead())
		{
		RelSLock();
		return;		// dead thread - no registers available (mask stays 0)
		}
	if (iReady && iParent->iReady)
		{
		// Thread is on a CPU's ready list - lock that list so the thread
		// cannot migrate or change state while we inspect it.
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	if (iCurrent)
		{
		// thread is actually running on another CPU
		// use an interprocessor interrupt to get a snapshot of the state
		TGetContextIPI ipi;
		ipi.Get(iLastCpu, aContext, aAvailRegsMask);
		}
	else
		{
		// thread is not running and can't start
		// Reconstruct the register state from the saved reschedule stack;
		// get_kernel_context_type() classifies the frame by the return
		// address the thread will resume at.
		SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
		TInt kct = get_kernel_context_type(trs->iR15);
		__NK_ASSERT_ALWAYS(kct>=0);	// couldn't match return address from reschedule
		TLinAddr sp = trs->iSPRschdFlg &~ 3;	// strip flag bits to recover the stack pointer
		switch (kct)
			{
			case 0:	// thread not yet started
			case 5:	// Exec::WaitForAnyRequest()
				// Full exception frame at sp: R0-R12, with R15/CPSR at offset 64.
				GetRegs(aContext, sp, 0x01fffu);
				aContext.iR13 = sp + sizeof(SThreadExcStack);
				GetRegs(aContext, sp+64, 0x18000u);
				aAvailRegsMask =0x1bfffu;	// R0-R13, R15, CPSR valid (R14 unavailable)
				break;
			case 1:	// unlock
			case 2:	// preemption point
			case 3:	// NKern::WaitForAnyRequest() or NKern::FSWait()
				// Callee-saved frame only: R4-R11 and R15 starting at sp+4.
				GetRegs(aContext, sp+4, 0x08ff0u);
				aContext.iR14 = aContext.iR15;	// resumes via LR, so R14 mirrors R15
				aContext.iR13 = sp+40;
				aAvailRegsMask =0x0eff0u;	// R4-R11, R13-R14 valid
				break;
			case 4:	// IRQ/FIQ
				// R14 at sp+4, then a full exception frame from sp+8.
				GetRegs(aContext, sp+4, 0x04000u);
				GetRegs(aContext, sp+8, 0x01fffu);
				GetRegs(aContext, sp+64, 0x18000u);
				aContext.iR13 = sp + sizeof(SThreadExcStack) + 8;
				aAvailRegsMask =0x1ffffu;	// R0-R15 and CPSR all valid
				break;
			default:
				__NK_ASSERT_ALWAYS(0);	// unrecognised kernel context type
			}
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	RelSLock();
	}
sl@0
   834
sl@0
   835
// Enter and return with kernel locked
//
// Write (a subset of) the user-mode register context of this thread.
// aRegMask selects which registers to write on entry (bit 0 = R0 ...
// bit 15 = R15, bit 16 = CPSR flags); on return it holds the subset that
// was actually written (0 if the thread never entered user mode or is
// dead).  The registers are patched directly into the SThreadExcStack
// frame at the top of the thread's supervisor stack.
void NThread::SetUserContext(const TArmRegSet& aContext, TUint32& aRegMask)
	{
	NThread* pC = NCurrentThreadL();
	TSubScheduler* ss = 0;
	if (pC != this)
		{
		// Target is another thread - pin down its state first.
		AcqSLock();
		if (iWaitState.ThreadIsDead())
			{
			RelSLock();
			aRegMask = 0;	// nothing written
			return;
			}
		if (iReady && iParent->iReady)
			{
			// Lock the ready list of the CPU the thread is attached to so
			// it cannot migrate or be scheduled while we modify its stack.
			ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
			ss->iReadyListLock.LockOnly();
			}
		if (iCurrent)
			{
			// thread is actually running on another CPU
			// interrupt that CPU and wait for it to enter interrupt mode
			// this allows a snapshot of the thread user state to be observed
			// and ensures the thread cannot return to user mode
			send_resched_ipi_and_wait(iLastCpu);
			}
		}
	// The user-mode exception frame sits at the very top of the supervisor stack.
	SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
	--txs;
	aRegMask &= 0x1ffffu;	// only R0-R15 and CPSR are settable
	if (txs->iExcCode <= SThreadExcStack::EInit)	// if not, thread never entered user mode
		{
		if (aRegMask & 0x0001u)
			txs->iR0 = aContext.iR0;
		if (aRegMask & 0x0002u)
			txs->iR1 = aContext.iR1;
		if (aRegMask & 0x0004u)
			txs->iR2 = aContext.iR2;
		if (aRegMask & 0x0008u)
			txs->iR3 = aContext.iR3;
		if (aRegMask & 0x0010u)
			txs->iR4 = aContext.iR4;
		if (aRegMask & 0x0020u)
			txs->iR5 = aContext.iR5;
		if (aRegMask & 0x0040u)
			txs->iR6 = aContext.iR6;
		if (aRegMask & 0x0080u)
			txs->iR7 = aContext.iR7;
		if (aRegMask & 0x0100u)
			txs->iR8 = aContext.iR8;
		if (aRegMask & 0x0200u)
			txs->iR9 = aContext.iR9;
		if (aRegMask & 0x0400u)
			txs->iR10 = aContext.iR10;
		if (aRegMask & 0x0800u)
			txs->iR11 = aContext.iR11;
		if (aRegMask & 0x1000u)
			txs->iR12 = aContext.iR12;
		if (aRegMask & 0x2000u)
			txs->iR13usr = aContext.iR13;
		if (aRegMask & 0x4000u)
			txs->iR14usr = aContext.iR14;
		if (aRegMask & 0x8000u)
			txs->iR15 = aContext.iR15;
		// Assert that target thread is in USR mode, and update only the flags part of the PSR
		__NK_ASSERT_ALWAYS((txs->iCPSR & 0x1f) == 0x10);
		if (aRegMask & 0x10000u)
			{
			// NZCVQ.......GE3-0................
			// Writable bits are the condition flags, Q and GE[3:0] only -
			// mode, state and interrupt-mask bits must never be user-settable.
			const TUint32 writableFlags = 0xF80F0000;
			txs->iCPSR &= ~writableFlags;
			txs->iCPSR |= aContext.iFlags & writableFlags;
			}
		}
	else
		aRegMask = 0;	// no user context exists - report nothing written
	if (pC != this)
		{
		if (ss)
			ss->iReadyListLock.UnlockOnly();
		RelSLock();
		}
	}
sl@0
   919
sl@0
   920
/** Get (subset of) user context of specified thread.
sl@0
   921
sl@0
   922
	The nanokernel does not systematically save all registers in the supervisor
sl@0
   923
	stack on entry into privileged mode and the exact subset depends on why the
sl@0
   924
	switch to privileged mode occurred.  So in general only a subset of the
sl@0
   925
	register set is available.
sl@0
   926
sl@0
   927
	@param aThread	Thread to inspect.  It can be the current thread or a
sl@0
   928
	non-current one.
sl@0
   929
sl@0
   930
	@param aContext	Pointer to TArmRegSet structure where the context is
sl@0
   931
	copied.
sl@0
   932
sl@0
   933
	@param aAvailRegistersMask Bit mask telling which subset of the context is
sl@0
   934
	available and has been copied to aContext (1: register available / 0: not
sl@0
   935
	available).  Bit 0 stands for register R0.
sl@0
   936
sl@0
   937
	@see TArmRegSet
sl@0
   938
	@see ThreadSetUserContext
sl@0
   939
sl@0
   940
	@pre Call in a thread context.
sl@0
   941
	@pre Interrupts must be enabled.
sl@0
   942
 */
sl@0
   943
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
	// Zero the output buffer first so any register not reported in
	// aAvailRegistersMask reads back as 0.
	TArmRegSet* regs = (TArmRegSet*)aContext;
	memclr(regs, sizeof(TArmRegSet));
	// The context may only be inspected with the kernel locked.
	NKern::Lock();
	aThread->GetUserContext(*regs, aAvailRegistersMask);
	NKern::Unlock();
	}
sl@0
   952
sl@0
   953
/** Get (subset of) system context of specified thread.
sl@0
   954
  
sl@0
   955
	@param aThread	Thread to inspect.  It can be the current thread or a
sl@0
   956
	non-current one.
sl@0
   957
sl@0
   958
	@param aContext	Pointer to TArmRegSet structure where the context is
sl@0
   959
	copied.
sl@0
   960
sl@0
   961
	@param aAvailRegistersMask Bit mask telling which subset of the context is
sl@0
   962
	available and has been copied to aContext (1: register available / 0: not
sl@0
   963
	available).  Bit 0 stands for register R0.
sl@0
   964
sl@0
   965
	@see TArmRegSet
sl@0
   966
	@see ThreadSetUserContext
sl@0
   967
sl@0
   968
	@pre Call in a thread context.
sl@0
   969
	@pre Interrupts must be enabled.
sl@0
   970
 */
sl@0
   971
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
	// Zero the output buffer first so any register not reported in
	// aAvailRegistersMask reads back as 0.
	TArmRegSet* regs = (TArmRegSet*)aContext;
	memclr(regs, sizeof(TArmRegSet));
	// The context may only be inspected with the kernel locked.
	NKern::Lock();
	aThread->GetSystemContext(*regs, aAvailRegistersMask);
	NKern::Unlock();
	}
sl@0
   980
sl@0
   981
/** Set (subset of) user context of specified thread.
sl@0
   982
sl@0
   983
	@param aThread	Thread to modify.  It can be the current thread or a
sl@0
   984
	non-current one.
sl@0
   985
sl@0
   986
	@param aContext	Pointer to TArmRegSet structure containing the context
sl@0
   987
	to set.  The values of registers which aren't part of the context saved
sl@0
   988
	on the supervisor stack are ignored.
sl@0
   989
sl@0
   990
	@see TArmRegSet
sl@0
   991
	@see ThreadGetUserContext
sl@0
   992
sl@0
   993
  	@pre Call in a thread context.
sl@0
   994
	@pre Interrupts must be enabled.
sl@0
   995
 */
sl@0
   996
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
	// Request the full register set (R0-R15 + CPSR); SetUserContext()
	// trims the mask to whatever is actually writable for this thread.
	TArmRegSet* regs = (TArmRegSet*)aContext;
	TUint32 writeMask = 0x1ffffu;
	NKern::Lock();
	aThread->SetUserContext(*regs, writeMask);
	NKern::Unlock();
	}
sl@0
  1005
sl@0
  1006
sl@0
  1007
#ifdef __CPU_HAS_VFP
sl@0
  1008
extern void VfpContextSave(void*);
sl@0
  1009
#endif
sl@0
  1010
/** Complete the saving of a thread's context
sl@0
  1011
sl@0
  1012
This saves the VFP/NEON registers if necessary once we know that we are definitely
sl@0
  1013
switching threads.
sl@0
  1014
sl@0
  1015
@internalComponent
sl@0
  1016
*/
sl@0
  1017
// Save this thread's VFP/NEON register state, if it is still live in the
// hardware of the current CPU, now that a thread switch is definite.
void NThread::CompleteContextSave()
	{
#ifdef __CPU_HAS_VFP
	// Only flush if this thread is the one whose VFP state currently
	// occupies this CPU's VFP unit; otherwise there is nothing to save.
	if (Arm::VfpThread[NKern::CurrentCpu()] == this)
		{
		VfpContextSave(iExtraContext); // Disables VFP
		}
#endif
	}
sl@0
  1026
sl@0
  1027
sl@0
  1028
// Recognise the special "crash" and "checkpoint" opcodes used by the
// kernel debugging machinery.  Returns 1 if the exception was consumed
// (checkpoint), 0 if it should be handled by the normal exception path.
// May not return at all: a crash opcode in privileged mode faults the
// system immediately via ExcFault().
extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType)
	{
	TUint32 opcode = aContext->iFaultStatus;
	TUint32 mode = aContext->iCpsr & 0x1f;
	TBool crash = EFalse;
	TBool checkpoint = EFalse;
	switch (aType)
		{
		case 15:	// coprocessor abort: anything but the checkpoint MCR is a crash
			crash = (opcode != 0xee000f20);
			checkpoint = (opcode == 0xee000f20);
			break;
		case 32:	// ARM undefined instruction
			crash = (opcode == 0xe7ffdeff);
			checkpoint = (opcode == 0xe7ffdefc);
			break;
		case 33:	// Thumb undefined instruction
			crash = (opcode == 0xdeff);
			checkpoint = (opcode == 0xdefc);
			break;
		default:
			break;
		}
	if (crash)
		{
		if (mode != 0x10)
			ExcFault(aContext);	// crash instruction in privileged mode
		return 0;	// crash instruction in user mode - handle normally
		}
	if (checkpoint)
		{
		// checkpoint: trace the exception info then resume past the opcode
		__KTRACE_OPT(KPANIC,DumpExcInfo(*aContext));
		aContext->iR15 += (aType==32) ? 4 : 2;	// ARM opcodes are 4 bytes, Thumb/MCR resume +2
		return 1;
		}
	return 0;
	}
sl@0
  1059
sl@0
  1060
/** Return the total CPU time so far used by the specified thread.

	@param aThread	Thread to query.

	@return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
*/
EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
	{
	TSubScheduler* ss = 0;
	NKern::Lock();
	aThread->AcqSLock();
	if (aThread->i_NThread_Initial)
		ss = &TheSubSchedulers[aThread->iLastCpu];	// initial thread: use its home CPU
	else if (aThread->iReady && aThread->iParent->iReady)
		ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
	if (ss)
		ss->iReadyListLock.LockOnly();	// freeze the ready list so timestamps are consistent
	TUint64 t = aThread->iTotalCpuTime64;
	// If the thread is running right now, add the time accrued since it was
	// last scheduled in.  For an initial thread, time accrues while no other
	// thread is current on that CPU.
	// NOTE(review): relies on ss being non-null whenever iCurrent is set
	// (short-circuiting protects the i_NThread_Initial case) - confirm that
	// a current thread is always marked ready.
	if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
		t += (NKern::Timestamp() - ss->iLastTimestamp64);
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	aThread->RelSLock();
	NKern::Unlock();
	return t;
	}
sl@0
  1084
sl@0
  1085
/** Queue a user-mode callback on a thread.

	The callback is pushed onto the head of the thread's lock-free
	singly-linked list of pending user-mode callbacks.

	@param aThread	 Thread on which to queue the callback.
	@param aCallback Callback to queue.

	@return KErrNone if queued, KErrInUse if aCallback is already queued,
	        KErrDied if the target thread is exiting.
*/
TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
	{
	__e32_memory_barrier();
	if (aCallback->iNext != KUserModeCallbackUnqueued)
		return KErrInUse;	// already on some thread's callback list
	TInt result = KErrDied;
	NKern::Lock();
	// Lock-free push onto the list head, retrying if another writer races us.
	TUserModeCallback* listHead = aThread->iUserModeCallbacks;
	do	{
		if (TLinAddr(listHead) & 3)	// low bits set in the head pointer flag an exiting thread
			goto done;	// thread exiting
		aCallback->iNext = listHead;
		} while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
	result = KErrNone;

	if (!listHead)	// if this isn't first callback someone else will have done this bit
		{
		/*
		 * If aThread is currently running on another CPU we need to send an IPI so
		 * that it will enter kernel mode and run the callback.
		 * The synchronization is tricky here. We want to check if the thread is
		 * running and if so on which core. We need to avoid any possibility of
		 * the thread entering user mode without having seen the callback,
		 * either because we thought it wasn't running so didn't send an IPI or
		 * because the thread migrated after we looked and we sent the IPI to
		 * the wrong processor. Sending a redundant IPI is not a problem (e.g.
		 * because the thread is running in kernel mode - which we can't tell -
		 * or because the thread stopped running after we looked)
		 * The following events are significant:
		 * Event A:	Target thread writes to iCurrent when it starts running
		 * Event B: Target thread reads iUserModeCallbacks before entering user
		 *			mode
		 * Event C: This thread writes to iUserModeCallbacks
		 * Event D: This thread reads iCurrent to check if aThread is running
		 * There is a DMB and DSB between A and B since A occurs with the ready
		 * list lock for the CPU involved or the thread lock for aThread held
		 * and this lock is released before B occurs.
		 * There is a DMB between C and D (part of __e32_atomic_cas_ord_ptr).
		 * Any observer which observes B must also have observed A.
		 * Any observer which observes D must also have observed C.
		 * If aThread observes B before C (i.e. enters user mode without running
		 * the callback) it must observe A before C and so it must also observe
		 * A before D (i.e. D reads the correct value for iCurrent).
		 */
		TInt current = aThread->iCurrent;
		if (current)
			{
			TInt cpu = current & NSchedulable::EReadyCpuMask;
			if (cpu != NKern::CurrentCpu())
				send_resched_ipi(cpu);	// force the target CPU through kernel mode
			}
		}
done:
	NKern::Unlock();
	return result;
	}
sl@0
  1141