// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\ncthrd.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#define __INCLUDE_REG_OFFSETS__
#include <arm.h>

const TInt KNThreadMinStackSize = 0x100;    // needs to be enough for interrupt + reschedule stack

// Called by a thread when it first runs
extern void __StartThread();

// Called by a thread which has been forced to exit
// Interrupts off here, kernel unlocked
extern void __DoForcedExit();

void NThreadBase::SetEntry(NThreadFunction aFunc)
    {
    TUint32* sp=(TUint32*)iSavedSP;
    sp[SP_R5]=(TUint32)aFunc;
    }

TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    // Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
    __NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);

    __NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
    TInt r=NThreadBase::Create(aInfo,aInitial);
    if (r!=KErrNone)
        return r;
    if (!aInitial)
        {
        TUint32* sp=(TUint32*)(iStackBase+iStackSize-aInfo.iParameterBlockSize);
        TUint32 r6=(TUint32)aInfo.iParameterBlock;
        if (aInfo.iParameterBlockSize)
            {
            wordmove(sp,aInfo.iParameterBlock,aInfo.iParameterBlockSize);
            r6=(TUint32)sp;
            }
        *--sp=(TUint32)__StartThread;           // PC
        *--sp=0;                                // R11
        *--sp=0;                                // R10
        *--sp=0;                                // R9
        *--sp=0;                                // R8
        *--sp=0;                                // R7
        *--sp=r6;                               // R6
        *--sp=(TUint32)aInfo.iFunction;         // R5
        *--sp=(TUint32)this;                    // R4
        *--sp=0x13;                             // SPSR_SVC
        *--sp=0;                                // R14_USR
        *--sp=0;                                // R13_USR
#ifdef __CPU_ARM_USE_DOMAINS
        *--sp=Arm::DefaultDomainAccess;         // DACR
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
        *--sp=Arm::DefaultCoprocessorAccess;    // CAR
#endif
#ifdef __CPU_HAS_VFP
        *--sp=VFP_FPEXC_THRD_INIT;              // FPEXC
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
        *--sp=0;                                // TID
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
        *--sp=0;                                // ThumbEE Base
#endif
        iSavedSP=(TLinAddr)sp;
        }
    else
        {
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#ifdef __CPU_HAS_VFP
#ifdef __CPU_XSCALE__
        Arm::ModifyCar(0, 0x0c00);              // enable CP10, CP11
#else
        Arm::ModifyCar(0, 0x00f00000);          // full access to CP10, CP11
#endif
#endif
        Arm::DefaultCoprocessorAccess = Arm::Car();
#endif
        NKern::EnableAllInterrupts();
        }
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
#endif
    return KErrNone;
    }
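
// Illustrative sketch (not part of this file): how a caller might fill in the
// SNThreadCreateInfo fields consumed by NThread::Create() above. Only fields
// referenced in Create() are shown; the entry point, stack area and parameter
// block names are assumptions made for the example.
//
//     SNThreadCreateInfo info;
//     info.iFunction = MyThreadEntryPoint;            // ends up in the initial frame's R5 slot
//     info.iStackBase = myStackArea;                  // pre-allocated stack for the thread
//     info.iStackSize = myStackSize;                  // must be >= iParameterBlockSize + KNThreadMinStackSize
//     info.iParameterBlock = (TUint32*)&myParams;     // copied to the stack top, passed to the thread in R6
//     info.iParameterBlockSize = sizeof(myParams);    // 0 would pass the pointer value itself in R6 instead
//     TInt r = thread->Create(info, EFalse);          // EFalse: not the initial (boot-time) thread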
/** Called from generic layer when thread is killed asynchronously.

For ARM, save reason for last user->kernel switch (if any) so that user
context can be accessed from EDebugEventRemoveThread hook. Must be done
before forcing the thread to exit as this alters the saved return address
which is used to figure out where the context is saved.

@pre kernel locked
@post kernel locked
*/

void NThreadBase::OnKill()
    {
    if (iUserContextType != NThread::EContextNone)
        {
        NThread::TUserContextType t = ((NThread*)this)->UserContextType();
        switch (t)
            {
            case NThread::EContextUserInterrupt:
                t = NThread::EContextUserInterruptDied;
                break;
            case NThread::EContextSvsrInterrupt1:
                t = NThread::EContextSvsrInterrupt1Died;
                break;
            case NThread::EContextSvsrInterrupt2:
                t = NThread::EContextSvsrInterrupt2Died;
                break;
            case NThread::EContextWFAR:
                t = NThread::EContextWFARDied;
                break;
            default:
                // NOP
                break;
            }
        iUserContextType = t;
        }
    }

/** Called from generic layer when thread exits.

For ARM, save that if the thread terminates synchronously the last
user->kernel switch was an exec call. Do nothing if non-user thread or
reason already saved in OnKill().

@pre kernel locked
@post kernel locked
@see OnKill
*/

void NThreadBase::OnExit()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThreadBase::OnExit");
    if (iUserContextType == NThread::EContextUndefined)
        iUserContextType = NThread::EContextExec;
    }

void NThreadBase::ForceExit()
    {
    TUint32* sp=(TUint32*)iSavedSP;
    sp[SP_PC]=(TUint32)__DoForcedExit;
    }

void DumpExcInfo(TArmExcInfo& a)
    {
    DEBUGPRINT("Exc %1d Cpsr=%08x FAR=%08x FSR=%08x",a.iExcCode,a.iCpsr,a.iFaultAddress,a.iFaultStatus);
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x",a.iR0,a.iR1,a.iR2,a.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x",a.iR4,a.iR5,a.iR6,a.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x",a.iR8,a.iR9,a.iR10,a.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x",a.iR12,a.iR13,a.iR14,a.iR15);
    DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);
    DEBUGPRINT("Thread %T, KernCSLocked=%d",TheScheduler.iCurrentThread,TheScheduler.iKernCSLocked);
    }

void DumpFullRegSet(SFullArmRegSet& a)
    {
    SNormalRegs& r = a.iN;
    DEBUGPRINT("MODE_USR:");
    DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x", r.iR0, r.iR1, r.iR2, r.iR3);
    DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x", r.iR4, r.iR5, r.iR6, r.iR7);
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8, r.iR9, r.iR10, r.iR11);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x", r.iR12, r.iR13, r.iR14, r.iR15);
    DEBUGPRINT("CPSR=%08x", r.iFlags);
    DEBUGPRINT("MODE_FIQ:");
    DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8Fiq, r.iR9Fiq, r.iR10Fiq, r.iR11Fiq);
    DEBUGPRINT("R12=%08x R13=%08x R14=%08x SPSR=%08x", r.iR12Fiq, r.iR13Fiq, r.iR14Fiq, r.iSpsrFiq);
    DEBUGPRINT("MODE_IRQ:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Irq, r.iR14Irq, r.iSpsrIrq);
    DEBUGPRINT("MODE_SVC:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Svc, r.iR14Svc, r.iSpsrSvc);
    DEBUGPRINT("MODE_ABT:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Abt, r.iR14Abt, r.iSpsrAbt);
    DEBUGPRINT("MODE_UND:");
    DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Und, r.iR14Und, r.iSpsrUnd);
//  DEBUGPRINT("MODE_MON:");
//  DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Mon, r.iR14Mon, r.iSpsrMon);

    SAuxiliaryRegs& aux = a.iA;
    DEBUGPRINT("TEEHBR=%08x CPACR=%08x", aux.iTEEHBR, aux.iCPACR);

    SBankedRegs& b = a.iB[0];
    DEBUGPRINT(" SCTLR=%08x ACTLR=%08x PRRR=%08x NMRR=%08x", b.iSCTLR, b.iACTLR, b.iPRRR, b.iNMRR);
    DEBUGPRINT(" DACR=%08x TTBR0=%08x TTBR1=%08x TTBCR=%08x", b.iDACR, b.iTTBR0, b.iTTBR1, b.iTTBCR);
    DEBUGPRINT(" VBAR=%08x FCSEID=%08x CTXIDR=%08x", b.iVBAR, b.iFCSEIDR, b.iCTXIDR);
    DEBUGPRINT("Thread ID RWRW=%08x RWRO=%08x RWNO=%08x", b.iRWRWTID, b.iRWROTID, b.iRWNOTID);
    DEBUGPRINT(" DFSR=%08x DFAR=%08x IFSR=%08x IFAR=%08x", b.iDFSR, b.iDFAR, b.iIFSR, b.iIFAR);
    DEBUGPRINT(" ADFSR=%08x AIFSR=%08x", b.iADFSR, b.iAIFSR);
#ifdef __CPU_HAS_VFP
    DEBUGPRINT("FPEXC %08x", a.iMore[0]);
#endif
    DEBUGPRINT("ExcCode %08x", a.iExcCode);
    }

#define CONTEXT_ELEMENT_UNDEFINED(val) \
    { \
    TArmContextElement::EUndefined, \
    val \
    }

#define CONTEXT_ELEMENT_EXCEPTION(reg) \
    { \
    TArmContextElement::EOffsetFromStackTop, \
    (- (-sizeof(TArmExcInfo)+_FOFF(TArmExcInfo,reg)) )>>2 \
    }

#define CONTEXT_ELEMENT_FROM_SP(offset) \
    { \
    TArmContextElement::EOffsetFromSp, \
    offset \
    }

#define CONTEXT_ELEMENT_FROM_STACK_TOP(offset) \
    { \
    TArmContextElement::EOffsetFromStackTop, \
    offset \
    }

#define CONTEXT_ELEMENT_SP_PLUS(offset) \
    { \
    TArmContextElement::ESpPlusOffset, \
    offset \
    }

const TArmContextElement ContextTableException[] =
    {
    CONTEXT_ELEMENT_EXCEPTION(iR0),
    CONTEXT_ELEMENT_EXCEPTION(iR1),
    CONTEXT_ELEMENT_EXCEPTION(iR2),
    CONTEXT_ELEMENT_EXCEPTION(iR3),
    CONTEXT_ELEMENT_EXCEPTION(iR4),
    CONTEXT_ELEMENT_EXCEPTION(iR5),
    CONTEXT_ELEMENT_EXCEPTION(iR6),
    CONTEXT_ELEMENT_EXCEPTION(iR7),
    CONTEXT_ELEMENT_EXCEPTION(iR8),
    CONTEXT_ELEMENT_EXCEPTION(iR9),
    CONTEXT_ELEMENT_EXCEPTION(iR10),
    CONTEXT_ELEMENT_EXCEPTION(iR11),
    CONTEXT_ELEMENT_EXCEPTION(iR12),
    CONTEXT_ELEMENT_EXCEPTION(iR13),
    CONTEXT_ELEMENT_EXCEPTION(iR14),
    CONTEXT_ELEMENT_EXCEPTION(iR15),
    CONTEXT_ELEMENT_EXCEPTION(iCpsr),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableUndefined[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which have been preempted by an interrupt
// while in user mode.

const TArmContextElement ContextTableUserInterrupt[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_SP(SP_R11),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),      // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed after being
// preempted by interrupt while in user mode.

const TArmContextElement ContextTableUserInterruptDied[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),      // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler either before the return address was
// saved or after the registers were restored.

const TArmContextElement ContextTableSvsrInterrupt1[] =
    {
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_SP(SP_R11),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6),    // r15 = r12
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed while in the situation
// described above (see ContextTableSvsrInterrupt1).

const TArmContextElement ContextTableSvsrInterrupt1Died[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been preempted by an interrupt while in
// supervisor mode in the SWI handler after the return address was saved.

const TArmContextElement ContextTableSvsrInterrupt2[] =
    {
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_NEXT+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads which have been asynchronously killed while in the situation
// described above (see ContextTableSvsrInterrupt2).

const TArmContextElement ContextTableSvsrInterrupt2Died[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads blocked on their request semaphore.

const TArmContextElement ContextTableWFAR[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),
    CONTEXT_ELEMENT_FROM_SP(SP_R5),
    CONTEXT_ELEMENT_FROM_SP(SP_R6),
    CONTEXT_ELEMENT_FROM_SP(SP_R7),
    CONTEXT_ELEMENT_FROM_SP(SP_R8),
    CONTEXT_ELEMENT_FROM_SP(SP_R9),
    CONTEXT_ELEMENT_FROM_SP(SP_R10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for threads killed asynchronously while blocked on their request
// semaphore.

const TArmContextElement ContextTableWFARDied[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement ContextTableExec[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_UNDEFINED(EUserMode),   // can't get flags so just use 'user mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used to retrieve a thread's kernel side context.
// Used for kernel threads.
const TArmContextElement ContextTableKernel[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R4),         // r4 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R5),         // r5 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R6),         // r6 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R7),         // r7 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R8),         // r8 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R9),         // r9 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R10),        // r10 before reschedule
    CONTEXT_ELEMENT_FROM_SP(SP_R11),        // r11 before reschedule
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_SP_PLUS(SP_NEXT),       // supervisor stack pointer before reschedule
    CONTEXT_ELEMENT_UNDEFINED(0),           // supervisor lr is unknown
    CONTEXT_ELEMENT_FROM_SP(SP_PC),         // return address from reschedule
    CONTEXT_ELEMENT_UNDEFINED(ESvcMode),    // can't get flags so just use 'supervisor mode'
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which are in a user callback while returning
// from having been preempted by an interrupt while in user mode.

const TArmContextElement ContextTableUserIntrCallback[] =
    {
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+4),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+3),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8+USER_MEMORY_GUARD_SAVE_WORDS+2),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),      // interrupted CPSR
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

// Table used for non-dying threads which are in a user callback while returning
// from being blocked on their request semaphore.

const TArmContextElement ContextTableWFARCallback[] =
    {
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_STACK_TOP(11),
    CONTEXT_ELEMENT_FROM_STACK_TOP(10),
    CONTEXT_ELEMENT_FROM_STACK_TOP(9),
    CONTEXT_ELEMENT_FROM_STACK_TOP(8),
    CONTEXT_ELEMENT_FROM_STACK_TOP(7),
    CONTEXT_ELEMENT_FROM_STACK_TOP(6),
    CONTEXT_ELEMENT_FROM_STACK_TOP(5),
    CONTEXT_ELEMENT_FROM_STACK_TOP(2),
    CONTEXT_ELEMENT_UNDEFINED(0),
    CONTEXT_ELEMENT_FROM_SP(SP_R13U),
    CONTEXT_ELEMENT_FROM_SP(SP_R14U),
    CONTEXT_ELEMENT_FROM_STACK_TOP(1),
    CONTEXT_ELEMENT_FROM_SP(SP_SPSR),
    CONTEXT_ELEMENT_UNDEFINED(0),
    };

const TArmContextElement* const ThreadUserContextTables[] =
    {
    ContextTableUndefined,              // EContextNone
    ContextTableException,
    ContextTableUndefined,
    ContextTableUserInterrupt,
    ContextTableUserInterruptDied,
    ContextTableSvsrInterrupt1,
    ContextTableSvsrInterrupt1Died,
    ContextTableSvsrInterrupt2,
    ContextTableSvsrInterrupt2Died,
    ContextTableWFAR,
    ContextTableWFARDied,
    ContextTableExec,
    ContextTableKernel,
    ContextTableUserIntrCallback,
    ContextTableWFARCallback,
    0                                   // Null terminated
    };

/** Return table of pointers to user context tables.

Each user context table is an array of TArmContextElement objects, one per
ARM CPU register, in the order defined in TArmRegisters.

The master table contains pointers to the user context tables in the order
defined in TUserContextType. There are as many user context tables as there
are scenarios that lead a user thread to switch to privileged mode.

Stop-mode debug agents should use this function to store the address of the
master table at a location known to the host debugger. Run-mode debug
agents are advised to use NKern::GetUserContext() and
NKern::SetUserContext() instead.

@return A pointer to the master table. The master table is NULL
terminated. The master and user context tables are guaranteed to remain at
the same location for the lifetime of the OS execution, so it is safe to
cache the returned address.

@see UserContextType
@see TArmContextElement
@see TArmRegisters
@see TUserContextType
@see NKern::SetUserContext
@see NKern::GetUserContext

@publishedPartner
*/
EXPORT_C const TArmContextElement* const* NThread::UserContextTables()
    {
    return &ThreadUserContextTables[0];
    }
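
// Illustrative sketch (not part of this file): how a stop-mode debug agent
// might decode a single register using these tables, mirroring the logic of
// NThread::GetContext() below. "savedSp" and "stackTop" stand for the thread's
// iSavedSP and iStackBase+iStackSize as read from target memory, and "type" is
// the thread's TUserContextType; all three are assumptions of the example.
//
//     const TArmContextElement* table = NThread::UserContextTables()[type];
//     const TArmContextElement& e = table[EArmSp];    // element describing the user SP
//     TUint32 value = 0;
//     switch (e.iType)
//         {
//         case TArmContextElement::EOffsetFromSp:       value = savedSp[e.iValue]; break;
//         case TArmContextElement::EOffsetFromStackTop: value = stackTop[-e.iValue]; break;
//         case TArmContextElement::ESpPlusOffset:       value = (TUint32)(savedSp + e.iValue); break;
//         case TArmContextElement::EUndefined:          value = e.iValue; break;    // register not saved
//         }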
#ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
extern TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/);

/** Get a value which indicates where a thread's user mode context is stored.

@return A value that can be used as an index into the tables returned by
NThread::UserContextTables().

@pre any context
@pre kernel locked
@post kernel locked

@see UserContextTables
@publishedPartner
*/
EXPORT_C NThread::TUserContextType NThread::UserContextType()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThread::UserContextType");
    // Dying thread? use context saved earlier by kernel
    if (iCsFunction == ECSExitInProgress)
        return (TUserContextType)iUserContextType;

    // Check for EContextNone and EContextException
    // Also EContextUserIntrCallback and EContextWFARCallback
    if(iUserContextType<=EContextException || iUserContextType==EContextUserIntrCallback
            || iUserContextType==EContextWFARCallback)
        return (TUserContextType)iUserContextType;

    // Getting current thread context? must be in exec call as exception
    // and dying thread cases were tested above.
    if (this == NCurrentThread())
        return EContextExec;

    // Check what caused the thread to enter supervisor mode
    TUint32* sst=(TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TInt n=sst-sp;                      // number of words on the supervisor stack
    TUint32 resched_ret=sp[SP_PC];      // return address from reschedule
    if (RescheduledAfterInterrupt(resched_ret))
        {
        // thread was preempted due to an interrupt
        // interrupt and reschedule will have pushed 20+EXTRA words onto the stack
        if ((sp[SP_NEXT]&EMaskMode)==EUserMode)     // interrupted mode = user?
            return NThread::EContextUserInterrupt;
        if (n<(30+EXTRA_WORDS))     // n<30 if interrupt occurred in exec call entry before r3-r10 saved
            {                       // or after r3-r10 restored
            if (n==(20+EXTRA_WORDS))
                {
                // interrupt occurred before the return address and r11 were saved,
                // or after the registers were restored
                return EContextSvsrInterrupt1;
                }
            else
                {
                // interrupt occurred after the return address and r11 were saved
                return EContextSvsrInterrupt2;
                }
            }
        // thread was interrupted in supervisor mode
        // return address and r3-r11 were saved
        }

    // Transition to supervisor mode must have been due to a SWI
    if (n==(15+EXTRA_WORDS))
        {
        // thread must have blocked doing Exec::WaitForAnyRequest
        return EContextWFAR;
        }

    // Thread must have been in a SLOW or UNPROTECTED Exec call
    return EContextExec;
    }

#endif  // __USER_CONTEXT_TYPE_MACHINE_CODED__

// Enter and return with kernel locked
void NThread::GetContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask, const TArmContextElement* aContextTable)
    {
    TUint32* sp = (TUint32*)iSavedSP;
    TUint32* st = (TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TArmReg* out = (TArmReg*)(&aContext);
    TBool currentThread = (NCurrentThread() == this);

    aAvailRegistersMask = 0;
    if (iNState == EDead)
        {// This thread's stack may no longer exist so just exit.
        return;
        }

    // Copy available context into provided structure.
    for (TInt i = 0; i<KArmRegisterCount; ++i)
        {
        TInt v = aContextTable[i].iValue;
        TInt t = aContextTable[i].iType;
        if(!currentThread && t==TArmContextElement::EOffsetFromSp)
            {
            // thread has been preempted, it is safe to fetch its context
            // from the info saved in Reschedule().
            v = sp[v];
            aAvailRegistersMask |= (1<<i);
            }
        else if(t==TArmContextElement::EOffsetFromStackTop)
            {
            v = st[-v];
            aAvailRegistersMask |= (1<<i);
            }
        else if(!currentThread && t==TArmContextElement::ESpPlusOffset)
            {
            v = (TInt)(sp+v);
            aAvailRegistersMask |= (1<<i);
            }
        out[i] = v;
        }

    // Getting context of current thread? some values can be fetched directly
    // from the registers if they are not available from the stack.
    if (currentThread && aContextTable[EArmSp].iType == TArmContextElement::EOffsetFromSp)
        {
        Arm::GetUserSpAndLr(out+EArmSp);
        aAvailRegistersMask |= (1<<EArmSp) | (1<<EArmLr);
        }
    }

// Enter and return with kernel locked
void NThread::GetUserContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
    {
    TUserContextType type=UserContextType();
    NThread::GetContext(aContext, aAvailRegistersMask, UserContextTables()[type]);
    }

// Enter and return with kernel locked
void NThread::GetSystemContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
    {
    NThread::GetContext(aContext, aAvailRegistersMask, UserContextTables()[EContextKernel]);
    }

// Enter and return with kernel locked
void NThread::SetUserContext(const TArmRegSet& aContext)
    {
    if (iNState == EDead)
        {// This thread's stack may no longer exist so just exit.
        return;
        }
    TUserContextType type=UserContextType();
    const TArmContextElement* c = NThread::UserContextTables()[type];
    TUint32* sp = (TUint32*)iSavedSP;
    TUint32* st = (TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    TArmReg* in = (TArmReg*)(&aContext);
    TBool currentThread = (NCurrentThread() == this);

    // Check that target thread is in USR mode, and update only the flags part of the PSR
    TUint32 tFlags = 0;
    TUint32* tFlagsPtr = &tFlags;
    TUint32 flagsCtxValue = c[EArmFlags].iValue;
    switch (c[EArmFlags].iType)     // describes how to interpret flagsCtxValue
        {
        case TArmContextElement::EUndefined:
            // Flags register not saved; not necessarily an error, but we can't update the flags
            tFlags = flagsCtxValue;     // use mode bits of flagsCtxValue itself
            break;

        case TArmContextElement::EOffsetFromStackTop:
            // Flags register saved, flagsCtxValue is offset from ToS
            tFlagsPtr = &st[-flagsCtxValue];
            break;

        case TArmContextElement::EOffsetFromSp:
            // Flags register saved, flagsCtxValue is offset from SP
            if (!currentThread)
                tFlagsPtr = &sp[flagsCtxValue];
            else
                {
                // This can only occur when the thread is exiting. Therefore,
                // we allow it, but the changed values will never be used.
                tFlags = 0x10;
                }
            break;

        default:
            // Assertion below will fail with default value ...
            ;
        }

    tFlags = *tFlagsPtr;                            // retrieve saved flags
    __NK_ASSERT_ALWAYS((tFlags & 0x1f) == 0x10);    // target thread must be in USR mode
    const TUint32 writableFlags = 0xF80F0000;       // NZCVQ.......GE3-0................
    tFlags &= ~writableFlags;
    tFlags |= in[EArmFlags] & writableFlags;
    *tFlagsPtr = tFlags;                            // update saved flags

    // Copy provided context into stack if possible
    for (TInt i = 0; i<KArmRegisterCount; ++i)
        {
        // The Flags were already processed above, and we don't allow
        // changing the DACR, so we can just skip these two index values
        if (i == EArmFlags || i == EArmDacr)
            continue;

        TInt v = c[i].iValue;
        TInt t = c[i].iType;
        if(!currentThread && t==TArmContextElement::EOffsetFromSp)
            {
            // thread has been preempted, it is safe to change context
            // saved in Reschedule().
            sp[v] = in[i];
            }
        if(t==TArmContextElement::EOffsetFromStackTop)
            st[-v] = in[i];
        }

    // Current thread? some values can be loaded straight into the registers
    // if they haven't been stored on the stack yet.
    if (currentThread && c[EArmSp].iType == TArmContextElement::EOffsetFromSp)
        Arm::SetUserSpAndLr(in+EArmSp);
    }

// Modify a non-running thread's user stack pointer
// Enter and return with kernel locked
void NThread::ModifyUsp(TLinAddr aUsp)
    {
    // Check what caused the thread to enter supervisor mode
    TUint32* sst=(TUint32*)((TUint32)iStackBase+(TUint32)iStackSize);
    if (iSpare3)
        {
        // exception caused transition to supervisor mode
        TArmExcInfo& e=((TArmExcInfo*)sst)[-1];
        e.iR13=aUsp;
        return;
        }
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    sp[SP_R13U]=aUsp;
    }

/** Get (subset of) user context of specified thread.

The nanokernel does not systematically save all registers in the supervisor
stack on entry into privileged mode and the exact subset depends on why the
switch to privileged mode occurred. So in general only a subset of the
register set is available.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetUserContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }
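
// Illustrative sketch (not part of this file): typical use by a run-mode debug
// agent. "target" stands for the NThread being inspected and is an assumption
// of the example.
//
//     TArmRegSet regs;
//     TUint32 avail = 0;
//     NKern::ThreadGetUserContext(target, &regs, avail);
//     if (avail & (1<<0))
//         {
//         // bit 0 set: R0 was saved on the supervisor stack and regs holds its value
//         }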
/** Get (subset of) system context of specified thread.

@param aThread Thread to inspect. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure where the context is
copied.

@param aAvailRegistersMask Bit mask telling which subset of the context is
available and has been copied to aContext (1: register available / 0: not
available). Bit 0 stands for register R0.

@see TArmRegSet
@see ThreadSetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    memclr(aContext, sizeof(TArmRegSet));
    NKern::Lock();
    aThread->GetSystemContext(a, aAvailRegistersMask);
    NKern::Unlock();
    }

/** Set (subset of) user context of specified thread.

@param aThread Thread to modify. It can be the current thread or a
non-current one.

@param aContext Pointer to TArmRegSet structure containing the context
to set. The values of registers which aren't part of the context saved
on the supervisor stack are ignored.

@see TArmRegSet
@see ThreadGetUserContext

@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
    TArmRegSet& a=*(TArmRegSet*)aContext;
    NKern::Lock();
    aThread->SetUserContext(a);
    NKern::Unlock();
    }
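
// Illustrative sketch (not part of this file): a get/modify/set round trip.
// Only registers that are actually saved on the supervisor stack are written
// back; the rest of the supplied context is ignored, as documented above.
// "target" and the new R0 value are assumptions of the example.
//
//     TArmRegSet regs;
//     TUint32 avail = 0;
//     NKern::ThreadGetUserContext(target, &regs, avail);
//     regs.iR0 = KErrCancel;                      // e.g. patch the thread's saved R0
//     NKern::ThreadSetUserContext(target, &regs);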
/** @internalComponent */
void NKern::ThreadModifyUsp(NThread* aThread, TLinAddr aUsp)
    {
    NKern::Lock();
    aThread->ModifyUsp(aUsp);
    NKern::Unlock();
    }

#ifdef __CPU_ARM_USE_DOMAINS
TUint32 NThread::Dacr()
    {
    if (this==TheScheduler.iCurrentThread)
        return Arm::Dacr();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 dacr=sp[SP_DACR];
    NKern::Unlock();
    return dacr;
    }

void NThread::SetDacr(TUint32 aDacr)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetDacr(aDacr);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    sp[SP_DACR]=aDacr;
    NKern::Unlock();
    }

TUint32 NThread::ModifyDacr(TUint32 aClearMask, TUint32 aSetMask)
    {
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyDacr(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 dacr=sp[SP_DACR];
    sp[SP_DACR]=(dacr&~aClearMask)|aSetMask;
    NKern::Unlock();
    return dacr;
    }
#endif

#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
void NThread::SetCar(TUint32 aCar)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetCar(aCar);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    sp[SP_CAR]=aCar;
    NKern::Unlock();
    }
#endif

/** Get the saved coprocessor access register value for a thread

@return The saved value of the CAR, 0 if CPU doesn't have CAR
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::Car()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::Car");
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    if (this==TheScheduler.iCurrentThread)
        return Arm::Car();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 car=sp[SP_CAR];
    NKern::Unlock();
    return car;
#else
    return 0;
#endif
    }

/** Modify the saved coprocessor access register value for a thread
Does nothing if CPU does not have CAR.

@param aClearMask Mask of bits to clear (1 = clear this bit)
@param aSetMask Mask of bits to set (1 = set this bit)
@return The original saved value of the CAR, 0 if CPU doesn't have CAR
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::ModifyCar(TUint32 aClearMask, TUint32 aSetMask)
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::ModifyCar");
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyCar(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 car=sp[SP_CAR];
    sp[SP_CAR]=(car&~aClearMask)|aSetMask;
    NKern::Unlock();
    return car;
#else
    return 0;
#endif
    }

#ifdef __CPU_HAS_VFP
void NThread::SetFpExc(TUint32 aVal)
    {
    if (this==TheScheduler.iCurrentThread)
        Arm::SetFpExc(aVal);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    sp[SP_FPEXC]=aVal;
    NKern::Unlock();
    }
#endif

/** Get the saved VFP FPEXC register value for a thread

@return The saved value of FPEXC, 0 if VFP not present
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::FpExc()
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::FpExc");
#ifdef __CPU_HAS_VFP
    if (this==TheScheduler.iCurrentThread)
        return Arm::FpExc();
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 r=sp[SP_FPEXC];
    NKern::Unlock();
    return r;
#else
    return 0;
#endif
    }

/** Modify the saved VFP FPEXC register value for a thread
Does nothing if VFP not present

@param aClearMask Mask of bits to clear (1 = clear this bit)
@param aSetMask Mask of bits to set (1 = set this bit)
@return The original saved value of FPEXC, 0 if VFP not present
@pre Don't call from ISR

@publishedPartner
@released
*/
EXPORT_C TUint32 NThread::ModifyFpExc(TUint32 aClearMask, TUint32 aSetMask)
    {
    CHECK_PRECONDITIONS(MASK_NOT_ISR,"NThread::ModifyFpExc");
#ifdef __CPU_HAS_VFP
    if (this==TheScheduler.iCurrentThread)
        return Arm::ModifyFpExc(aClearMask,aSetMask);
    NKern::Lock();
    TUint32* sp=(TUint32*)iSavedSP;     // saved supervisor stack pointer
    TUint32 r=sp[SP_FPEXC];
    sp[SP_FPEXC]=(r&~aClearMask)|aSetMask;
    NKern::Unlock();
    return r;
#else
    return 0;
#endif
    }
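
// Illustrative sketch (not part of this file): adjusting a thread's saved
// coprocessor access using the same CP10/CP11 mask that NThread::Create()
// applies on non-XScale CPUs. "thread" is an assumption of the example.
//
//     TUint32 oldCar = thread->ModifyCar(0, 0x00f00000);  // grant full access to CP10, CP11
//     TUint32 newCar = thread->Car();                      // read back the saved CAR value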