// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\debug\context.cia
//
//

#ifndef __KERNEL_MODE__
#include "context.h"
#include <u32exec.h>
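
// Load r1-r12 with an incrementing pattern seeded from the value passed in
// r1, recording each value (together with the current r13 and r14) in the
// TArmRegSet that r0 points to; finally reload r0 from the snapshot's iR0
// slot so the live registers match the recorded ones on return.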
__NAKED__ void SetRegs()
	{
	asm("SetRegs:");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR0));
	asm("add r1, r1, #1 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR1));
	asm("add r2, r1, #1 ");
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR2));
	asm("add r3, r2, #1 ");
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR3));
	asm("add r4, r3, #1 ");
	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR4));
	asm("add r5, r4, #1 ");
	asm("str r5, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR5));
	asm("add r6, r5, #1 ");
	asm("str r6, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR6));
	asm("add r7, r6, #1 ");
	asm("str r7, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR7));
	asm("add r8, r7, #1 ");
	asm("str r8, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR8));
	asm("add r9, r8, #1 ");
	asm("str r9, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR9));
	asm("add r10, r9, #1 ");
	asm("str r10, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR10));
	asm("add r11, r10, #1 ");
	asm("str r11, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR11));
	asm("add r12, r11, #1 ");
	asm("str r12, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR12));
	asm("str r13, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR13));
	asm("str r14, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR14));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,iR0));
	__JUMP(,lr);
	}
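
// CHECK_REGA(reg,val) compares the slot 'reg' in the captured context (r0)
// against the literal 'val'; CHECK_REG(reg) compares it against the same
// slot in the expected context (r1). On a mismatch both return 0.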
#define CHECK_REGA(reg,val) \
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,reg)); \
	asm("ldr r3, ="#val); \
	asm("cmp r2, r3"); \
	asm("movne r0, #0"); \
	__JUMP(ne,lr);

#define CHECK_REG(reg) \
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TArmRegSet,reg)); \
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TArmRegSet,reg)); \
	asm("cmp r2, r3"); \
	asm("movne r0, #0"); \
	__JUMP(ne,lr);
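
// Thread function: seed the registers via SetRegs, then take an alignment
// fault on a deliberately misaligned load; ThreadContextHwExc_pc marks the
// pc value expected in the captured context.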
__NAKED__ TInt ThreadContextHwExc(TAny*)
	{
	asm("stmdb sp!, {r4-r11,lr} ");
	asm("mov r1, #0x80000000");
	asm("bl SetRegs");
	asm("ldr r0, [r13, #1]");		// Cause alignment fault
	asm("ThreadContextHwExc_pc:");
	asm("mov r0, #0 ");
	asm("ldmia sp!, {r4-r11,pc} ");
	}
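
// Check the context captured at the hardware exception: every register must
// match the SetRegs snapshot and the pc must be ThreadContextHwExc_pc.
// Returns 1 on success, 0 on the first mismatch.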
__NAKED__ TInt CheckContextHwExc(TArmRegSet* aContext, TArmRegSet* aSavedData)
	{
	CHECK_REG(iR0);
	CHECK_REG(iR1);
	CHECK_REG(iR2);
	CHECK_REG(iR3);
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	CHECK_REG(iR12);
	CHECK_REG(iR13);
	CHECK_REG(iR14);
	CHECK_REGA(iR15,ThreadContextHwExc_pc)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
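
// Thread function: seed the registers, then spin in user mode; the context
// is captured while the thread loops at ThreadContextUserInt_pc.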
__NAKED__ TInt ThreadContextUserInt(TAny*)
	{
	asm("mov r1, #0x70000000");
	asm("bl SetRegs");
	asm("ThreadContextUserInt_pc:");
	asm("b ThreadContextUserInt_pc");
	}
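
// Check the context captured while ThreadContextUserInt was spinning: the
// full register set must match, with the pc at ThreadContextUserInt_pc.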
__NAKED__ TInt CheckContextUserInt(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR0);
	CHECK_REG(iR1);
	CHECK_REG(iR2);
	CHECK_REG(iR3);
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	CHECK_REG(iR12);
	CHECK_REG(iR13);
	CHECK_REG(iR14);
	CHECK_REGA(iR15,ThreadContextUserInt_pc)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
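
// As CheckContextUserInt, but for a thread that has since died: only
// r0-r3, r12, r13, r14 and the pc are checked.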
__NAKED__ TInt CheckContextUserIntDied(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR0);
	CHECK_REG(iR1);
	CHECK_REG(iR2);
	CHECK_REG(iR3);
	CHECK_REG(iR12);
	CHECK_REG(iR13);
	CHECK_REG(iR14);
	CHECK_REGA(iR15,ThreadContextUserInt_pc)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
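
// Thread function: seed the registers, then block in the WaitForAnyRequest
// fast exec call so the context is captured while the thread waits inside
// the kernel.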
__NAKED__ TInt ThreadContextWFAR(TAny*)
	{
	asm("stmdb sp!, {r4-r11,lr} ");
	asm("mov r1, #0x60000000");
	asm("bl SetRegs");
	asm("adr lr, ThreadContextWFAR_return");
	FAST_EXEC0(EFastExecWaitForAnyRequest);
	asm("ThreadContextWFAR_return:");
	asm("mov r0, #0 ");
	asm("ldmia sp!, {r4-r11,pc} ");
	}
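
// Check the context captured during the blocked exec call: the callee-saved
// registers and sp must match, lr must be ThreadContextWFAR_return, and the
// pc must sit one instruction back, on the exec call itself.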
__NAKED__ TInt CheckContextWFAR(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	CHECK_REG(iR13);
	CHECK_REGA(iR14,ThreadContextWFAR_return)
	CHECK_REGA(iR15,ThreadContextWFAR_return - 4)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
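
// As CheckContextWFAR, but for a thread killed while blocked: only sp and
// the expected lr and pc values are checked.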
__NAKED__ TInt CheckContextWFARDied(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR13);
	CHECK_REGA(iR14,ThreadContextWFAR_return)
	CHECK_REGA(iR15,ThreadContextWFAR_return - 4)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
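
// Thread function: seed the registers, then suspend the current thread via
// a slow exec call (0xffff8001 is the current-thread handle), so the
// context is captured while the thread is suspended inside the kernel.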
__NAKED__ TInt ThreadContextExecCall(TAny*)
	{
	asm("stmdb sp!, {r4-r11,lr} ");
	asm("mov r1, #0x50000000");
	asm("bl SetRegs");
	asm("adr lr, ThreadContextExecCall_return");
	asm("ldr r0, current_thread_handle ");
	SLOW_EXEC1(EExecThreadSuspend);
	asm("ThreadContextExecCall_return:");
	asm("mov r0, #0 ");
	asm("ldmia sp!, {r4-r11,pc} ");
	asm("current_thread_handle: ");
	asm(".word 0xffff8001 ");
	}
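
// Check the context captured during the suspending exec call: the
// callee-saved registers and sp must match, lr must be
// ThreadContextExecCall_return, and the pc the instruction before it.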
__NAKED__ TInt CheckContextExecCall(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	CHECK_REG(iR13);
	CHECK_REGA(iR14,ThreadContextExecCall_return)
	CHECK_REGA(iR15,ThreadContextExecCall_return - 4)
	asm("mov r0,#1");
	__JUMP(,lr);
	}

//
// Simulate a software exception: first invoke the exec call that triggers
// the kernel-side exception handlers, then, on return, panic the current
// thread.
//

__NAKED__ TInt ThreadContextSwExc(TAny*)
	{
	asm("stmdb sp!, {r4-r11,lr} ");
	asm("mov r1, #0x50000000");
	asm("bl SetRegs");
	asm("adr lr, ThreadContextSwExc_return");
	asm("ldr r0, current_thread_handle ");
	asm("mov r2, #1");
	SLOW_EXEC3(EExecIsExceptionHandled);
	asm("ThreadContextSwExc_return:");
	asm("ldr r0, current_thread_handle ");
	asm("ldr r3, null_descriptor");
	asm("mov r1, #%a0 " : : "i" ((TInt)EExitPanic));
	asm("mov r2, #0");
	SLOW_EXEC4(EExecThreadKill);
	asm("ldmia sp!, {r4-r11,pc} ");
	asm("null_descriptor:");
	asm(".word 0x00000000");
	asm(".word 0x00000000");
	}
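
// Check the context captured at the simulated software exception: as in the
// exec-call case, lr must be ThreadContextSwExc_return and the pc the
// instruction before it.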
__NAKED__ TInt CheckContextSwExc(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	CHECK_REG(iR13);
	CHECK_REGA(iR14,ThreadContextSwExc_return)
	CHECK_REGA(iR15,ThreadContextSwExc_return - 4)
	asm("mov r0,#1");
	__JUMP(,lr);
	}
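
// Check the context of a thread interrupted while running kernel-side:
// only the callee-saved registers can be verified (see the comments below
// on sp and pc).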
__NAKED__ TInt CheckContextKernel(TArmRegSet*,TArmRegSet*)
	{
	CHECK_REG(iR4);
	CHECK_REG(iR5);
	CHECK_REG(iR6);
	CHECK_REG(iR7);
	CHECK_REG(iR8);
	CHECK_REG(iR9);
	CHECK_REG(iR10);
	CHECK_REG(iR11);
	// can't test r13 because we don't know how much the irq vector pushes onto the stack
//	CHECK_REG(iR13);
	// can't really test r15 because pc is somewhere in the irq
	// vector and we don't export that address
	asm("mov r0,#1");
	__JUMP(,lr);
	}

#else

#include <e32def.h>
#include <cpudefs.h>
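
// Kernel-side helper: with a zero argument just return the current kernel
// sp; otherwise load the registers with a recognisable pattern (0xa0000000
// upwards) and spin forever so the context can be sampled.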
__NAKED__ TUint32 SpinInKernel(TBool)
	{
	asm("cmp r0, #0 ");
#ifdef __SMP__
	asm("mov r0, sp ");
#else
	asm("mov r0, sp");
//	asm("sub r0, sp, #32 ");	// IRQ mode pushes 8 extra registers <--- NOT TRUE
#endif
	asm("beq exit ");
	asm("mov r0, #0xa0000000 ");
	asm("add r1, r0, #1 ");
	asm("add r2, r1, #1 ");
	asm("add r3, r2, #1 ");
	asm("add r4, r3, #1 ");
	asm("add r5, r4, #1 ");
	asm("add r6, r5, #1 ");
	asm("add r7, r6, #1 ");
	asm("add r8, r7, #1 ");
	asm("add r9, r8, #1 ");
	asm("add r10, r9, #1 ");
	asm("add r11, r10, #1 ");
	asm("add r12, r11, #1 ");
	asm("add r14, r12, #2 ");
	asm("loopforever: ");
	asm("b loopforever ");
	asm("exit: ");
	__JUMP(,lr);
	}

#endif