// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\ncsched.cia
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include <e32cia.h>
#include <arm.h>
#include "highrestimer.h"
#include "nkern.h"
#include "emievents.h"

#if defined(MONITOR_THREAD_CPU_TIME) && !defined(HAS_HIGH_RES_TIMER)
#error MONITOR_THREAD_CPU_TIME is defined, but high res timer is not supported
#endif

#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs) asm("mov "#rs", #0xdf ");\
asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
asm("str "#rs", ["#rp"] ");\
asm("str "#rs", ["#rp", #4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#endif

#define ALIGN_STACK_START \
asm("mov r12, sp"); \
asm("tst sp, #4"); \
asm("subeq sp, sp, #4"); \
asm("str r12, [sp,#-4]!")

#define ALIGN_STACK_END \
asm("ldr sp, [sp]")


#ifdef __CPU_HAS_VFP
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#define FPEXC_REG 10
#define FPEXC_REG3 4
#else
#define FPEXC_REG 11
#define FPEXC_REG3 10
#endif
#endif

//////////////////////////////////////////////////////////////////////////////
// Macros to define which standard ARM registers are used to save
// required co-processor registers on a reschedule.
// They rely on the fact that the compiler will concatenate adjacent strings
// so "r" "9" "," "r" "10" "," will be converted in the assembler file to:
// r9,r10
/////////////////////////////////////////////////////////////////////////////

#ifdef __CPU_HAS_CP15_THREAD_ID_REG
#define TID_SP_REG(reg) "r"#reg","
#else
#define TID_SP_REG(reg)
#endif //__CPU_HAS_CP15_THREAD_ID_REG

#ifdef __CPU_HAS_VFP
#define FPEXC_SP_REG(reg) "r"#reg","
#else
#define FPEXC_SP_REG(reg)
#endif //__CPU_HAS_VFP

#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
#define CAR_SP_REG(reg) "r"#reg","
#else
#define CAR_SP_REG(reg)
#endif //__CPU_HAS_COPROCESSOR_ACCESS_REG

#ifdef __CPU_ARM_USE_DOMAINS
#define DACR_SP_REG(reg) "r"#reg","
#else
#define DACR_SP_REG(reg)
#endif //__CPU_ARM_USE_DOMAINS

#ifdef __CPU_SUPPORT_THUMB2EE
#define THUMB2EE_SP_REG(reg) "r"#reg","
#else
#define THUMB2EE_SP_REG(reg)
#endif // __CPU_SUPPORT_THUMB2EE

// NOTE THIS WILL PRODUCE A WARNING IF REGISTERS ARE NOT IN ASCENDING ORDER
#define EXTRA_STACK_LIST(thumb2ee, tid, fpexc, car, dacr)\
THUMB2EE_SP_REG(thumb2ee) TID_SP_REG(tid) FPEXC_SP_REG(fpexc) CAR_SP_REG(car) DACR_SP_REG(dacr)
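// For example (illustration only, assuming a CPU with all of the above features
// enabled), EXTRA_STACK_LIST(3, 4, 5, 6, 7) expands to the adjacent string
// literals "r3," "r4," "r5," "r6," "r7,", which the compiler concatenates into
// r3,r4,r5,r6,r7, inside an asm register list.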

//////////////////////////////////////////////////////////////////////////////

//#define __DEBUG_BAD_ADDR

extern "C" void PanicFastSemaphoreWait();

#ifdef __DFC_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(TDfcQue,iPresent) == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iNext) == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iPrev) == 4);
__ASSERT_COMPILE(_FOFF(TDfc,iPriority) % 4 == 0);
__ASSERT_COMPILE(_FOFF(TDfc,iOnFinalQ) == _FOFF(TDfc,iPriority) + 2);
__ASSERT_COMPILE(_FOFF(TDfc,iQueued) == _FOFF(TDfc,iOnFinalQ) + 1);

__NAKED__ void TDfcQue::ThreadFunction(TAny* /*aDfcQ*/)
{
asm("ldr r11, __TheScheduler2 ");

asm("mov r4, r0 "); // r4=aDfcQ
asm("ldr r10, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
asm("mov r7, #0 ");
asm("mov r9, #1 ");
SET_INTS_1(r5, MODE_SVC, INTS_ALL_ON);
SET_INTS_1(r6, MODE_SVC, INTS_ALL_OFF);

asm("dfc_thrd_fn_check_queue: ");
SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON); // enable interrupts

asm("dfc_thrd_fn_check_queue2: ");
asm("str r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
asm("ldr r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent)); // r3=aDfcQ->iPresent
asm("add lr, r4, #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // lr=address of priority 0 queue
#ifdef __CPU_ARM_HAS_CLZ
CLZ(12,3); // r12=31-MSB(r3), 32 if r3=0
asm("rsbs r12, r12, #31 "); // r12=ms bit number set, -1 if queue empty
asm("bmi dfc_thrd_fn_wait "); // if empty, wait for next request
#else
asm("movs r2, r3 "); // check if queue empty
asm("beq dfc_thrd_fn_wait "); // if empty, wait for next request
asm("mov r12, #7 ");
asm("cmp r2, #0x10 ");
asm("movcc r2, r2, lsl #4 ");
asm("subcc r12, r12, #4 ");
asm("cmp r2, #0x40 ");
asm("movcc r2, r2, lsl #2 ");
asm("subcc r12, r12, #2 ");
asm("cmp r2, #0x80 ");
asm("subcc r12, r12, #1 "); // r12=ms bit number set
#endif
asm("ldr r8, [lr, r12, lsl #2]! "); // lr=address of highest priority non-empty queue, r8=address of first DFC
asm("ldmia r8, {r0-r1} "); // r0=first->next, r1=first->prev
asm("cmp r0, r8 "); // check if this is the only one at this priority
asm("strne r0, [r1, #0] "); // if not, prev->next=next
asm("strne r1, [r0, #4] "); // and next->prev=prev
asm("streq r7, [lr] "); // if this was only one, set head pointer for this priority to NULL
asm("strne r0, [lr] "); // else set head pointer to first->next
ASM_KILL_LINK(r8,r1);
asm("strh r7, [r8, #%a0]" : : "i" _FOFF(TDfc, iOnFinalQ)); // iOnFinalQ=iQueued=FALSE - can't touch link pointers after this
asm("biceq r3, r3, r9, lsl r12 "); // if no more at this priority clear bit in iPresent
asm("streq r3, [r4, #%a0]" : : "i" _FOFF(TDfcQue,iPresent));

SET_INTS_2(r6, MODE_SVC, INTS_ALL_OFF); // interrupts off
asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // check if reschedule required
asm("cmp r3, #0 ");
asm("streq r7, [r11, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // if no reschedule required unlock the kernel
asm("blne " CSM_ZN10TScheduler10RescheduleEv); // if reschedule required, do it
SET_INTS_2(r5, MODE_SVC, INTS_ALL_ON); // restore interrupts

asm("ldr r1, [r8, #%a0]" : : "i" _FOFF(TDfc, iFunction)); // r1=function address
asm("adr lr, dfc_thrd_fn_check_queue2 "); // set up return address
asm("ldr r0, [r8, #%a0]" : : "i" _FOFF(TDfc, iPtr)); // r0=DFC argument
__JUMP(,r1); // call DFC

asm("dfc_thrd_fn_wait: ");
asm("mov r0, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc));
asm("strb r0, [r10, #%a0]" : : "i" _FOFF(NThreadBase,iNState));
asm("strb r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
asm("mov r0, r11 ");
asm("mov r1, r10 ");
asm("bl unready ");
asm("adr lr, dfc_thrd_fn_check_queue "); // set up return address
asm("b " CSM_ZN10TScheduler10RescheduleEv);

asm("__TheScheduler2: ");
asm(".word TheScheduler ");
}


/** Cancels an IDFC or DFC.

This function does nothing if the IDFC or DFC is not queued.

@return TRUE if the DFC was actually dequeued by this call. In that case
it is guaranteed that the DFC will not execute until it is
queued again.
FALSE if the DFC was not queued on entry to the call, or was in
the process of being executed or cancelled. In this case
it is possible that the DFC executes after this call
returns.

@post In either case it is safe to delete the DFC object on
return from this call, provided only that the DFC function does not
refer to the DFC object itself.

@pre IDFC or thread context. Do not call from ISRs.

@pre If the DFC function accesses the DFC object itself, the user must ensure that
Cancel() cannot be called while the DFC function is running.
*/
__NAKED__ EXPORT_C TBool TDfc::Cancel()
{
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

asm("ldr r1, __TheScheduler2 ");
asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
asm("add r3, r3, #1 ");
asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority)); // r2=priority/flags
SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
asm("tst r2, #0xff000000 "); // test queued flag
asm("moveq r0, #0 "); // if not queued, return FALSE
asm("beq 0f ");
SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF); // otherwise disable interrupts while we dequeue
asm("ldmia r0, {r3,r12} "); // r3=next, r12=prev
SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
asm("str r3, [r12, #0] "); // prev->next=next
asm("str r12, [r3, #4] "); // next->prev=prev
SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON); // reenable interrupts
asm("tst r2, #0x00ff0000 "); // check iOnFinalQ
asm("beq 1f "); // if FALSE, finish up
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ)); // r1=iDfcQ
asm("and r2, r2, #0xff "); // r2=iPriority
asm("subs r12, r3, r0 "); // check if queue is now empty, r12=0 if it is
asm("beq 2f "); // branch if now empty
asm("add r1, r1, r2, lsl #2 "); // r1=&iDfcQ->iQueue[iPriority]-_FOFF(TDfcQue.iPriority)
asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue)); // r12=iDfcQ->iQueue[iPriority]
asm("cmp r12, r0 "); // is this one first?
asm("streq r3, [r1, #%a0]" : : "i" _FOFF(TDfcQue,iQueue)); // if so, iQueue[pri]=next
asm("b 1f ");
asm("2: "); // r0=this, r1=iDfcQ, r2=priority, r3=next, r12=0
asm("ldr r3, [r1], #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // r3=iDfcQ->iPresent, r1=&iDfcQ->iQueue[0]
asm("str r12, [r1, r2, lsl #2] "); // iDfcQ->iQueue[iPriority]=NULL
asm("mov r12, #1 ");
asm("bic r3, r3, r12, lsl r2 "); // clear present bit
asm("str r3, [r1, #-%a0]" : : "i" _FOFF(TDfcQue,iQueue));
asm("1: ");
ASM_KILL_LINK(r0,r1);
asm("mov r3, #0 ");
asm("strh r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ)); // iOnFinalQ=iQueued=FALSE - must be done last

// R0=this != 0 here

asm("0: ");
asm("stmfd sp!, {r0,lr} ");
asm("bl " CSM_ZN5NKern6UnlockEv); // unlock the kernel
__POPRET("r0,");
}
#endif
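// Illustrative usage sketch (not part of the original source): cancelling a DFC
// before freeing state it uses, relying on the Cancel() contract documented above.
// The class and member names here are hypothetical.
//
//   void DExampleDriver::Shutdown()
//       {
//       iRxDfc.Cancel();      // after this returns the DFC cannot run again
//                             // unless it is queued again
//       delete iRxBuffer;     // safe provided the DFC function does not refer
//                             // to the TDfc object itself
//       }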

#ifdef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

Decrements the signal count for the semaphore and
removes the calling thread from the ready-list if the semaphore becomes
unsignalled. Only the thread that owns a fast semaphore can wait on it.

Note that this function does not block; it merely updates the NThread state.
Rescheduling will only occur when the kernel is unlocked. Generally threads
would use NKern::FSWait(), which manipulates the kernel lock for you.

@pre The calling thread must own the semaphore.
@pre Kernel must be locked.
@pre No fast mutex can be held.

@post Kernel is locked.

@see NFastSemaphore::Signal()
@see NKern::FSWait()
@see NKern::Unlock()
*/
EXPORT_C __NAKED__ void NFastSemaphore::Wait()
{
ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

asm("mov r2, r0 ");
asm("ldr r0, __TheScheduler ");
asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread)); // r1=owning thread
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3=current thread
asm("cmp r1, r3 ");
asm("bne PanicFastSemaphoreWait "); // if wrong thread, fault
// wait on a NFastSemaphore pointed to by r2
// enter with r0=&TheScheduler, r1=the current thread, already validated
asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
asm("mov r12, #%a0" : : "i" (NThread::EWaitFastSemaphore));
asm("subs r3, r3, #1 ");
asm("str r3, [r2, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount)); // decrement iCount
__JUMP(ge,lr); // if result>=0, finished
asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
asm("strb r12, [r1, #%a0]" : : "i" _FOFF(NThread,iNState));
asm("mov r3, #1 ");
asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));

// remove thread from ready list
asm("b unready ");
}
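// Illustrative sketch (not part of the original source): callers normally reach
// this code through NKern::FSWait(), which brackets the raw Wait() with the
// kernel lock. A direct equivalent looks roughly like this; the semaphore name
// is hypothetical.
//
//   NKern::Lock();
//   iRequestSemaphore.Wait();   // may mark the current thread as waiting
//   NKern::Unlock();            // the actual reschedule happens here if we blocked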


/** Waits for a signal on the current thread's I/O semaphore.
@pre No fast mutex can be held.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::WaitForAnyRequest()
{
ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC|MASK_NO_FAST_MUTEX);

asm("ldr r0, __TheScheduler ");
asm("str lr, [sp, #-4]! "); // save lr
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
asm("bl wait_for_any_request2 ");
SET_INTS(r0, MODE_SVC, INTS_ALL_ON); // turn interrupts back on
asm("ldr pc, [sp], #4 ");

// Special case handler for Exec::WaitForAnyRequest() for efficiency reasons
// Called from __ArmVectorSwi with R11=&TheScheduler, R1=current thread
// Returns with interrupts disabled
asm(".global wait_for_any_request ");
asm("wait_for_any_request: ");

ASM_DEBUG0(WaitForAnyRequest);
asm("mov r0, r11 ");
asm("wait_for_any_request2: ");
SET_INTS_1(r2, MODE_SVC, INTS_ALL_OFF);
#ifdef _DEBUG
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
asm("cmp r3, #0 ");
asm("movne r12, #0xd8000001 "); // FAULT - calling Exec::WaitForAnyRequest() with the kernel locked is silly
asm("strne r12, [r12] ");
#endif
SET_INTS_2(r2, MODE_SVC, INTS_ALL_OFF); // turn off interrupts
asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount));
asm("mov r3, #1 ");
SET_INTS_1(r12, MODE_SVC, INTS_ALL_ON);
asm("subs r2, r2, #1 ");
asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iRequestSemaphore.iCount)); // decrement iCount
__JUMP(ge,lr); // if result non-negative, finished

asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
SET_INTS_2(r12, MODE_SVC, INTS_ALL_ON); // reenable interrupts
asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));

// r2 points to NFastSemaphore
asm("add r2, r1, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
asm("str lr, [sp, #-4]! ");
asm("str r2, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
asm("mov r3, #%a0" : : "i" (NThread::EWaitFastSemaphore));
asm("strb r3, [r1, #%a0]" : : "i" _FOFF(NThread,iNState)); // mark thread waiting on semaphore
asm("bl unready "); // remove thread from ready list - DOESN'T CLOBBER R0
asm("bl " CSM_ZN10TScheduler10RescheduleEv); // Reschedule
asm("ldr lr, [sp], #4 ");
asm("mov r3, #%a0 " : : "i" (NThread::EContextWFARCallback));
asm("b callUserModeCallbacks "); // exit and call callbacks
}


/** Signals a fast semaphore multiple times.

@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.

@post Kernel is locked.

@internalComponent
*/
EXPORT_C __NAKED__ void NFastSemaphore::SignalN(TInt /*aCount*/)
{
ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

asm("req_sem_signaln: ");
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
asm("adds r2, r2, r1 ");
asm("str r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
__JUMP(cc,lr); // if count did not cross 0 nothing more to do
asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
asm("mov r1, #0 ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
asm("b check_suspend_then_ready ");
}

/** @internalComponent */
__NAKED__ void NFastSemaphore::WaitCancel()
{
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
asm("mov r1, #0 ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
asm("mov r0, r3 ");
asm("b check_suspend_then_ready ");
}


/** Resets a fast semaphore.

@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.

@post Kernel is locked.

@internalComponent
*/
EXPORT_C __NAKED__ void NFastSemaphore::Reset()
{
ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
asm("mov r1, #0 ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
asm("cmp r2, #0 ");
__JUMP(ge,lr); // if count was not negative, nothing to do
asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iOwningThread));
asm("mov r1, #0 ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));
asm("b check_suspend_then_ready ");
}

#endif

#ifdef __SCHEDULER_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(SDblQueLink,iNext) == 0);
__ASSERT_COMPILE(_FOFF(SDblQueLink,iPrev) == 4);
__ASSERT_COMPILE(_FOFF(TScheduler,iPresent) == 0);
__ASSERT_COMPILE(_FOFF(NFastSemaphore,iCount) == 0);
__ASSERT_COMPILE(_FOFF(NFastSemaphore,iOwningThread) == 4);
__ASSERT_COMPILE(_FOFF(TDfc,iPtr) == _FOFF(TDfc,iPriority) + 4);
__ASSERT_COMPILE(_FOFF(TDfc,iFunction) == _FOFF(TDfc,iPtr) + 4);

__NAKED__ void TScheduler::Remove(NThreadBase* /*aThread*/)
//
// Remove a thread from the ready list
//
{
asm("unready: ");
#ifdef _DEBUG
asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
asm("mov r12, #0xd8000003 ");
asm("cmp r2, #0 ");
asm("strne r12, [r12] "); // crash if fast mutex held
#endif
asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
asm("ldmia r1, {r2,r3} "); // r2=next, r3=prev
asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iTime)); // fresh timeslice for next time

asm("pri_list_remove: ");
ASM_KILL_LINK(r1,r12);
asm("subs r12, r1, r2 "); // check if more threads at this priority, r12=0 if not
asm("bne unready_1 "); // branch if there are more at same priority
asm("ldrb r2, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority)); // r2=thread priority
asm("add r1, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue)); // r1->iQueue[0]
asm("str r12, [r1, r2, lsl #2] "); // iQueue[priority]=NULL
asm("ldrb r1, [r0, r2, lsr #3] "); // r1=relevant byte in present mask
asm("and r3, r2, #7 "); // r3=priority & 7
asm("mov r12, #1 ");
asm("bic r1, r1, r12, lsl r3 "); // clear bit in present mask
asm("strb r1, [r0, r2, lsr #3] "); // update relevant byte in present mask
__JUMP(,lr);
asm("unready_1: "); // get here if there are other threads at same priority
asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThread, iPriority)); // r12=thread priority
asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler, iQueue)); // r0=&iQueue[0]
asm("str r3, [r2, #4] "); // next->prev=prev
asm("ldr r12, [r0, r12, lsl #2]! "); // r12=iQueue[priority], r0=&iQueue[priority]
asm("str r2, [r3, #0] "); // and prev->next=next
asm("cmp r12, r1 "); // if aThread was first...
asm("streq r2, [r0, #0] "); // iQueue[priority]=aThread->next
__JUMP(,lr); // finished
}


/** Removes an item from a priority list.

@param aLink A pointer to the item - this must not be NULL.
*/
EXPORT_C __NAKED__ void TPriListBase::Remove(TPriListLink* /*aLink*/)
{
asm("ldmia r1, {r2,r3} "); // r2=aLink->iNext, r3=aLink->iPrev
asm("b pri_list_remove ");
}


/** Signals a fast semaphore.

Increments the signal count of a fast semaphore by
one and releases any waiting thread if the semaphore becomes signalled.

Note that a reschedule will not occur before this function returns; it will
only take place when the kernel is unlocked. Generally threads
would use NKern::FSSignal(), which manipulates the kernel lock for you.

@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.

@post Kernel is locked.

@see NFastSemaphore::Wait()
@see NKern::FSSignal()
@see NKern::Unlock()
*/
EXPORT_C __NAKED__ void NFastSemaphore::Signal()
{
ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

asm("req_sem_signal: ");
asm("ldmia r0, {r1,r2} "); // r1=iCount, r2=iOwningThread
asm("mov r3, #0 ");
asm("adds r1, r1, #1 ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastSemaphore,iCount));
__JUMP(gt,lr); // if count after incrementing is >0, nothing more to do
asm("mov r0, r2 ");
asm("str r3, [r0, #%a0]" : : "i" _FOFF(NThread,iWaitObj));

// fall through to NThreadBase::CheckSuspendThenReady()
}


/** Makes a nanothread ready provided that it is not explicitly suspended.

For use by RTOS personality layers.

@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.

@post Kernel is locked.
*/
EXPORT_C __NAKED__ void NThreadBase::CheckSuspendThenReady()
{
ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);

asm("check_suspend_then_ready: ");
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThread,iSuspendCount));
asm("mov r2, #%a0" : : "i" (NThread::ESuspended));
asm("cmp r1, #0 ");
asm("bne mark_thread_suspended "); // branch out if suspend count nonzero

// fall through to NThreadBase::Ready()
}


/** Makes a nanothread ready.

For use by RTOS personality layers.

@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.
@pre The calling thread must not be explicitly suspended.

@post Kernel is locked.
*/
EXPORT_C __NAKED__ void NThreadBase::Ready()
{
// on release builds just fall through to DoReady
#ifdef _DEBUG
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_KERNEL_LOCKED);
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(NThreadBase,iSuspendCount));
asm("cmp r1, #0 ");
asm("beq 1f ");
ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
asm("1: ");
asm("stmfd sp!, {r0,lr} ");
asm("mov r0, #%a0" : : "i" ((TInt)KCRAZYSCHEDDELAY));
asm("bl " CSM_Z9KDebugNumi );
asm("cmp r0, #0 "); // Z=1 => no delayed scheduler
asm("ldmfd sp!, {r0,lr} ");
asm("ldr r1, __TheScheduler ");
asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority)); // r2=priority of aThread
asm("beq DoReadyInner "); // delayed scheduler is disabled
asm("ldr r12, __TheTimerQ ");
asm("cmp r2, #0 ");
asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(NTimerQ,iMsCount));
asm("cmpne r12, #0 "); // tick hasn't happened yet or this is priority 0
asm("beq DoReadyInner "); // so ready it as usual
asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
asm("tst r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
__JUMP(ne,lr); // thread is already on the delayed queue
asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iDelayedQ));
asm("ldr r12, [r3, #4] "); // r12->last thread
asm("str r0, [r3, #4] "); // first->prev=this
asm("str r0, [r12, #0] "); // old last->next=this
asm("stmia r0, {r3,r12} "); // this->next=first, this->prev=old last
asm("orr r2, r2, #%a0 " : : "i" ((TInt)KThreadAttDelayed));
asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,i_ThrdAttr));
__JUMP(,lr);

asm("__TheTimerQ: ");
asm(".word TheTimerQ ");
asm("__SuperPageAddress: ");
asm(".word SuperPageAddress ");
#endif
// on release builds just fall through to DoReady
}

__NAKED__ void NThreadBase::DoReady()
{
asm("ldr r1, __TheScheduler ");
asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iPriority)); // r2=priority of aThread
asm("DoReadyInner: ");
asm("mov r3, #%a0" : : "i" (NThread::EReady));
asm("strb r3, [r0, #%a0]" : : "i" _FOFF(NThread,iNState));
asm("ldmia r1!, {r3,r12} "); // r3=present mask low, r12=present mask high, r1=&iQueue[0]
asm("cmp r2, #31 ");
asm("bhi 1f ");
asm("cmp r12, #0 ");
asm("mov r12, r3 ");
asm("mov r3, #1 ");
asm("bne 2f "); // branch if high word set, so this has lower priority
asm("cmp r3, r12, lsr r2 "); // see if new thread may cause reschedule (CS if so, EQ if equal priority)
asm("beq 3f "); // branch if equality case (no need to update bitmask)
asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
asm("2: ");
asm("tst r12, r3, lsl r2 "); // test bit in present mask
asm("orreq r12, r12, r3, lsl r2 "); // if clear, set it ...
asm("ldrne r3, [r1, r2, lsl #2] "); // if not alone, r3->first thread on queue
asm("streq r12, [r1, #-8] "); // ... and update present mask low word
asm("bne 4f "); // branch if not alone (don't need to touch bitmask)
asm("6: "); // get here if thread is alone at this priority
asm("str r0, [r1, r2, lsl #2] "); // thread is alone at this priority, so point queue to it
asm("str r0, [r0, #0] "); // next=prev=this
asm("str r0, [r0, #4] ");
__JUMP(,lr); // NOTE: R0=this != 0
asm("5: "); // get here if this thread has joint highest priority >= 32
asm("add r2, r2, #32 "); // restore thread priority
asm("3: "); // get here if this thread has joint highest priority < 32
asm("ldr r3, [r1, r2, lsl #2] "); // r3->first thread on queue
asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iTime)); // r12=first thread->time remaining
asm("subs r12, r12, #1 "); // timeslice expired? if so, r12=-1 and C=0 else C=1
asm("strccb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
asm("4: "); // get here when adding to non-empty queue; r1->queue, r3->first thread on queue
asm("ldr r12, [r3, #4] "); // r12->last thread
asm("str r0, [r3, #4] "); // first->prev=this
asm("str r0, [r12, #0] "); // old last->next=this
asm("stmia r0, {r3,r12} "); // this->next=first, this->prev=old last
__JUMP(,lr); // NOTE: R0=this != 0
asm("1: "); // get here if this thread priority > 31
asm("and r2, r2, #31 ");
asm("mov r3, #1 ");
asm("cmp r3, r12, lsr r2 "); // see if new thread may cause reschedule (CS if so, EQ if equal priority)
asm("beq 5b "); // branch if equality case (no need to update bitmask)
asm("strhib r3, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iRescheduleNeededFlag)-8)); // set reschedule flag if necessary
asm("tst r12, r3, lsl r2 "); // test bit in present mask
asm("orreq r12, r12, r3, lsl r2 "); // if clear, set it ...
asm("add r2, r2, #32 ");
asm("streq r12, [r1, #-4] "); // ... and update present mask high word
asm("beq 6b "); // branch if alone
asm("ldr r3, [r1, r2, lsl #2] "); // if not alone, r3->first thread on queue
asm("b 4b "); // branch if not alone (don't need to touch bitmask)

asm("mark_thread_suspended: "); // continuation of CheckSuspendThenReady in unusual case
asm("strb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iNState)); // set state to suspended
__JUMP(,lr); // NOTE: R0=this != 0
}

__NAKED__ void TScheduler::QueueDfcs()
{
// move DFCs from pending queue to their final queues
// enter with interrupts off and kernel locked
// leave with interrupts off and kernel locked
// NOTE: WE MUST NOT CLOBBER R0 OR R2!
// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY


SET_INTS(r1, MODE_SVC, INTS_ALL_ON); // enable interrupts
#ifdef __CPU_ARM_HAS_CPS
asm("mov r1, #1 "); // (not necessary on ARMV5 as SET_INTS above leaves r1 == 0x13)
#endif
asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
asm("stmfd sp!, {r2,r5,r11,lr} "); // save registers

#ifdef BTRACE_CPU_USAGE
asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
asm("mov r11, sp "); // r11 points to saved registers
asm("cmp r1, #0");
asm("blne idfc_start_trace");
#else
asm("add r5, r0, #%a0" : : "i" _FOFF(TScheduler,iDfcs));
asm("mov r11, sp "); // r11 points to saved registers
#endif

asm("queue_dfcs_1: ");
SET_INTS(r0, MODE_SVC, INTS_ALL_OFF); // disable interrupts
asm("ldr r0, [r5, #0] "); // r0 points to first pending DFC
SET_INTS_1(r1, MODE_SVC, INTS_ALL_ON);
asm("subs r2, r0, r5 "); // check if queue empty
asm("ldrne r3, [r0, #0] "); // r3 points to next DFC
asm("beq queue_dfcs_0 "); // if so, exit
asm("str r3, [r5, #0] "); // next one is now first
asm("str r5, [r3, #4] "); // next->prev=queue head
SET_INTS_2(r1, MODE_SVC, INTS_ALL_ON); // enable interrupts

asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority)); // r12=iPriority
asm("adr lr, queue_dfcs_1 "); // return to queue_dfcs_1
asm("cmp r12, #%a0" : : "i" ((TInt)KNumDfcPriorities)); // check for immediate DFC
asm("bcs do_immediate_dfc ");

// enqueue the DFC and signal the DFC thread
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ)); // r2=iDfcQ
asm("mov r3, #1 ");
asm("dfc_enque_1: ");
asm("ldr r1, [r2], #%a0" : : "i" _FOFF(TDfcQue,iQueue)); // r1=present mask, r2 points to first queue
asm("strb r3, [r0, #%a0]" : : "i" _FOFF(TDfc,iOnFinalQ)); // set flag to show DFC on final queue
asm("tst r1, r3, lsl r12 "); // test bit in present mask
asm("ldrne r1, [r2, r12, lsl #2] "); // if not originally empty, r1->first
asm("orreq r1, r1, r3, lsl r12 "); // if bit clear, set it
asm("streq r1, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iPresent)-_FOFF(TDfcQue,iQueue))); // if bit originally clear update present mask
asm("ldrne r3, [r1, #4] "); // if not originally empty, r3->last
asm("streq r0, [r2, r12, lsl #2] "); // if queue originally empty, iQueue[p]=this
asm("streq r0, [r0, #0] "); // this->next=this
asm("ldr r2, [r2, #%a0]" : : "i" (_FOFF(TDfcQue,iThread)-_FOFF(TDfcQue,iQueue))); // r2=iDfcQ->iThread
asm("stmneia r0, {r1,r3} "); // this->next=first, this->prev=last
asm("streq r0, [r0, #4] "); // this->prev=this
asm("ldrb r12, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iNState)); // r2=thread NState
asm("strne r0, [r1, #4] "); // first->prev=this
asm("strne r0, [r3, #0] "); // last->next=this
asm("cmp r12, #%a0" : : "i" ((TInt)NThreadBase::EWaitDfc)); // check for EWaitDfc
asm("mov r0, r2 "); // r0->thread
asm("beq check_suspend_then_ready "); // if it is, release thread
__JUMP(,lr); // else we are finished - NOTE R0=thread ptr != 0

asm("queue_dfcs_0: ");
#ifdef BTRACE_CPU_USAGE
asm("ldrb r1, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iCpuUsageFilter)-_FOFF(TScheduler,iDfcs)));
asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
asm("strb r2, [r5, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
asm("cmp r1, #0");
asm("blne idfc_end_trace");
#else
asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
asm("strb r2, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iInIDFC)-_FOFF(TScheduler,iDfcs)));
#endif
asm("sub r0, r5, #%a0" : : "i" _FOFF(TScheduler,iDfcs)); // restore r0
asm("mov sp, r11 "); // retrieve stack pointer before alignment
asm("ldmfd sp!, {r2,r5,r11,pc} ");

asm("do_immediate_dfc: ");
ASM_KILL_LINK(r0,r1);
asm("mov r1, #0x000000ff "); // pri=0xff (IDFC), spare1=0 (unused), spare2=0 (iOnFinalQ), spare3=0 (iQueued)
asm("str r1, [r0, #%a0]!" : : "i" _FOFF(TDfc,iPriority)); // dfc->iQueued=FALSE, r0->iPriority
asm("ldmib r0, {r0,r1} "); // r0 = DFC parameter, r1 = DFC function pointer
asm("bic sp, sp, #4 "); // align stack
__JUMP(,r1); // call DFC, return to queue_dfcs_1

#ifdef BTRACE_CPU_USAGE
asm("idfc_start_trace_header:");
asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCStart<<BTrace::ESubCategoryIndex*8)) );
asm("idfc_end_trace_header:");
asm(".word %a0" : : "i" ((TInt)(4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIDFCEnd<<BTrace::ESubCategoryIndex*8)) );

asm("idfc_start_trace:");
asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
asm("ldr r0, idfc_start_trace_header" );
__JUMP(,r1);

asm("idfc_end_trace:");
asm("ldr r0, idfc_end_trace_header" );
asm("ldr pc, [r5,#%a0]" : : "i" (_FOFF(TScheduler,iBTraceHandler)-_FOFF(TScheduler,iDfcs)));
#endif

}
#endif

#ifdef __DFC_MACHINE_CODED__

/** Queues an IDFC or a DFC from an ISR.

This function is the only way to queue an IDFC and is the only way to queue
a DFC from an ISR. To queue a DFC from an IDFC or a thread either Enque()
or DoEnque() should be used.

This function does nothing if the IDFC/DFC is already queued.

@pre Call only from ISR, IDFC or thread with the kernel locked.
@pre Do not call from thread with the kernel unlocked.
@return TRUE if DFC was actually queued by this call
FALSE if DFC was already queued on entry so this call did nothing

@see TDfc::DoEnque()
@see TDfc::Enque()
*/
__NAKED__ EXPORT_C TBool TDfc::Add()
{
ASM_CHECK_PRECONDITIONS(MASK_NO_RESCHED);
#ifdef _DEBUG
asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(TDfc,iPriority));
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
asm("cmp r2, #%a0" : : "i" ((TInt)KNumDfcPriorities));
asm("bhs 1f ");
asm("cmp r1, #0 ");
asm("bne 1f ");
ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
asm("1: ");
#endif
// Fall through to TDfc::RawAdd() ...
}

/** Queue an IDFC or a DFC.

This function is identical to TDfc::Add() but no checks are performed for correct usage,
and it contains no instrumentation code.

@return TRUE if DFC was actually queued by this call
FALSE if DFC was already queued on entry so this call did nothing
@see TDfc::DoEnque()
@see TDfc::Enque()
@see TDfc::Add()
*/
__NAKED__ EXPORT_C TBool TDfc::RawAdd()
{

#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
/* Optimize with LDREXB/STREXB */

asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued)); // r2=&iQueued's byte offset
asm("mov r12, #1 "); // r12=TRUE

asm("tryagain: ");
LDREXB(3,2); // r3 = already iQueued
STREXB(1,12,2); // Try setting iQueued = TRUE
asm("teq r1, #0 "); // Exclusive write succeeded?
asm("bne tryagain "); // No - retry until it does

#elif defined(__CPU_ARM_HAS_LDREX_STREX)
/* Implement with LDREX/STREX and shifts */

#define IQUEUED_WORD (_FOFF(TDfc, iQueued) & ~3) // offset of word containing iQueued
#define IQUEUED_SHIFT ((_FOFF(TDfc, iQueued) & 3) * 8) // bit position of byte within word

asm("add r2, r0, #%a0" : : "i" IQUEUED_WORD); // r2=&iQueued's word

asm("tryagain: ");
LDREX(3, 2);
asm("bic r12, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT)); // clear the bits to write to
asm("orr r12, r12, #%a0" : : "i" ((TInt)0x01<<IQUEUED_SHIFT)); // &iQueued = TRUE;
STREX(1, 12, 2);
asm("teq r1, #0 ");
asm("bne tryagain ");
asm("and r3, r3, #%a0" : : "i" ((TInt)0xff<<IQUEUED_SHIFT)); // mask out unwanted bits
#else
asm("mov r12, #1 "); // r12=TRUE
asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued)); // r2=&iQueued
asm("swpb r3, r12, [r2] "); // ATOMIC {r3=iQueued; iQueued=TRUE}
#endif

asm("ldr r1, __PendingDfcQueue "); // r1 points to DFC pending queue

asm("cmp r3, #0 "); // check if already queued
asm("addeq r3, r1, #4 "); // if not r3=&TheScheduler.iDfcs.iPrev ...
asm("streq r1, [r0, #0] "); // ...iNext=&TheScheduler.iDfcs ...

#ifdef __CPU_ARM_HAS_LDREX_STREX
asm("movne r0, #0 ");
asm("bne dontswap "); // easier this way
asm("try2: ");
LDREX(2, 3); // read
STREX(12, 0, 3); // write
asm("teq r12, #0 "); // success? also restore eq
asm("bne try2 "); // no!
asm("mov r12, #1");
#else
asm("swpeq r2, r0, [r3] "); // ...ATOMIC {r2=last; last=this} ...
#endif

asm("streqb r12, [r1, #%a0]" : : "i" (_FOFF(TScheduler,iDfcPendingFlag)-_FOFF(TScheduler,iDfcs)));
asm("streq r0, [r2, #0] "); // ...old last->iNext=this ...
asm("streq r2, [r0, #4] "); // ...iPrev=old last

// NOTE: R0=this != 0

asm("dontswap: ");
__JUMP(,lr);

asm("__PendingDfcQueue: ");
asm(".word %a0" : : "i" ((TInt)&TheScheduler.iDfcs));
}
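// Illustrative sketch (not part of the original source): queuing a DFC from an
// ISR with TDfc::Add(), as described in the comment above. The ISR and device
// names are hypothetical.
//
//   void DExampleDevice::Isr(TAny* aPtr)
//       {
//       DExampleDevice* d = (DExampleDevice*)aPtr;
//       d->iRxDfc.Add();     // returns TRUE only if the DFC was not already queued
//       }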


/** Queues a DFC (not an IDFC) from an IDFC or thread with preemption disabled.

This function is the preferred way to queue a DFC from an IDFC. It should not
be used to queue an IDFC - use TDfc::Add() for this.

This function does nothing if the DFC is already queued.

@pre Call only from IDFC or thread with the kernel locked.
@pre Do not call from ISR or thread with the kernel unlocked.
@return TRUE if DFC was actually queued by this call
FALSE if DFC was already queued on entry so this call did nothing

@see TDfc::Add()
@see TDfc::Enque()
*/
__NAKED__ EXPORT_C TBool TDfc::DoEnque()
{
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NO_RESCHED);
#ifdef _DEBUG
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TDfc,iDfcQ));
asm("cmp r1, #0 ");
asm("bne 1f ");
ASM_CHECK_PRECONDITIONS(MASK_ALWAYS_FAIL);
asm("1: ");
#endif

#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
asm("add r2, r0, #%a0" : : "i" _FOFF(TDfc, iQueued)); // r2=&iQueued's byte offset
asm("mov r3, #1 ");

asm("tryagain8: ");
LDREXB(1, 2); // r1 = iQueued
STREXB(12, 3, 2); // Try setting iQueued = True
asm(" teq r12, #1 "); // worked?
asm(" beq tryagain8 "); // nope
// r3 = 1, r1 = old iQueued
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
asm(" add r0, r0, #8 "); // align address (struct always aligned)
asm("tryagain8: ");
LDREX(2, 0); // do the load/store half
asm(" bic r12, r2, #0xff000000 "); // knock out unwanted bits
asm(" orr r12, r12, #0x01000000 "); // 'looking' value
STREX(1, 12, 0); // write looking value
asm(" teq r1, #1 "); // worked?
asm(" beq tryagain8 "); // nope
asm(" mov r1, r2, lsr #24 "); // extract previous value byte
asm(" sub r0, r0, #8 "); // restore base pointer
asm(" mov r3, #1 "); // dfc_enque_1 expects r3 = 1
#else
asm("add r12, r0, #11 "); // r12=&iQueued
asm("mov r3, #1 ");
asm("swpb r1, r3, [r12] "); // ATOMIC {r1=iQueued; iQueued=TRUE}
#endif

asm("ldrb r12, [r0, #8] "); // r12=iPriority
asm("ldr r2, [r0, #20] "); // r2=iDfcQ
asm("cmp r1, #0 "); // check if queued
asm("beq dfc_enque_1 "); // if not, queue it and return with R0 nonzero
asm("mov r0, #0 ");
__JUMP(,lr);
}
#endif

#ifdef __FAST_MUTEX_MACHINE_CODED__

__ASSERT_COMPILE(_FOFF(NFastMutex,iHoldingThread) == 0);

/** Releases a previously acquired fast mutex.

Generally threads would use NKern::FMSignal() which manipulates the kernel lock
for you.

@pre The calling thread must hold the mutex.
@pre Kernel must be locked.

@post Kernel is locked.

@see NFastMutex::Wait()
@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NFastMutex::Signal()
{
// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
ASM_DEBUG1(FMSignal,r0);
asm("ldr r2, __TheScheduler ");
#ifdef BTRACE_FAST_MUTEX
asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
asm("cmp r1, #0");
asm("bne fastmutex_signal_trace");
asm("no_fastmutex_signal_trace:");
#endif
asm("mov r12, #0 ");
asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting)); // iHoldingThread=NULL, r0->iWaiting
asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread
asm("ldr r3, [r0] "); // r3=iWaiting
asm("str r12, [r0] "); // iWaiting=FALSE
asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL
asm("cmp r3, #0 "); // check waiting flag
asm("bne 2f ");
asm("1: ");
__JUMP(,lr); // if clear, finished
asm("2: ");
asm("ldr r12, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // Assumes iWaiting!=0 mod 256
asm("cmp r12, #0 "); // check for outstanding CS function
asm("beq 1b "); // if none, finished
asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount)); // else check CS count
asm("mov r0, r1 ");
asm("cmp r2, #0 ");
__JUMP(ne,lr); // if nonzero, finished
asm("DoDoCsFunction: ");
asm("stmfd sp!, {r11,lr} ");
asm("mov r11, sp ");
asm("bic sp, sp, #4 ");
asm("bl " CSM_ZN11NThreadBase12DoCsFunctionEv); // if iCsCount=0, DoCsFunction()
asm("mov sp, r11 ");
asm("ldmfd sp!, {r11,pc} ");

#ifdef BTRACE_FAST_MUTEX
asm("fastmutex_signal_trace:");
ALIGN_STACK_START;
asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
asm("bl fmsignal_lock_trace_unlock");
asm("ldmia sp!, {r0-r2,lr}");
ALIGN_STACK_END;
asm("b no_fastmutex_signal_trace");
#endif
}


/** Acquires the fast mutex.

This will block until the mutex is available, and causes
the thread to enter an implicit critical section until the mutex is released.

Generally threads would use NKern::FMWait() which manipulates the kernel lock
for you.

@pre Kernel must be locked, with lock count 1.

@post Kernel is locked, with lock count 1.
@post The calling thread holds the mutex.

@see NFastMutex::Signal()
@see NKern::FMWait()
*/
EXPORT_C __NAKED__ void NFastMutex::Wait()
{
// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
ASM_DEBUG1(FMWait,r0);
asm("ldr r2, __TheScheduler ");
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // r3=iHoldingThread
asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r1=iCurrentThread
asm("cmp r3, #0 "); // check if mutex held
asm("bne fastmutex_wait_block ");
asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // if not, iHoldingThread=current thread
asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // and current thread->iHeldFastMutex=this
#ifdef BTRACE_FAST_MUTEX
asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
asm("cmp r12, #0");
asm("bne fmwait_trace2");
#endif
__JUMP(,lr); // and we're done
asm("fastmutex_wait_block:");
asm("str lr, [sp, #-4]! "); // We must wait - save return address
asm("mov r12, #1 ");
asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // iWaiting=TRUE
asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // current thread->iWaitFastMutex=this
asm("mov r0, r3 "); // parameter for YieldTo
ASM_DEBUG1(FMWaitYield,r0);
asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase); // yield to the mutex holding thread
// will not return until the mutex is free
// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
asm("mov r12, #1 ");
asm("str r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
SET_INTS(r12, MODE_SVC, INTS_ALL_ON); // reenable interrupts
asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // r2=this
asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // iWaitFastMutex=NULL
asm("str r3, [r2, #0] "); // iHoldingThread=current thread
asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=this
#ifdef BTRACE_FAST_MUTEX
asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
asm("cmp r12, #0");
asm("bne fastmutex_wait_trace2");
#endif
asm("ldr pc, [sp], #4 ");

#ifdef BTRACE_FAST_MUTEX
asm("fastmutex_wait_trace2:");
// r0=scheduler r2=mutex r3=thread
asm("ldr lr, [sp], #4 ");
ALIGN_STACK_START;
asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
asm("bl fmwait_lockacquiredwait_trace");
asm("ldmia sp!, {r0-r2,lr}");
ALIGN_STACK_END;
__JUMP(,lr);
#endif
}
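// Illustrative sketch (not part of the original source): typical fast mutex use
// goes through NKern::FMWait()/NKern::FMSignal(), which manage the kernel lock
// around the raw Wait()/Signal() in this file. The mutex name is hypothetical.
//
//   NKern::FMWait(&iDataLock);    // blocks until free; implicit critical section
//   // ... touch data protected by iDataLock ...
//   NKern::FMSignal(&iDataLock);  // releases the mutex; a deferred reschedule may follow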


/** Releases the System Lock.

@pre System lock must be held.

@see NKern::LockSystem()
@see NKern::FMSignal()
*/
EXPORT_C __NAKED__ void NKern::UnlockSystem()
    {
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    ASM_CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED);
    asm("ldr r0, __SystemLock ");
    }


/** Releases a previously acquired fast mutex.

@param aMutex The fast mutex to be released.

@pre The calling thread must hold the mutex.

@see NFastMutex::Signal()
@see NKern::FMWait()
*/
EXPORT_C __NAKED__ void NKern::FMSignal(NFastMutex*)
    {
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    ASM_DEBUG1(NKFMSignal,r0);

    asm("ldr r2, __TheScheduler ");
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r1, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r1, #0");
    asm("bne fmsignal_trace1");
    asm("no_fmsignal_trace1:");
#endif

#ifdef __CPU_ARM_HAS_CPS
    asm("mov r12, #0 ");
    CPSIDIF;                            // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));          // r3=iWaiting
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));   // iHoldingThread=NULL
    asm("cmp r3, #0 ");                 // check waiting flag
    asm("str r12, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting));         // iWaiting=FALSE
    asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));      // current thread->iHeldFastMutex=NULL
    asm("bne 1f ");
    CPSIEIF;                            // reenable interrupts
    __JUMP(,lr);                        // if clear, finished
    asm("1: ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel if set (assumes iWaiting always 0 or 1)
    CPSIEIF;                            // reenable interrupts
#else
    SET_INTS_1(r3, MODE_SVC, INTS_ALL_OFF);
    asm("mov r12, #0 ");
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    SET_INTS_2(r3, MODE_SVC, INTS_ALL_OFF);                                // disable interrupts
    asm("str r12, [r0], #%a0" : : "i" _FOFF(NFastMutex,iWaiting));         // iHoldingThread=NULL, r0->iWaiting
    asm("ldr r3, [r0] ");               // r3=iWaiting
    asm("str r12, [r0] ");              // iWaiting=FALSE
    asm("str r12, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));      // current thread->iHeldFastMutex=NULL
    asm("mov r12, #0x13 ");
    asm("cmp r3, #0 ");                 // check waiting flag
    __MSR_CPSR_C(eq, r12);              // if clear, finished
    __JUMP(eq,lr);
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // lock the kernel (assumes iWaiting always 0 or 1)
    asm("msr cpsr_c, r12 ");            // reenable interrupts
#endif
    asm("strb r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iCsFunction));          // r3=current thread->iCsFunction
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iCsCount));             // r2=current thread->iCsCount
    asm("str lr, [sp, #-4]! ");
    asm("cmp r3, #0 ");                 // outstanding CS function?
    asm("beq 2f ");                     // branch if not
    asm("cmp r2, #0 ");                 // iCsCount!=0 ?
    asm("moveq r0, r1 ");               // if iCsCount=0, DoCsFunction()
    asm("bleq DoDoCsFunction ");
    asm("2: ");
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);                           // reschedule to allow waiting thread in
    SET_INTS(r12, MODE_SVC, INTS_ALL_ON);                                  // reenable interrupts after reschedule
    asm("ldr pc, [sp], #4 ");

#ifdef BTRACE_FAST_MUTEX
    asm("fmsignal_trace1:");
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmsignal_lock_trace_unlock");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    asm("b no_fmsignal_trace1");
#endif
    }


/** Acquires the System Lock.

This will block until the mutex is available, and causes
the thread to enter an implicit critical section until the mutex is released.

@post System lock is held.

@see NKern::UnlockSystem()
@see NKern::FMWait()

@pre No fast mutex can be held.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::LockSystem()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    asm("ldr r0, __SystemLock ");
    }
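
// Illustrative usage sketch, not part of the original source: bracket access
// to a structure protected by the System Lock. The preconditions documented
// above apply (kernel unlocked, interrupts enabled, no fast mutex held,
// thread context). 'TSharedState' and 'UpdateSharedState' are hypothetical.
#if 0
struct TSharedState { TInt iValue; };

void UpdateSharedState(TSharedState* aState)
    {
    NKern::LockSystem();    // may block until the System Lock is free
    ++aState->iValue;       // implicit critical section while the lock is held
    NKern::UnlockSystem();
    }
#endif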


/** Acquires a fast mutex.

This will block until the mutex is available, and causes
the thread to enter an implicit critical section until the mutex is released.

@param aMutex The fast mutex to be acquired.

@post The calling thread holds the mutex.

@see NFastMutex::Wait()
@see NKern::FMSignal()

@pre No fast mutex can be held.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::FMWait(NFastMutex*)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NO_FAST_MUTEX|MASK_NOT_ISR|MASK_NOT_IDFC);
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    ASM_DEBUG1(NKFMWait,r0);
    asm("ldr r2, __TheScheduler ");

#ifdef __CPU_ARM_HAS_CPS
    CPSIDIF;                            // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // r3=iHoldingThread
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("cmp r3, #0 ");                 // check if mutex held
    asm("bne 1f");
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // iHoldingThread=current thread
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));       // and current thread->iHeldFastMutex=this
    CPSIEIF;                            // reenable interrupts
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace2");
#endif
    __JUMP(,lr);                        // we're finished
    asm("1: ");
    asm("mov r3, #1 ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // mutex held, so lock the kernel
    CPSIEIF;                            // reenable interrupts
#else
    asm("mov r3, #0xd3 ");
    asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
    asm("msr cpsr, r3 ");               // disable interrupts
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));    // r3=iHoldingThread
    asm("mov r12, #0x13 ");
    asm("cmp r3, #0");                  // check if mutex held
    asm("streq r1, [r0, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread));  // if not, iHoldingThread=current thread
    asm("streq r0, [r1, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));     // and current thread->iHeldFastMutex=this
    __MSR_CPSR_C(eq, r12);              // and we're finished
#ifdef BTRACE_FAST_MUTEX
    asm("bne no_fmwait_trace2");
    asm("ldrb r12, [r2,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace2");
    __JUMP(,lr);
    asm("no_fmwait_trace2:");
#endif
    __JUMP(eq,lr);
    asm("mov r3, #1 ");
    asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));     // mutex held, so lock the kernel
    asm("msr cpsr_c, r12 ");            // and reenable interrupts
#endif
    asm("str lr, [sp, #-4]! ");
    asm("str r3, [r0, #4] ");           // iWaiting=TRUE
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // current thread->iWaitFastMutex=this
    asm("ldr r0, [r0, #0] ");           // parameter for YieldTo
    ASM_DEBUG1(NKFMWaitYield,r0);
    asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase);                  // yield to the mutex holding thread
    // will not return until the mutex is free
    // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
    asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // r2=this
    asm("ldr lr, [sp], #4 ");
    asm("str r1, [r3, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));       // iWaitFastMutex=NULL
    asm("str r2, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));       // current thread->iHeldFastMutex=this
    asm("str r3, [r2, #0] ");           // iHoldingThread=current thread
    SET_INTS(r12, MODE_SVC, INTS_ALL_ON);
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
    asm("cmp r12, #0");
    asm("bne fmwait_trace3");
#endif
    __JUMP(,lr);

#ifdef BTRACE_FAST_MUTEX
    asm("fmwait_trace2:");
    // r0=mutex r1=thread r2=scheduler
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmwait_lockacquiredwait_trace2");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    __JUMP(,lr);

    asm("fmwait_trace3:");
    // r0=scheduler r2=mutex r3=thread
    ALIGN_STACK_START;
    asm("stmdb sp!, {r0-r2,lr}");       // 4th item on stack is PC value for trace
    asm("bl fmwait_lockacquiredwait_trace");
    asm("ldmia sp!, {r0-r2,lr}");
    ALIGN_STACK_END;
    __JUMP(,lr);
#endif
    }
#endif
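
// Illustrative usage sketch, not part of the original source: guard a shared
// counter with a fast mutex through the NKern wrappers above. 'TheCountLock',
// 'TheCount' and 'IncrementCount' are hypothetical names for this example.
#if 0
NFastMutex TheCountLock;
TInt TheCount = 0;

void IncrementCount()
    {
    NKern::FMWait(&TheCountLock);       // blocks until the mutex is free
    ++TheCount;
    NKern::FMSignal(&TheCountLock);     // releases the mutex, may reschedule a waiter
    }
#endif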

__NAKED__ void TScheduler::YieldTo(NThreadBase*)
    {
    //
    // Enter in mode_svc with kernel locked, interrupts can be on or off
    // Exit in mode_svc with kernel unlocked, interrupts off
    // On exit r0=&TheScheduler, r1=0, r2!=0, r3=TheCurrentThread, r4-r11 unaltered
    // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
    //
    asm("mrs r1, spsr ");               // r1=spsr_svc
    asm("mov r2, r0 ");                 // r2=new thread
    asm("ldr r0, __TheScheduler ");     // r0 points to scheduler data
    asm("stmfd sp!, {r1,r4-r11,lr} ");  // store registers and return address
#ifdef __CPU_ARM_USE_DOMAINS
    asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR
#endif
    asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));    // r1=iCurrentThread
#ifdef __CPU_HAS_VFP
    VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC);    // r10/r11=FPEXC
#endif
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
    GET_CAR(,r11);                      // r11=CAR
#endif
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
    GET_RWRW_TID(,r9);                  // r9=Thread ID
#endif
#ifdef __CPU_SUPPORT_THUMB2EE
    GET_THUMB2EE_HNDLR_BASE(,r8);       // r8=Thumb-2EE Handler Base
#endif

    asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE));    // make room for original thread, extras, sp_usr and lr_usr

    // Save the sp_usr and lr_usr and only the required coprocessor registers
    //                                  Thumb-2EE  TID  FPEXC      CAR  DACR
    asm("stmia sp, {" EXTRA_STACK_LIST( 8,         9,   FPEXC_REG, 11,  12) "r13-r14}^ ");
#if defined(__CPU_ARMV4) || defined(__CPU_ARMV4T) || defined(__CPU_ARMV5T)
    asm("nop ");                        // Can't have banked register access immediately after LDM/STM user registers
#endif
    asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));             // store original thread's stack pointer
    asm("b switch_threads ");
    }

#ifdef MONITOR_THREAD_CPU_TIME

#ifdef HIGH_RES_TIMER_COUNTS_UP
#define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)  asm("sub "#Rd", "#Rn", "#Rm)
#else
#define CALC_HIGH_RES_DIFF(Rd, Rn, Rm)  asm("rsb "#Rd", "#Rn", "#Rm)
#endif

// Update thread cpu time counters
// Called just before thread switch with r2 == new thread
// Corrupts r3-r8, Leaves r5=current Time, r6=current thread
#define UPDATE_THREAD_CPU_TIME \
    asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); \
    GET_HIGH_RES_TICK_COUNT(r5); \
    asm("ldr r3, [r6, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
    asm("str r5, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iLastStartTime)); \
    CALC_HIGH_RES_DIFF(r4, r5, r3); \
    asm("add r3, r6, #%a0" : : "i" _FOFF(NThreadBase,iTotalCpuTime)); \
    asm("ldmia r3, {r7-r8}"); \
    asm("adds r7, r7, r4"); \
    asm("adc r8, r8, #0"); \
    asm("stmia r3, {r7-r8}")

#else
#define UPDATE_THREAD_CPU_TIME
#endif
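
// Illustrative C++ sketch, not part of the original source: what
// UPDATE_THREAD_CPU_TIME does, expressed without assembler. It assumes an
// up-counting high resolution timer and that iTotalCpuTime is a 64-bit
// counter (the adds/adc pair above is the 32+32-bit equivalent). The helper
// name and direct member access are assumptions made for the example only.
#if 0
static void UpdateThreadCpuTime(NThreadBase* aOld, NThreadBase* aNew, TUint32 aNow)
    {
    TUint32 elapsed = aNow - aOld->iLastStartTime;  // ticks the outgoing thread just ran for
    aNew->iLastStartTime = aNow;                    // stamp the incoming thread's start time
    aOld->iTotalCpuTime += elapsed;                 // 64-bit accumulate, as done by adds/adc
    }
#endif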

// EMI - Schedule Logging
// Needs: r0=TScheduler, r2 = new thread
// If CPU_TIME, needs: r5=time, r6=current thread
// preserve r0 r2 r9(new address space), r10(&iLock), sp. Trashes r3-r8, lr

#ifdef __EMI_SUPPORT__
#define EMI_EVENTLOGGER \
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLogging)); \
    asm("cmp r3,#0"); \
    asm("blne AddTaskSwitchEvent");

// Needs: r0=TScheduler, r2 = new thread
#define EMI_CHECKDFCTAG(no) \
    asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iEmiMask)); \
    asm("ldr r4, [r2,#%a0]" : : "i" _FOFF(NThread, iTag)); \
    asm("ands r3, r3, r4"); \
    asm("bne emi_add_dfc" #no); \
    asm("check_dfc_tag_done" #no ": ");

#define EMI_ADDDFC(no) \
    asm("emi_add_dfc" #no ": "); \
    asm("ldr r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
    asm("mov r5, r2"); \
    asm("orr r4, r3, r4"); \
    asm("str r4, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfcTrigger)); \
    asm("mov r6, r0"); \
    asm("ldr r0, [r0,#%a0]" : : "i" _FOFF(TScheduler, iEmiDfc)); \
    asm("bl " CSM_ZN4TDfc3AddEv); \
    asm("mov r2, r5"); \
    asm("mov r0, r6"); \
    asm("b check_dfc_tag_done" #no);

#else
#define EMI_EVENTLOGGER
#define EMI_CHECKDFCTAG(no)
#define EMI_ADDDFC(no)
#endif


__ASSERT_COMPILE(_FOFF(NThread,iPriority) == _FOFF(NThread,iPrev) + 4);
__ASSERT_COMPILE(_FOFF(NThread,i_ThrdAttr) == _FOFF(NThread,iPriority) + 2);
__ASSERT_COMPILE(_FOFF(NThread,iHeldFastMutex) == _FOFF(NThread,i_ThrdAttr) + 2);
__ASSERT_COMPILE(_FOFF(NThread,iWaitFastMutex) == _FOFF(NThread,iHeldFastMutex) + 4);
__ASSERT_COMPILE(_FOFF(NThread,iAddressSpace) == _FOFF(NThread,iWaitFastMutex) + 4);
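
// The __ASSERT_COMPILE checks above pin the NThread field layout so that the
// scheduler below can load iPriority..iAddressSpace (and the adjacent fast
// mutex fields) with single ldmia instructions. A C++11 restatement of the
// same idea is sketched here for illustration only; it is not part of the
// original source and assumes offsetof is usable on these types.
#if 0
static_assert(offsetof(NThread, iPriority)      == offsetof(NThread, iPrev) + 4,
              "iPriority must follow iPrev");
static_assert(offsetof(NThread, iHeldFastMutex) == offsetof(NThread, i_ThrdAttr) + 2,
              "iHeldFastMutex must follow i_ThrdAttr");
static_assert(offsetof(NThread, iWaitFastMutex) == offsetof(NThread, iHeldFastMutex) + 4,
              "iWaitFastMutex must follow iHeldFastMutex");
#endif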
|
sl@0
|
1431 |
|
sl@0
|
1432 |
__NAKED__ void TScheduler::Reschedule()
|
sl@0
|
1433 |
{
|
sl@0
|
1434 |
//
|
sl@0
|
1435 |
// Enter in mode_svc with kernel locked, interrupts can be on or off
|
sl@0
|
1436 |
// Exit in mode_svc with kernel unlocked, interrupts off
|
sl@0
|
1437 |
// On exit r0=&TheScheduler, r1=0, r3=TheCurrentThread, r4-r11 unaltered
|
sl@0
|
1438 |
// r2=0 if no reschedule occurred, non-zero if a reschedule did occur.
|
sl@0
|
1439 |
// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
|
sl@0
|
1440 |
//
|
sl@0
|
1441 |
asm("ldr r0, __TheScheduler "); // r0 points to scheduler data
|
sl@0
|
1442 |
asm("str lr, [sp, #-4]! "); // save return address
|
sl@0
|
1443 |
SET_INTS(r3, MODE_SVC, INTS_ALL_OFF); // interrupts off
|
sl@0
|
1444 |
asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iDfcPendingFlag));
|
sl@0
|
1445 |
asm("mov r2, #0 "); // start with r2=0
|
sl@0
|
1446 |
asm("cmp r1, #0 "); // check if DFCs pending
|
sl@0
|
1447 |
|
sl@0
|
1448 |
asm("start_resched: ");
|
sl@0
|
1449 |
asm("blne " CSM_ZN10TScheduler9QueueDfcsEv); // queue any pending DFCs - PRESERVES R2
|
sl@0
|
1450 |
asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
|
sl@0
|
1451 |
SET_INTS_1(r3, MODE_SVC, INTS_ALL_ON);
|
sl@0
|
1452 |
asm("cmp r1, #0 "); // check if a reschedule is required
|
sl@0
|
1453 |
asm("beq no_resched_needed "); // branch out if not
|
sl@0
|
1454 |
SET_INTS_2(r3, MODE_SVC, INTS_ALL_ON); // enable interrupts
|
sl@0
|
1455 |
asm("mrs r2, spsr "); // r2=spsr_svc
|
sl@0
|
1456 |
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
|
sl@0
|
1457 |
asm("stmfd sp!, {r2,r4-r11} "); // store registers and return address
|
sl@0
|
1458 |
#ifdef __CPU_HAS_VFP
|
sl@0
|
1459 |
VFP_FMRX(,FPEXC_REG,VFP_XREG_FPEXC); // r10/r11=FPEXC
|
sl@0
|
1460 |
#endif
|
sl@0
|
1461 |
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
|
sl@0
|
1462 |
GET_CAR(,r11); // r11=CAR
|
sl@0
|
1463 |
#endif
|
sl@0
|
1464 |
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
|
sl@0
|
1465 |
GET_RWRW_TID(,r9); // r9=Thread ID
|
sl@0
|
1466 |
#endif
|
sl@0
|
1467 |
#ifdef __CPU_ARM_USE_DOMAINS
|
sl@0
|
1468 |
asm("mrc p15, 0, r12, c3, c0, 0 "); // r12=DACR
|
sl@0
|
1469 |
#endif
|
sl@0
|
1470 |
#ifdef __CPU_SUPPORT_THUMB2EE
|
sl@0
|
1471 |
GET_THUMB2EE_HNDLR_BASE(,r8); // r8=Thumb-2EE Handler Base
|
sl@0
|
1472 |
#endif
|
sl@0
|
1473 |
asm("ldr lr, [r0, #4] "); // lr=present mask high
|
sl@0
|
1474 |
asm("sub sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // make room for extras, sp_usr and lr_usr
|
sl@0
|
1475 |
asm("str sp, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // store original thread's stack pointer
|
sl@0
|
1476 |
|
sl@0
|
1477 |
|
sl@0
|
1478 |
// Save the sp_usr and lr_usr and only the required coprocessor registers
|
sl@0
|
1479 |
// Thumb-2EE TID FPEXC CAR DACR
|
sl@0
|
1480 |
asm("stmia sp, {" EXTRA_STACK_LIST( 8, 9, FPEXC_REG, 11, 12) "r13-r14}^ ");
|
sl@0
|
1481 |
// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
|
sl@0
|
1482 |
|
sl@0
|
1483 |
asm("ldr r1, [r0], #%a0" : : "i" _FOFF(TScheduler,iQueue)); // r1=present mask low, r0=&iQueue[0]
|
sl@0
|
1484 |
#ifdef __CPU_ARM_HAS_CLZ
|
sl@0
|
1485 |
CLZ(12,14); // r12=31-MSB(r14)
|
sl@0
|
1486 |
asm("subs r12, r12, #32 "); // r12=-1-MSB(r14), 0 if r14=0
|
sl@0
|
1487 |
CLZcc(CC_EQ,12,1); // if r14=0, r12=31-MSB(r1)
|
sl@0
|
1488 |
asm("rsb r12, r12, #31 "); // r12=highest ready thread priority
|
sl@0
|
1489 |
#else
|
sl@0
|
1490 |
asm("mov r12, #31 "); // find the highest priority ready thread
|
sl@0
|
1491 |
asm("cmp r14, #0 "); // high word nonzero?
|
sl@0
|
1492 |
asm("moveq r14, r1 "); // if zero, r14=low word
|
sl@0
|
1493 |
asm("movne r12, #63 "); // else start at pri 63
|
sl@0
|
1494 |
asm("cmp r14, #0x00010000 ");
|
sl@0
|
1495 |
asm("movlo r14, r14, lsl #16 ");
|
sl@0
|
1496 |
asm("sublo r12, r12, #16 ");
|
sl@0
|
1497 |
asm("cmp r14, #0x01000000 ");
|
sl@0
|
1498 |
asm("movlo r14, r14, lsl #8 ");
|
sl@0
|
1499 |
asm("sublo r12, r12, #8 ");
|
sl@0
|
1500 |
asm("cmp r14, #0x10000000 ");
|
sl@0
|
1501 |
asm("movlo r14, r14, lsl #4 ");
|
sl@0
|
1502 |
asm("sublo r12, r12, #4 ");
|
sl@0
|
1503 |
asm("cmp r14, #0x40000000 ");
|
sl@0
|
1504 |
asm("movlo r14, r14, lsl #2 ");
|
sl@0
|
1505 |
asm("sublo r12, r12, #2 ");
|
sl@0
|
1506 |
asm("cmp r14, #0x80000000 ");
|
sl@0
|
1507 |
asm("sublo r12, r12, #1 "); // r12 now equals highest ready priority
|
sl@0
|
1508 |
#endif
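// Illustrative C++ sketch, not part of the original source: the non-CLZ
// branch above finds the highest set bit of the 64-bit ready mask by a
// shift-and-compare cascade. The same search expressed in C++ (helper name
// and parameters are hypothetical):
#if 0
static TInt HighestReadyPriority(TUint32 aPresentLow, TUint32 aPresentHigh)
    {
    TInt pri = 31;
    TUint32 w = aPresentHigh;
    if (w == 0)
        w = aPresentLow;            // no priority >=32 ready, search the low word
    else
        pri = 63;                   // start at 63 when the high word is non-empty
    if (w < 0x00010000u) { w <<= 16; pri -= 16; }
    if (w < 0x01000000u) { w <<= 8;  pri -= 8;  }
    if (w < 0x10000000u) { w <<= 4;  pri -= 4;  }
    if (w < 0x40000000u) { w <<= 2;  pri -= 2;  }
    if (w < 0x80000000u) { pri -= 1; }
    return pri;                     // highest priority with a ready thread
    }
#endif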
|
sl@0
|
1509 |
asm("ldr r2, [r0, r12, lsl #2] "); // r2=pointer to highest priority thread's link field
|
sl@0
|
1510 |
asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
|
sl@0
|
1511 |
asm("mov r4, #0 ");
|
sl@0
|
1512 |
asm("ldmia r2, {r3,r5-r9,lr} "); // r3=next r5=prev r6=attributes, r7=heldFM, r8=waitFM, r9=address space
|
sl@0
|
1513 |
// lr=time
|
sl@0
|
1514 |
asm("add r10, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));
|
sl@0
|
1515 |
asm("strb r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // clear flag
|
sl@0
|
1516 |
ASM_DEBUG1(InitSelection,r2);
|
sl@0
|
1517 |
asm("cmp lr, #0 "); // check if timeslice expired
|
sl@0
|
1518 |
asm("bne no_other "); // skip if not
|
sl@0
|
1519 |
asm("cmp r3, r2 "); // check for thread at same priority
|
sl@0
|
1520 |
asm("bne round_robin "); // branch if there is one
|
sl@0
|
1521 |
asm("no_other: ");
|
sl@0
|
1522 |
asm("cmp r7, #0 "); // does this thread hold a fast mutex?
|
sl@0
|
1523 |
asm("bne holds_fast_mutex "); // branch if it does
|
sl@0
|
1524 |
asm("cmp r8, #0 "); // is thread blocked on a fast mutex?
|
sl@0
|
1525 |
asm("bne resched_blocked "); // branch out if it is
|
sl@0
|
1526 |
|
sl@0
|
1527 |
asm("resched_not_blocked: ");
|
sl@0
|
1528 |
asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttImplicitSystemLock<<16)); // implicit system lock required?
|
sl@0
|
1529 |
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
|
sl@0
|
1530 |
asm("beq resched_end "); // no, switch to this thread
|
sl@0
|
1531 |
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // yes, look at system lock holding thread
|
sl@0
|
1532 |
asm("cmp r1, #0 "); // lock held?
|
sl@0
|
1533 |
asm("beq resched_end "); // no, switch to this thread
|
sl@0
|
1534 |
asm("b resched_imp_sys_held ");
|
sl@0
|
1535 |
#else
|
sl@0
|
1536 |
asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // yes, look at system lock holding thread
|
sl@0
|
1537 |
asm("beq resched_end "); // no, switch to this thread
|
sl@0
|
1538 |
asm("cmp r1, #0 "); // lock held?
|
sl@0
|
1539 |
asm("ldreq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // no, get current address space ptr
|
sl@0
|
1540 |
asm("bne resched_imp_sys_held ");
|
sl@0
|
1541 |
asm("tst r6, #%a0" : : "i" ((TInt)KThreadAttAddressSpace<<16)); // does thread require address space switch?
|
sl@0
|
1542 |
asm("cmpne r9, r5 "); // change of address space required?
|
sl@0
|
1543 |
asm("beq resched_end "); // branch if not
|
sl@0
|
1544 |
|
sl@0
|
1545 |
ASM_DEBUG1(Resched,r2) // r2->new thread
|
sl@0
|
1546 |
UPDATE_THREAD_CPU_TIME;
|
sl@0
|
1547 |
EMI_EVENTLOGGER;
|
sl@0
|
1548 |
EMI_CHECKDFCTAG(1)
|
sl@0
|
1549 |
|
sl@0
|
1550 |
#ifdef BTRACE_CPU_USAGE
|
sl@0
|
1551 |
asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
|
sl@0
|
1552 |
asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer
|
sl@0
|
1553 |
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2
|
sl@0
|
1554 |
asm("cmp r1, #0");
|
sl@0
|
1555 |
asm("blne context_switch_trace");
|
sl@0
|
1556 |
#else
|
sl@0
|
1557 |
asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer
|
sl@0
|
1558 |
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2
|
sl@0
|
1559 |
#endif
|
sl@0
|
1560 |
|
sl@0
|
1561 |
#ifdef __CPU_HAS_ETM_PROCID_REG
|
sl@0
|
1562 |
asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread
|
sl@0
|
1563 |
#endif
|
sl@0
|
1564 |
SET_INTS_1(r12, MODE_SVC, INTS_ALL_OFF);
|
sl@0
|
1565 |
#if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
|
sl@0
|
1566 |
asm("mov r1, sp ");
|
sl@0
|
1567 |
asm("ldmia r1, {r13,r14}^ "); // restore sp_usr and lr_usr
|
sl@0
|
1568 |
// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
|
sl@0
|
1569 |
#else
|
sl@0
|
1570 |
// Load the sp_usr and lr_usr and only the required coprocessor registers
|
sl@0
|
1571 |
// Thumb-2EE TID FPEXC CAR DACR
|
sl@0
|
1572 |
asm("ldmia sp, {" EXTRA_STACK_LIST( 3, 4, 5, 6, 11) "r13-r14}^ ");
|
sl@0
|
1573 |
// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
|
sl@0
|
1574 |
#endif
|
sl@0
|
1575 |
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // iLock.iHoldingThread=new thread
|
sl@0
|
1576 |
asm("str r10, [r2, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=&iLock
|
sl@0
|
1577 |
#ifdef BTRACE_FAST_MUTEX
|
sl@0
|
1578 |
asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
|
sl@0
|
1579 |
asm("cmp lr, #0");
|
sl@0
|
1580 |
asm("blne reschedule_syslock_wait_trace");
|
sl@0
|
1581 |
#endif
|
sl@0
|
1582 |
|
sl@0
|
1583 |
#ifdef __CPU_SUPPORT_THUMB2EE
|
sl@0
|
1584 |
SET_THUMB2EE_HNDLR_BASE(,r3);
|
sl@0
|
1585 |
#endif
|
sl@0
|
1586 |
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
|
sl@0
|
1587 |
SET_RWRW_TID(,r4);
|
sl@0
|
1588 |
#endif
|
sl@0
|
1589 |
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
|
sl@0
|
1590 |
SET_CAR(,r6)
|
sl@0
|
1591 |
#endif
|
sl@0
|
1592 |
#ifdef __CPU_ARM_USE_DOMAINS
|
sl@0
|
1593 |
asm("mcr p15, 0, r11, c3, c0, 0 ");
|
sl@0
|
1594 |
#endif
|
sl@0
|
1595 |
#ifdef __CPU_HAS_VFP
|
sl@0
|
1596 |
VFP_FMXR(,VFP_XREG_FPEXC,5); // restore FPEXC from R5
|
sl@0
|
1597 |
#endif
|
sl@0
|
1598 |
asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // step past sp_usr and lr_usr
|
sl@0
|
1599 |
|
sl@0
|
1600 |
// Do process switching
|
sl@0
|
1601 |
// Handler called with:
|
sl@0
|
1602 |
// r0->scheduler, r2->current thread
|
sl@0
|
1603 |
// r9->new address space, r10->system lock
|
sl@0
|
1604 |
// Must preserve r0,r2, can modify other registers
|
sl@0
|
1605 |
CPWAIT(,r1);
|
sl@0
|
1606 |
SET_INTS_2(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts
|
sl@0
|
1607 |
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
|
sl@0
|
1608 |
asm("mov r3, r2 ");
|
sl@0
|
1609 |
asm("cmp r1, #0 ");
|
sl@0
|
1610 |
asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // unlock the kernel
|
sl@0
|
1611 |
asm("blne " CSM_ZN10TScheduler10RescheduleEv);
|
sl@0
|
1612 |
SET_INTS(r12, MODE_SVC, INTS_ALL_ON); // kernel is now unlocked, interrupts enabled, system lock held
|
sl@0
|
1613 |
asm("mov r2, r3 ");
|
sl@0
|
1614 |
asm("mov lr, pc ");
|
sl@0
|
1615 |
asm("ldr pc, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // do process switch
|
sl@0
|
1616 |
|
sl@0
|
1617 |
asm("mov r1, #1 ");
|
sl@0
|
1618 |
asm("mov r4, #0 ");
|
sl@0
|
1619 |
asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
|
sl@0
|
1620 |
asm("mov r3, r2 "); // r3->new thread
|
sl@0
|
1621 |
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); // check system lock wait flag
|
sl@0
|
1622 |
asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // release system lock
|
sl@0
|
1623 |
asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
|
sl@0
|
1624 |
asm("str r4, [r3, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
|
sl@0
|
1625 |
#ifdef BTRACE_FAST_MUTEX
|
sl@0
|
1626 |
asm("ldrb lr, [r0,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
|
sl@0
|
1627 |
asm("cmp lr, #0");
|
sl@0
|
1628 |
asm("blne reschedule_syslock_signal_trace");
|
sl@0
|
1629 |
#endif
|
sl@0
|
1630 |
asm("cmp r2, #0 ");
|
sl@0
|
1631 |
asm("beq switch_threads_2 "); // no contention on system lock
|
sl@0
|
1632 |
asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
|
sl@0
|
1633 |
asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iCsCount));
|
sl@0
|
1634 |
asm("strb r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // contention - need to reschedule again
|
sl@0
|
1635 |
asm("cmp r2, #0 "); // outstanding CS function?
|
sl@0
|
1636 |
asm("beq switch_threads_2 "); // branch if not
|
sl@0
|
1637 |
asm("cmp r12, #0 "); // iCsCount!=0 ?
|
sl@0
|
1638 |
asm("bne switch_threads_2 "); // branch if it is
|
sl@0
|
1639 |
asm("ldr r1, [sp, #0] "); // r1=spsr_svc for this thread
|
sl@0
|
1640 |
asm("mov r4, r0 ");
|
sl@0
|
1641 |
asm("mov r5, r3 ");
|
sl@0
|
1642 |
asm("msr spsr, r1 "); // restore spsr_svc
|
sl@0
|
1643 |
asm("mov r0, r3 "); // if iCsCount=0, DoCsFunction()
|
sl@0
|
1644 |
asm("bl DoDoCsFunction ");
|
sl@0
|
1645 |
asm("mov r0, r4 ");
|
sl@0
|
1646 |
asm("mov r3, r5 ");
|
sl@0
|
1647 |
asm("b switch_threads_2 ");
|
sl@0
|
1648 |
#endif // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
|
sl@0
|
1649 |
|
sl@0
|
1650 |
asm("round_robin: "); // get here if thread's timeslice has expired and there is another
|
sl@0
|
1651 |
// thread ready at the same priority
|
sl@0
|
1652 |
asm("cmp r7, #0 "); // does this thread hold a fast mutex?
|
sl@0
|
1653 |
asm("bne rr_holds_fast_mutex ");
|
sl@0
|
1654 |
asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTimeslice));
|
sl@0
|
1655 |
asm("add r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
|
sl@0
|
1656 |
asm("str r3, [r0, r12, lsl #2] "); // first thread at this priority is now the next one
|
sl@0
|
1657 |
asm("str lr, [r2, #%a0]" : : "i" _FOFF(NThread,iTime)); // fresh timeslice
|
sl@0
|
1658 |
ASM_DEBUG1(RR,r3);
|
sl@0
|
1659 |
asm("add r3, r3, #%a0" : : "i" _FOFF(NThread,iPriority));
|
sl@0
|
1660 |
asm("ldmia r3, {r6-r9} "); // r6=attributes, r7=heldFM, r8=waitFM, r9=address space
|
sl@0
|
1661 |
asm("sub r2, r3, #%a0" : : "i" _FOFF(NThread,iPriority)); // move to next thread at this priority
|
sl@0
|
1662 |
asm("sub r0, r0, #%a0" : : "i" _FOFF(TScheduler,iQueue));
|
sl@0
|
1663 |
asm("b no_other ");
|
sl@0
|
1664 |
|
sl@0
|
1665 |
asm("resched_blocked: "); // get here if thread is blocked on a fast mutex
|
sl@0
|
1666 |
ASM_DEBUG1(BlockedFM,r8)
|
sl@0
|
1667 |
asm("ldr r3, [r8, #%a0]" : : "i" _FOFF(NFastMutex,iHoldingThread)); // if so, get holding thread
|
sl@0
|
1668 |
asm("cmp r3, #0 "); // mutex now free?
|
sl@0
|
1669 |
asm("beq resched_not_blocked ");
|
sl@0
|
1670 |
asm("mov r2, r3 "); // no, switch to holding thread
|
sl@0
|
1671 |
asm("b resched_end ");
|
sl@0
|
1672 |
|
sl@0
|
1673 |
asm("holds_fast_mutex: ");
|
sl@0
|
1674 |
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
|
sl@0
|
1675 |
asm("cmp r7, r10 "); // does this thread hold system lock?
|
sl@0
|
1676 |
asm("tstne r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16)); // if not, is implicit system lock required?
|
sl@0
|
1677 |
asm("beq resched_end "); // if neither, switch to this thread
|
sl@0
|
1678 |
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // check if system lock held
|
sl@0
|
1679 |
asm("cmp r5, #0 ");
|
sl@0
|
1680 |
asm("bne rr_holds_fast_mutex "); // if implicit system lock contention, set waiting flag on held mutex but still schedule thread
|
sl@0
|
1681 |
asm("b resched_end "); // else switch to thread and finish
|
sl@0
|
1682 |
#else
|
sl@0
|
1683 |
asm("cmp r7, r10 "); // does this thread hold system lock?
|
sl@0
|
1684 |
asm("beq resched_end "); // if so, switch to it
|
sl@0
|
1685 |
asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttImplicitSystemLock)<<16)); // implicit system lock required?
|
sl@0
|
1686 |
asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // if so, check if system lock held
|
sl@0
|
1687 |
asm("beq resched_end "); // if lock not required, switch to thread and finish
|
sl@0
|
1688 |
asm("cmp r5, #0 ");
|
sl@0
|
1689 |
asm("bne rr_holds_fast_mutex "); // if implicit system lock contention, set waiting flag on held mutex but still schedule thread
|
sl@0
|
1690 |
asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16)); // address space required?
|
sl@0
|
1691 |
asm("ldrne r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // if so, get current address space ptr
|
sl@0
|
1692 |
asm("beq resched_end "); // if not, switch to thread and finish
|
sl@0
|
1693 |
asm("cmp r5, r9 "); // do we have correct address space?
|
sl@0
|
1694 |
asm("beq resched_end "); // yes, switch to thread and finish
|
sl@0
|
1695 |
asm("b rr_holds_fast_mutex "); // no, set waiting flag on fast mutex
|
sl@0
|
1696 |
#endif // __MEMMODEL_MULTIPLE__ || __MEMMODEL_FLEXIBLE__
|
sl@0
|
1697 |
|
sl@0
|
1698 |
asm("resched_imp_sys_held: "); // get here if thread requires implicit system lock and lock is held
|
sl@0
|
1699 |
ASM_DEBUG1(ImpSysHeld,r1)
|
sl@0
|
1700 |
asm("mov r2, r1 "); // switch to holding thread
|
sl@0
|
1701 |
asm("add r7, r0, #%a0" : : "i" _FOFF(TScheduler,iLock)); // set waiting flag on system lock
|
sl@0
|
1702 |
|
sl@0
|
1703 |
asm("rr_holds_fast_mutex: "); // get here if round-robin deferred due to fast mutex held
|
sl@0
|
1704 |
asm("mov r6, #1 ");
|
sl@0
|
1705 |
asm("str r6, [r7, #%a0]" : : "i" _FOFF(NFastMutex,iWaiting)); // if so, set waiting flag
|
sl@0
|
1706 |
|
sl@0
|
1707 |
asm("resched_end: ");
|
sl@0
|
1708 |
ASM_DEBUG1(Resched,r2)
|
sl@0
|
1709 |
|
sl@0
|
1710 |
asm("switch_threads: ");
|
sl@0
|
1711 |
UPDATE_THREAD_CPU_TIME;
|
sl@0
|
1712 |
EMI_EVENTLOGGER;
|
sl@0
|
1713 |
EMI_CHECKDFCTAG(2)
|
sl@0
|
1714 |
|
sl@0
|
1715 |
#ifdef BTRACE_CPU_USAGE
|
sl@0
|
1716 |
asm("ldrb r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
|
sl@0
|
1717 |
asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer
|
sl@0
|
1718 |
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2
|
sl@0
|
1719 |
asm("cmp r1, #0");
|
sl@0
|
1720 |
asm("blne context_switch_trace");
|
sl@0
|
1721 |
#else
|
sl@0
|
1722 |
asm("ldr sp, [r2, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // restore new thread's stack pointer
|
sl@0
|
1723 |
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // iCurrentThread=r2
|
sl@0
|
1724 |
#endif
|
sl@0
|
1725 |
|
sl@0
|
1726 |
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
|
sl@0
|
1727 |
asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThread,iPriority)); // attributes into r6
|
sl@0
|
1728 |
asm("ldr r9, [r2, #%a0]" : : "i" _FOFF(NThread,iAddressSpace)); // address space into r9
|
sl@0
|
1729 |
#else
|
sl@0
|
1730 |
#ifdef __CPU_HAS_ETM_PROCID_REG
|
sl@0
|
1731 |
asm("mcr p15, 0, r2, c13, c0, 1 "); // notify ETM of new thread
|
sl@0
|
1732 |
#endif
|
sl@0
|
1733 |
#endif
|
sl@0
|
1734 |
#if EXTRA_STACK_SPACE==0 && defined(__CPU_ARM9_USER_LDM_BUG)
|
sl@0
|
1735 |
asm("mov r3, sp ");
|
sl@0
|
1736 |
asm("ldmia r3, {r13,r14}^ "); // restore sp_usr and lr_usr
|
sl@0
|
1737 |
// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
|
sl@0
|
1738 |
#else
|
sl@0
|
1739 |
// Load the sp_usr and lr_usr and only the required coprocessor registers
|
sl@0
|
1740 |
// Thumb-2EE TID FPEXC CAR DACR
|
sl@0
|
1741 |
asm("ldmia sp, {" EXTRA_STACK_LIST( 1, 3, FPEXC_REG3, 10, 11) "r13-r14}^ ");
|
sl@0
|
1742 |
// NOTE: Prior to ARMv6 can't have banked register access immediately after LDM/STM user registers
|
sl@0
|
1743 |
#endif
|
sl@0
|
1744 |
#ifdef __CPU_SUPPORT_THUMB2EE
|
sl@0
|
1745 |
SET_THUMB2EE_HNDLR_BASE(,r1);
|
sl@0
|
1746 |
#endif
|
sl@0
|
1747 |
#ifdef __CPU_HAS_CP15_THREAD_ID_REG
|
sl@0
|
1748 |
SET_RWRW_TID(,r3) // restore Thread ID from r3
|
sl@0
|
1749 |
#endif
|
sl@0
|
1750 |
asm("mov r3, r2 "); // r3=TheCurrentThread
|
sl@0
|
1751 |
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
|
sl@0
|
1752 |
SET_CAR(,r10)
|
sl@0
|
1753 |
#endif
|
sl@0
|
1754 |
#ifdef __CPU_ARM_USE_DOMAINS
|
sl@0
|
1755 |
asm("mcr p15, 0, r11, c3, c0, 0 ");
|
sl@0
|
1756 |
#endif
|
sl@0
|
1757 |
#ifdef __CPU_HAS_VFP
|
sl@0
|
1758 |
VFP_FMXR(,VFP_XREG_FPEXC,FPEXC_REG3); // restore FPEXC from R4 or R10
|
sl@0
|
1759 |
#endif
|
sl@0
|
1760 |
asm("add sp, sp, #%a0" : : "i" (8+EXTRA_STACK_SPACE)); // step past sp_usr and lr_usr
|
sl@0
|
1761 |
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
|
sl@0
|
1762 |
// r2=r3=current thread here
|
sl@0
|
1763 |
asm("tst r6, #%a0" : : "i" (((TInt)KThreadAttAddressSpace)<<16)); // address space required?
|
sl@0
|
1764 |
asm("ldrne r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // if so, get pointer to process handler
|
sl@0
|
1765 |
asm("mov r2, r2, lsr #6 "); // r2=current thread>>6
|
sl@0
|
1766 |
asm("beq switch_threads_3 "); // skip if address space change not required
|
sl@0
|
1767 |
|
sl@0
|
1768 |
// Do address space switching
|
sl@0
|
1769 |
// Handler called with:
|
sl@0
|
1770 |
// r0->scheduler, r3->current thread
|
sl@0
|
1771 |
// r9->new address space, r5->old address space
|
sl@0
|
1772 |
// Return with r2 = (r2<<8) | ASID
|
sl@0
|
1773 |
// Must preserve r0,r3, can modify other registers
|
sl@0
|
1774 |
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // get current address space ptr
|
sl@0
|
1775 |
#ifdef __MEMMODEL_FLEXIBLE__
|
sl@0
|
1776 |
asm("adr lr, switch_threads_5 ");
|
sl@0
|
1777 |
#else
|
sl@0
|
1778 |
asm("adr lr, switch_threads_4 ");
|
sl@0
|
1779 |
#endif
|
sl@0
|
1780 |
__JUMP(,r1);
|
sl@0
|
1781 |
|
sl@0
|
1782 |
asm("switch_threads_3: ");
|
sl@0
|
1783 |
asm("mrc p15, 0, r4, c13, c0, 1 "); // r4 = CONTEXTID (threadID:ASID)
|
sl@0
|
1784 |
asm("and r4, r4, #0xff "); // isolate ASID
|
sl@0
|
1785 |
asm("orr r2, r4, r2, lsl #8 "); // r2 = new thread ID : ASID
|
sl@0
|
1786 |
__DATA_SYNC_BARRIER_Z__(r12); // needed before change to ContextID
|
sl@0
|
1787 |
|
sl@0
|
1788 |
asm("switch_threads_4: ");
|
sl@0
|
1789 |
#if (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)) && !defined(__CPU_ARM1136_ERRATUM_408022_FIXED)
|
sl@0
|
1790 |
asm("nop");
|
sl@0
|
1791 |
#endif
|
sl@0
|
1792 |
asm("mcr p15, 0, r2, c13, c0, 1 "); // set ContextID (ASID + debugging thread ID)
|
sl@0
|
1793 |
__INST_SYNC_BARRIER_Z__(r12);
|
sl@0
|
1794 |
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
|
sl@0
|
1795 |
asm("mcr p15, 0, r12, c7, c5, 6 "); // flush BTAC
|
sl@0
|
1796 |
#endif
|
sl@0
|
1797 |
|
sl@0
|
1798 |
// asm("switch_threads_3: "); // TEMPORARY UNTIL CONTEXTID BECOMES READABLE
|
sl@0
|
1799 |
asm("switch_threads_5: ");
|
sl@0
|
1800 |
#if defined(__CPU_ARM1136__) && defined(__CPU_HAS_VFP) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
|
sl@0
|
1801 |
VFP_FMRX(,14,VFP_XREG_FPEXC);
|
sl@0
|
1802 |
asm("mrc p15, 0, r4, c1, c0, 1 ");
|
sl@0
|
1803 |
asm("tst r14, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
|
sl@0
|
1804 |
asm("bic r4, r4, #2 "); // clear DB bit (disable dynamic prediction)
|
sl@0
|
1805 |
asm("and r12, r4, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled)
|
sl@0
|
1806 |
asm("orreq r4, r4, r12, lsl #1 "); // if VFP is being disabled set DB = RS
|
sl@0
|
1807 |
asm("mcr p15, 0, r4, c1, c0, 1 ");
|
sl@0
|
1808 |
#endif
|
sl@0
|
1809 |
#endif
|
sl@0
|
1810 |
CPWAIT(,r12);
|
sl@0
|
1811 |
|
sl@0
|
1812 |
asm("switch_threads_2: ");
|
sl@0
|
1813 |
asm("resched_trampoline_hook_address: ");
|
sl@0
|
1814 |
asm("ldmia sp!, {r2,r4-r11,lr} "); // r2=spsr_svc, restore r4-r11 and return address
|
sl@0
|
1815 |
asm("resched_trampoline_return: ");
|
sl@0
|
1816 |
|
sl@0
|
1817 |
SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts
|
sl@0
|
1818 |
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
|
sl@0
|
1819 |
asm("msr spsr, r2 "); // restore spsr_svc
|
sl@0
|
1820 |
asm("cmp r1, #0 "); // check for another reschedule
|
sl@0
|
1821 |
asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // if not needed unlock the kernel
|
sl@0
|
1822 |
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
|
sl@0
|
1823 |
asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
|
sl@0
|
1824 |
// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
|
sl@0
|
1825 |
#endif
|
sl@0
|
1826 |
__JUMP(eq,lr); // and return in context of new thread, with r2 non zero
|
sl@0
|
1827 |
asm("str lr, [sp, #-4]! ");
|
sl@0
|
1828 |
asm("b start_resched "); // if necessary, go back to beginning
|
sl@0
|
1829 |
|
sl@0
|
1830 |
asm("no_resched_needed: ");
|
sl@0
|
1831 |
asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // else unlock the kernel
|
sl@0
|
1832 |
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3=iCurrentThread
|
sl@0
|
1833 |
asm("ldr pc, [sp], #4 "); // and exit immediately with r2=0 iff no reschedule occurred
|
sl@0
|
1834 |
|
sl@0
|
1835 |
asm("__TheScheduler: ");
|
sl@0
|
1836 |
asm(".word TheScheduler ");
|
sl@0
|
1837 |
asm("__SystemLock: ");
|
sl@0
|
1838 |
asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
|
sl@0
|
1839 |
#ifdef BTRACE_CPU_USAGE
|
sl@0
|
1840 |
asm("context_switch_trace_header:");
|
sl@0
|
1841 |
asm(".word %a0" : : "i" ((TInt)(8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8)) );
|
sl@0
|
1842 |
|
sl@0
|
1843 |
asm("context_switch_trace:");
|
sl@0
|
1844 |
asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
|
sl@0
|
1845 |
asm("stmdb sp!, {r0,r2,lr}");
|
sl@0
|
1846 |
asm("ldr r0, context_switch_trace_header" );
|
sl@0
|
1847 |
asm("mov lr, pc");
|
sl@0
|
1848 |
__JUMP(,r1);
|
sl@0
|
1849 |
asm("ldmia sp!, {r0,r2,pc}");
|
sl@0
|
1850 |
#endif
|
sl@0
|
1851 |
|
sl@0
|
1852 |
#ifdef __DEBUGGER_SUPPORT__
|
sl@0
|
1853 |
asm("resched_trampoline: ");
|
sl@0
|
1854 |
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
|
sl@0
|
1855 |
asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
|
sl@0
|
1856 |
asm("mov r11, sp "); // save stack pointer
|
sl@0
|
1857 |
asm("bic sp, sp, #4 "); // align stack to 8 byte boundary
|
sl@0
|
1858 |
asm("tst r1, r1");
|
sl@0
|
1859 |
asm("movne lr, pc");
|
sl@0
|
1860 |
__JUMP(ne,r1);
|
sl@0
|
1861 |
asm("ldr r0, __TheScheduler "); // r0 points to scheduler data
|
sl@0
|
1862 |
asm("mov sp, r11 "); // restore stack pointer
|
sl@0
|
1863 |
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3=iCurrentThread
|
sl@0
|
1864 |
asm("resched_trampoline_unhook_data: ");
|
sl@0
|
1865 |
asm("ldmia sp!, {r2,r4-r11,lr} "); // r2=spsr_svc, restore r4-r11 and return address
|
sl@0
|
1866 |
asm("b resched_trampoline_return");
|
sl@0
|
1867 |
#endif
|
sl@0
|
1868 |
|
sl@0
|
1869 |
#ifdef __EMI_SUPPORT__
|
sl@0
|
1870 |
// EMI Task Event Logger
|
sl@0
|
1871 |
asm("AddTaskSwitchEvent: ");
|
sl@0
|
1872 |
#ifndef MONITOR_THREAD_CPU_TIME
|
sl@0
|
1873 |
// if we don't have it, get CurrentThread
|
sl@0
|
1874 |
asm("ldr r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
|
sl@0
|
1875 |
#endif
|
sl@0
|
1876 |
|
sl@0
|
1877 |
// Check new thread for if loggable
|
sl@0
|
1878 |
asm("ldrb r3, [r2,#%a0]" : : "i" _FOFF(NThread, i_ThrdAttr));
|
sl@0
|
1879 |
asm("ldr r4, [r6,#%a0]" : : "i" _FOFF(NThread, iPriority)); // Load Spares. b2=state,b3=attrbutes
|
sl@0
|
1880 |
|
sl@0
|
1881 |
asm("tst r3, #%a0" : : "i" ((TInt) KThreadAttLoggable));
|
sl@0
|
1882 |
asm("ldreq r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iSigma));
|
sl@0
|
1883 |
asm("movne r7,r2");
|
sl@0
|
1884 |
|
sl@0
|
1885 |
// Check old thread for if loggable
|
sl@0
|
1886 |
asm("tst r4, #%a0" : : "i" (KThreadAttLoggable << 16));
|
sl@0
|
1887 |
asm("ldreq r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iSigma));
|
sl@0
|
1888 |
|
sl@0
|
1889 |
// Abort log entry if duplicate
|
sl@0
|
1890 |
asm("cmp r6,r7");
|
sl@0
|
1891 |
__JUMP(eq,lr);
|
sl@0
|
1892 |
|
sl@0
|
1893 |
// create record: r3=iType/iFlags/iExtra, r4=iUserState
|
sl@0
|
1894 |
// r5=iTime, r6=iPrevious, r7=iNext
|
sl@0
|
1895 |
// waiting = (2nd byte of r4)!=NThread::EReady (=0)
|
sl@0
|
1896 |
#ifndef MONITOR_THREAD_CPU_TIME
|
sl@0
|
1897 |
GET_HIGH_RES_TICK_COUNT(r5);
|
sl@0
|
1898 |
#endif
|
sl@0
|
1899 |
|
sl@0
|
1900 |
asm("tst r4, #0xff00");
|
sl@0
|
1901 |
asm("ldr r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferHead));
|
sl@0
|
1902 |
asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iEmiState));
|
sl@0
|
1903 |
asm("moveq r3, #0x200"); // #2 = waiting flag.
|
sl@0
|
1904 |
asm("movne r3, #0x0");
|
sl@0
|
1905 |
|
sl@0
|
1906 |
//Store record, move onto next
|
sl@0
|
1907 |
asm("stmia r8!,{r3-r7}");
|
sl@0
|
1908 |
|
sl@0
|
1909 |
// Check for and apply buffer wrap
|
sl@0
|
1910 |
asm("ldr r7,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferEnd)); // r7 = BufferEnd
|
sl@0
|
1911 |
asm("ldr r6,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferTail)); // r6 = BufferTail
|
sl@0
|
1912 |
asm("cmp r7,r8");
|
sl@0
|
1913 |
asm("ldrlo r8,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferStart));
|
sl@0
|
1914 |
|
sl@0
|
1915 |
// Check for event lost
|
sl@0
|
1916 |
asm("cmp r6,r8");
|
sl@0
|
1917 |
asm("str r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferHead)); // r8 = BufferHead
|
sl@0
|
1918 |
__JUMP(ne,lr);
|
sl@0
|
1919 |
|
sl@0
|
1920 |
// overflow, move on read pointer - event lost!
|
sl@0
|
1921 |
asm("add r6,r6,#%a0" : : "i" ((TInt) sizeof(TTaskEventRecord))); // iBufferTail++
|
sl@0
|
1922 |
asm("cmp r7,r6"); // iBufferTail > iBufferEnd ?
|
sl@0
|
1923 |
asm("ldrlo r6,[r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferStart));
|
sl@0
|
1924 |
|
sl@0
|
1925 |
asm("ldrb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags));
|
sl@0
|
1926 |
asm("orr r5, r5, #%a0" : : "i" ((TInt) KTskEvtFlag_EventLost));
|
sl@0
|
1927 |
asm("strb r5, [r6, #%a0]" : : "i" _FOFF(TTaskEventRecord,iFlags));
|
sl@0
|
1928 |
|
sl@0
|
1929 |
asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBufferTail));
|
sl@0
|
1930 |
|
sl@0
|
1931 |
__JUMP(,lr);
|
sl@0
|
1932 |
|
sl@0
|
1933 |
#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_FLEXIBLE__)
|
sl@0
|
1934 |
EMI_ADDDFC(1)
|
sl@0
|
1935 |
#endif
|
sl@0
|
1936 |
EMI_ADDDFC(2)
|
sl@0
|
1937 |
#endif
|
sl@0
|
1938 |
|
sl@0
|
1939 |
#ifdef BTRACE_FAST_MUTEX
|
sl@0
|
1940 |
asm("reschedule_syslock_wait_trace:");
|
sl@0
|
1941 |
// r0=scheduler r2=thread
|
sl@0
|
1942 |
asm("stmdb sp!, {r3,r12}");
|
sl@0
|
1943 |
ALIGN_STACK_START;
|
sl@0
|
1944 |
asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
|
sl@0
|
1945 |
asm("bl syslock_wait_trace");
|
sl@0
|
1946 |
asm("ldmia sp!, {r0-r2,lr}");
|
sl@0
|
1947 |
ALIGN_STACK_END;
|
sl@0
|
1948 |
asm("ldmia sp!, {r3,r12}");
|
sl@0
|
1949 |
__JUMP(,lr);
|
sl@0
|
1950 |
|
sl@0
|
1951 |
asm("reschedule_syslock_signal_trace:");
|
sl@0
|
1952 |
// r0=scheduler r3=thread
|
sl@0
|
1953 |
asm("stmdb sp!, {r3,r12}");
|
sl@0
|
1954 |
ALIGN_STACK_START;
|
sl@0
|
1955 |
asm("stmdb sp!, {r0-r2,lr}"); // 4th item on stack is PC value for trace
|
sl@0
|
1956 |
asm("bl syslock_signal_trace");
|
sl@0
|
1957 |
asm("ldmia sp!, {r0-r2,lr}");
|
sl@0
|
1958 |
ALIGN_STACK_END;
|
sl@0
|
1959 |
asm("ldmia sp!, {r3,r12}");
|
sl@0
|
1960 |
__JUMP(,lr);
|
sl@0
|
1961 |
#endif
|
sl@0
|
1962 |
};


/**
 * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
 *
 * @param aStart Set to the lowest memory address which needs to be modified.
 * @param aEnd   Set to the highest memory address +1 which needs to be modified.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r2,resched_trampoline_hook_address");
    asm("str r2,[r0]");
    asm("adr r2,resched_trampoline_hook_address+4");
    asm("str r2,[r1]");
#else
    asm("mov r2,#0");
    asm("str r2,[r0]");
    asm("str r2,[r1]");
#endif
    __JUMP(,lr);
    };
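
// Illustrative usage sketch, not part of the original source: query the
// address range that inserting the scheduler hooks will patch. Making that
// range writable is platform-specific and not shown. 'ReportHookRange' is a
// hypothetical name for this example.
#if 0
void ReportHookRange()
    {
    TLinAddr start;
    TLinAddr end;
    NKern::Lock();                       // @pre Kernel must be locked
    NKern::SchedulerHooks(start, end);   // [start,end) is the range to be patched
    NKern::Unlock();
    // 'end - start' bytes at 'start' must be writable before calling
    // NKern::InsertSchedulerHooks().
    }
#endif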


/**
 * Modifies the scheduler code so that it can call the function set by
 * NKern::SetRescheduleCallback().
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r0,resched_trampoline_hook_address");
    asm("adr r1,resched_trampoline");
    asm("sub r1, r1, r0");
    asm("sub r1, r1, #8");
    asm("mov r1, r1, asr #2");
    asm("add r1, r1, #0xea000000");     // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline

#if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
    // These platforms have shadow memory in non-writable page. We cannot use the standard
    // Epoc::CopyToShadowMemory interface as we hold Kernel lock here.
    // Instead, we'll temporarily disable access permission checking in MMU by switching
    // domain#0 into Manager Mode (see Domain Access Control Register).
    asm("mrs r12, CPSR ");              // save cpsr setting and ...
    CPSIDAIF;                           // ...disable interrupts
    asm("mrc p15, 0, r2, c3, c0, 0 ");  // read DACR
    asm("orr r3, r2, #3");              // domain #0 is the first two bits. manager mode is 11b
    asm("mcr p15, 0, r3, c3, c0, 0 ");  // write DACR
    asm("str r1,[r0]");
    asm("mcr p15, 0, r2, c3, c0, 0 ");  // write back the original value of DACR
    asm("msr CPSR_cxsf, r12 ");         // restore cpsr setting (re-enable interrupts)
#else
    asm("str r1,[r0]");
#endif

#endif
    __JUMP(,lr);
    };
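
// Illustrative sketch, not part of the original source: the word stored by
// InsertSchedulerHooks() is an unconditional ARM branch whose 24-bit field is
// the word offset from the branch address + 8 (the pipeline offset), which is
// what the sub/asr/add sequence above computes. A hypothetical helper showing
// the same encoding:
#if 0
static TUint32 ArmBranchTo(TLinAddr aBranchAddr, TLinAddr aTarget)
    {
    TInt32 words = ((TInt32)aTarget - (TInt32)aBranchAddr - 8) >> 2;   // signed word offset
    return 0xEA000000u | ((TUint32)words & 0x00FFFFFFu);               // cond=AL, opcode B
    }
#endif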


/**
 * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("adr r0,resched_trampoline_hook_address");
    asm("ldr r1,resched_trampoline_unhook_data");

#if defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
    // See comments above in InsertSchedulerHooks
    asm("mrs r12, CPSR ");              // save cpsr setting and ...
    CPSIDAIF;                           // ...disable interrupts
    asm("mrc p15, 0, r2, c3, c0, 0 ");  // read DACR
    asm("orr r3, r2, #3");              // domain #0 is the first two bits. manager mode is 11b
    asm("mcr p15, 0, r3, c3, c0, 0 ");  // write DACR
    asm("str r1,[r0]");
    asm("mcr p15, 0, r2, c3, c0, 0 ");  // write back the original value of DACR
    asm("msr CPSR_cxsf, r12 ");         // restore cpsr setting (re-enable interrupts)
#else
    asm("str r1,[r0]");
#endif

#endif
    __JUMP(,lr);
    };


/**
 * Set the function which is to be called on every thread reschedule.
 *
 * @param aCallback  Pointer to callback function, or NULL to disable callback.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
    {
    ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
    asm("ldr r1, __TheScheduler ");
    asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
#endif
    __JUMP(,lr);
    };
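
// Illustrative usage sketch, not part of the original source: install a
// reschedule callback once the hooks are in place. The callback name is
// hypothetical and its signature is assumed to match TRescheduleCallback
// (called with the newly scheduled thread).
#if 0
void MyRescheduleCallback(NThread* aNewThread)
    {
    (void)aNewThread;                       // e.g. record a trace event here
    }

void EnableRescheduleTracing()
    {
    NKern::Lock();
    NKern::InsertSchedulerHooks();          // hook range must already be writable
    NKern::SetRescheduleCallback(&MyRescheduleCallback);
    NKern::Unlock();
    }
#endif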



/** Disables interrupts to specified level.

Note that if we are not disabling all interrupts we must lock the kernel
here, otherwise a high priority interrupt which is still enabled could
cause a reschedule and the new thread could then reenable interrupts.

@param aLevel Interrupts are disabled up to and including aLevel. On ARM,
level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
@return CPU-specific value passed to RestoreInterrupts.

@pre 1 <= aLevel <= maximum level (CPU-specific)

@see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
    {
    asm("cmp r0, #1 ");
    asm("bhi " CSM_ZN5NKern20DisableAllInterruptsEv);   // if level>1, disable all
    asm("ldreq r12, __TheScheduler ");
    asm("mrs r2, cpsr ");               // r2=original CPSR
    asm("bcc 1f ");                     // skip if level=0
    asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("and r0, r2, #0xc0 ");
    INTS_OFF_1(r2, r2, INTS_IRQ_OFF);   // disable level 1 interrupts
    asm("cmp r3, #0 ");                 // test if kernel locked
    asm("addeq r3, r3, #1 ");           // if not, lock the kernel
    asm("streq r3, [r12] ");
    asm("orreq r0, r0, #0x80000000 ");  // and set top bit to indicate kernel locked
    INTS_OFF_2(r2, r2, INTS_IRQ_OFF);
    __JUMP(,lr);
    asm("1: ");
    asm("and r0, r2, #0xc0 ");
    __JUMP(,lr);
    }
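
// Illustrative usage sketch, not part of the original source: the value
// returned by DisableInterrupts() is opaque and must be passed back to
// RestoreInterrupts(), which also drops the kernel lock that level-1 masking
// takes implicitly. 'TouchStateFromThread' is a hypothetical name.
#if 0
void TouchStateFromThread()
    {
    TInt irq = NKern::DisableInterrupts(1);     // mask IRQ only (level 1)
    // ... short, non-blocking work ...
    NKern::RestoreInterrupts(irq);
    }
#endif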


/** Disables all interrupts (e.g. both IRQ and FIQ on ARM).

@return CPU-specific value passed to NKern::RestoreInterrupts().

@see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
    {
    asm("mrs r1, cpsr ");
    asm("and r0, r1, #0xc0 ");          // return I and F bits of CPSR
    INTS_OFF(r1, r1, INTS_ALL_OFF);
    __JUMP(,lr);
    }
|
sl@0
|
2140 |
|
sl@0
|
2141 |
|
sl@0
|
2142 |
/** Enables all interrupts (e.g. IRQ and FIQ on ARM).

	This function never unlocks the kernel, so it must only be used to
	complement NKern::DisableAllInterrupts(). Never use it to complement
	NKern::DisableInterrupts(), which may have locked the kernel.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()

	@internalComponent
 */
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
	{
#ifndef __CPU_ARM_HAS_CPS
	asm("mrs r0, cpsr ");
	asm("bic r0, r0, #0xc0 ");
	asm("msr cpsr_c, r0 ");
#else
	CPSIEIF;
#endif
	__JUMP(,lr);
	}

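// Usage sketch (illustrative only; the function name is an assumption, not
// part of this file): a very short critical section that must exclude both
// IRQ and FIQ.  Because NKern::DisableAllInterrupts() never locks the kernel,
// it may be paired directly with NKern::EnableAllInterrupts().
//
// @code
// void ExampleBlockAllInterrupts()
//     {
//     NKern::DisableAllInterrupts();	// mask IRQ and FIQ
//     // ...a few instructions of truly atomic work...
//     NKern::EnableAllInterrupts();	// valid only because DisableAllInterrupts() was used
//     }
// @endcode
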
/** Restores interrupts to previous level and unlocks the kernel if it was
	locked when disabling them.

	@param	aRestoreData CPU-specific data returned from NKern::DisableInterrupts
			or NKern::DisableAllInterrupts specifying the previous interrupt level.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()
 */
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
	{
	asm("tst r0, r0 ");					// test state of top bit of aRestoreData
	asm("mrs r1, cpsr ");
	asm("and r0, r0, #0xc0 ");
	asm("bic r1, r1, #0xc0 ");
	asm("orr r1, r1, r0 ");				// replace I and F bits with those supplied
	asm("msr cpsr_c, r1 ");				// flags are unchanged (in particular N)
	__JUMP(pl,lr);						// if top bit of aRestoreData clear, finished

	// if top bit of aRestoreData set, fall through to unlock the kernel
	}

/** Unlocks the kernel.

	Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
	pending, calls the scheduler to process them.
	Must be called in mode_svc.

	@pre	Call either in a thread or an IDFC context.
	@pre	Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Unlock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	asm("ldr r1, __TheScheduler ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("subs r2, r3, #1 ");
	asm("str r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("ldreq r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// if kernel now unlocked, check flags
	asm("bne 1f ");						// if kernel still locked, return
	asm("cmp r2, #0 ");					// check for DFCs or reschedule
	asm("bne 2f");						// branch if needed
	asm("1: ");
	__JUMP(,lr);
	asm("2: ");
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// else lock the kernel again
	asm("str lr, [sp, #-4]! ");			// save return address
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);	// run DFCs and reschedule; returns with kernel unlocked, interrupts disabled
	SET_INTS(r0, MODE_SVC, INTS_ALL_ON);	// re-enable interrupts
	asm("ldr pc, [sp], #4 ");
	}

/** Locks the kernel.

	Increments iKernCSLocked, thereby deferring IDFCs and preemption.
	Must be called in mode_svc.

	@pre	Call either in a thread or an IDFC context.
	@pre	Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Lock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	asm("ldr r12, __TheScheduler ");
	asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("add r3, r3, #1 ");				// lock the kernel
	asm("str r3, [r12] ");
	__JUMP(,lr);
	}

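// Usage sketch (illustrative only; the function name is an assumption, not
// part of this file): a short section protected against preemption and IDFCs
// by bracketing it with NKern::Lock() and NKern::Unlock().  Unlock() runs any
// pending IDFCs or reschedule once the lock count returns to zero.
//
// @code
// void ExampleUpdateSchedulerSideState()
//     {
//     NKern::Lock();					// defer IDFCs and preemption
//     // ...manipulate state shared with IDFCs or the scheduler...
//     NKern::Unlock();					// any pending reschedule happens here
//     }
// @endcode
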
/** Locks the kernel and returns a pointer to the current thread.

	Increments iKernCSLocked, thereby deferring IDFCs and preemption.

	@return	Pointer to the current thread.

	@pre	Call either in a thread or an IDFC context.
	@pre	Do not call from an ISR.
 */
EXPORT_C __NAKED__ NThread* NKern::LockC()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	asm("ldr r12, __TheScheduler ");
	asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("add r3, r3, #1 ");				// lock the kernel
	asm("str r3, [r12] ");
	__JUMP(,lr);
	}

__ASSERT_COMPILE(_FOFF(TScheduler,iKernCSLocked) == _FOFF(TScheduler,iRescheduleNeededFlag) + 4);

/** Allows IDFCs and rescheduling if they are pending.

	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
	this function calls the scheduler to process the IDFCs and possibly reschedule.
	Must be called in mode_svc.

	@return	Nonzero if a reschedule actually occurred, zero if not.

	@pre	Call either in a thread or an IDFC context.
	@pre	Do not call from an ISR.
 */
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	asm("ldr r3, __RescheduleNeededFlag ");
	asm("ldmia r3, {r0,r1} ");			// r0=RescheduleNeededFlag, r1=KernCSLocked
	asm("cmp r0, #0 ");
	__JUMP(eq,lr);						// if no reschedule required, return 0
	asm("subs r1, r1, #1 ");
	__JUMP(ne,lr);						// if kernel still locked, exit
	asm("str lr, [sp, #-4]! ");			// store return address

	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);

	asm("mov r1, #1 ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel again
	SET_INTS(r3, MODE_SVC, INTS_ALL_ON);	// interrupts back on
	asm("mov r0, r2 ");					// return 0 if no reschedule, non-zero if reschedule occurred
	asm("ldr pc, [sp], #4 ");

	asm("__RescheduleNeededFlag: ");
	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iRescheduleNeededFlag));
	}

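// Usage sketch (illustrative only; the loop body is an assumption, not part
// of this file): a long-running operation that holds the kernel lock but
// periodically offers a preemption point so pending IDFCs and higher-priority
// threads are not held off for the whole loop.
//
// @code
// void ExampleLongOperation(TInt aCount)
//     {
//     NKern::Lock();
//     for (TInt i = 0; i < aCount; ++i)
//         {
//         // ...one bounded chunk of work...
//         NKern::PreemptionPoint();	// runs IDFCs / reschedules if pending
//         }
//     NKern::Unlock();
//     }
// @endcode
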
/** Returns the current processor context type (thread, IDFC or interrupt).

	@return	A value from the NKern::TContext enumeration (but never EEscaped).

	@pre	Call in any context.

	@see	NKern::TContext
 */
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
	{
	asm("mrs r1, cpsr ");
	asm("mov r0, #2 ");					// 2 = interrupt
	asm("and r1, r1, #0x1f ");			// r1 = mode
	asm("cmp r1, #0x13 ");
	asm("ldreq r2, __TheScheduler ");
	__JUMP(ne,lr);						// if not svc, must be interrupt
	asm("ldrb r0, [r2, #%a0]" : : "i" _FOFF(TScheduler,iInIDFC));
	asm("cmp r0, #0 ");
	asm("movne r0, #1 ");				// if iInIDFC, return 1, else return 0
	__JUMP(,lr);
	}

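// Usage sketch (illustrative only; the function name is an assumption, not
// part of this file): code that can be reached from thread, IDFC and interrupt
// context can use NKern::CurrentContext() to decide whether blocking is allowed.
//
// @code
// TBool ExampleMayBlock()
//     {
//     return NKern::CurrentContext() == NKern::EThread;	// thread context only
//     }
// @endcode
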
#ifdef __FAST_MUTEX_MACHINE_CODED__

/** Temporarily releases the System Lock if there is contention.

    If there is another thread attempting to acquire the System Lock, the
    calling thread releases the mutex and then acquires it again.

    This is more efficient than the equivalent code:

    @code
    NKern::UnlockSystem();
    NKern::LockSystem();
    @endcode

    Note that this can only allow higher-priority threads to use the System
    Lock, since lower-priority threads cannot cause contention on a fast mutex.

	@return	TRUE if the System Lock was relinquished, FALSE if not.

	@pre	System lock must be held.

	@post	System lock is held.

	@see NKern::LockSystem()
	@see NKern::UnlockSystem()
*/
EXPORT_C __NAKED__ TBool NKern::FlashSystem()
	{
	asm("ldr r0, __SystemLock ");
	// fall through to NKern::FMFlash() with the System Lock fast mutex in r0
	}

/** Temporarily releases a fast mutex if there is contention.

    If there is another thread attempting to acquire the mutex, the calling
    thread releases the mutex and then acquires it again.

    This is more efficient than the equivalent code:

    @code
    NKern::FMSignal();
    NKern::FMWait();
    @endcode

	@return	TRUE if the mutex was relinquished, FALSE if not.

	@pre	The mutex must be held.

	@post	The mutex is held.
*/
EXPORT_C __NAKED__ TBool NKern::FMFlash(NFastMutex*)
	{
	ASM_DEBUG1(NKFMFlash,r0);

	asm("ldr r1, [r0,#%a0]" : : "i" _FOFF(NFastMutex,iWaiting));
	asm("cmp r1, #0");
	asm("bne fmflash_contended");
#ifdef BTRACE_FAST_MUTEX
	asm("ldr r1, __TheScheduler ");
	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iFastMutexFilter));
	asm("cmp r2, #0");
	asm("bne fmflash_trace");
#endif
	asm("mov r0, #0");
	__JUMP(,lr);

	asm("fmflash_contended:");
	asm("stmfd sp!,{r4,lr}");
	asm("mov r4, r0");
	asm("bl " CSM_ZN5NKern4LockEv);
	asm("mov r0, r4");
	asm("bl " CSM_ZN10NFastMutex6SignalEv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("mov r0, r4");
	asm("bl " CSM_ZN10NFastMutex4WaitEv);
	asm("bl " CSM_ZN5NKern6UnlockEv);
	asm("mov r0, #-1");
	__POPRET("r4,");

#ifdef BTRACE_FAST_MUTEX
	asm("fmflash_trace:");
	ALIGN_STACK_START;
	asm("stmdb sp!,{r0-r2,lr}");		// 4th item on stack is PC value for trace
	asm("mov r3, r0");					// fast mutex parameter in r3
	asm("ldr r0, fmflash_trace_header");	// header parameter in r0
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("mov lr, pc");
	asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,lr}");
	ALIGN_STACK_END;
	asm("mov r0, #0");
	__JUMP(,lr);

	asm("fmflash_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
#endif
	}
#endif

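// Usage sketch (illustrative only; the names are assumptions, not part of
// this file): a long-running operation that holds a fast mutex and calls
// NKern::FMFlash() between work items so that a higher-priority contender
// is not kept waiting for the whole run.
//
// @code
// void ExampleProcessQueue(NFastMutex& aMutex, TInt aItems)
//     {
//     NKern::FMWait(&aMutex);
//     for (TInt i = 0; i < aItems; ++i)
//         {
//         // ...process one item under the mutex...
//         NKern::FMFlash(&aMutex);	// release/reacquire only if another thread is waiting
//         }
//     NKern::FMSignal(&aMutex);
//     }
// @endcode
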
// This code needs to be placed here because the H2 ekern build complains if
// the offset of the __TheScheduler label from the first function in the file
// is outside the permissible range.
#ifdef BTRACE_FAST_MUTEX
__NAKED__ TInt BtraceFastMutexHolder()
	{
	asm("fmsignal_lock_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );

	asm("fmwait_lockacquired_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );

	asm("fmsignal_lock_trace_unlock:");
	// r0=mutex r2=scheduler
	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r3, r0");					// mutex
	asm("ldr r0, fmsignal_lock_trace_header");	// header
	asm("ldr r2, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// context id
	__JUMP(,r12);

	asm("fmwait_lockacquiredwait_trace:");
	// r0=scheduler r2=mutex r3=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r1, r2");
	asm("mov r2, r3");					// context id
	asm("mov r3, r1");					// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	__JUMP(,r12);

	asm("fmwait_lockacquiredwait_trace2:");
	// r0=mutex r1=thread r2=scheduler
	asm("ldr r12, [r2, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r3, r0");					// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	asm("mov r2, r1");					// context id
	__JUMP(,r12);

	asm("syslock_wait_trace:");
	// r0=scheduler r2=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
//	asm("mov r2, r2");					// context id
	asm("add r3, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));	// mutex
	asm("ldr r0, fmwait_lockacquired_trace_header");	// header
	__JUMP(,r12);

	asm("syslock_signal_trace:");
	// r0=scheduler r3=thread
	asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("mov r2, r3");					// context id
	asm("add r3, r0, #%a0" : : "i" _FOFF(TScheduler,iLock));	// mutex
	asm("ldr r0, fmsignal_lock_trace_header");	// header
	__JUMP(,r12);

	}
#endif // BTRACE_FAST_MUTEX