1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/nkernsmp/arm/ncsched.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1064 @@
1.4 +// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\nkernsmp\arm\ncsched.cia
1.18 +//
1.19 +//
1.20 +
1.21 +// NThreadBase member data
1.22 +#define __INCLUDE_NTHREADBASE_DEFINES__
1.23 +
1.24 +// TDfc member data
1.25 +#define __INCLUDE_TDFC_DEFINES__
1.26 +
1.27 +#include <e32cia.h>
1.28 +#include <arm.h>
1.29 +#include "nkern.h"
1.30 +#include <arm_gic.h>
1.31 +#include <arm_scu.h>
1.32 +#include <arm_tmr.h>
1.33 +//#include <highrestimer.h>
1.34 +//#include "emievents.h"
1.35 +
1.36 +#ifdef _DEBUG
1.37 +#define ASM_KILL_LINK(rp,rs) asm("mov "#rs", #0xdf ");\
1.38 + asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
1.39 + asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
1.40 + asm("str "#rs", ["#rp"] ");\
1.41 + asm("str "#rs", ["#rp", #4] ");
1.42 +#else
1.43 +#define ASM_KILL_LINK(rp,rs)
1.44 +#endif
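+
+// ASM_KILL_LINK (debug builds only) poisons the two link words at [rp] and
+// [rp+4] with the pattern 0xdfdfdfdf, using rs as scratch, so that any stale
+// use of a dequeued link is caught quickly. In release builds it expands to
+// nothing.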
1.45 +
1.46 +#define ALIGN_STACK_START \
1.47 + asm("mov r12, sp"); \
1.48 + asm("tst sp, #4"); \
1.49 + asm("subeq sp, sp, #4"); \
1.50 + asm("str r12, [sp,#-4]!")
1.51 +
1.52 +#define ALIGN_STACK_END \
1.53 + asm("ldr sp, [sp]")
1.54 +
1.55 +
1.56 +//#define __DEBUG_BAD_ADDR
1.57 +
1.58 +extern "C" void NewThreadTrace(NThread* a);
1.59 +extern "C" void send_accumulated_resched_ipis();
1.60 +
1.61 +
1.62 +__NAKED__ void TScheduler::Reschedule()
1.63 + {
1.64 + //
1.65 + // Enter in mode_svc with kernel locked, interrupts can be on or off
1.66 + // Exit in mode_svc with kernel unlocked, interrupts off
1.67 + // NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
1.68 + // NOTE: R4-R11 are modified
1.69 + //
1.70 + asm("mov r2, sp "); // bit 0 will be reschedule flag
1.71 + asm("bic sp, sp, #4 "); // align stack
1.72 + GET_RWNO_TID(,r0) // r0->TSubScheduler
1.73 + asm("stmfd sp!, {r2,lr} "); // save original SP/resched flag, return address
1.74 + __ASM_CLI(); // interrupts off
1.75 + asm("ldr r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3)); // check iDfcPendingFlag and iExIDfcPendingFlag
1.76 + asm("mov r11, r0 "); // r11->TSubScheduler
1.77 + asm("ldr r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr)); // r10->CPU local timer
1.78 +
1.79 + asm("start_resched: ");
1.80 + asm("movs r1, r1, lsr #16 "); // check if IDFCs or ExIDFCs pending
1.81 +
1.82 + asm("blne " CSM_ZN13TSubScheduler9QueueDfcsEv); // queue any pending DFCs
1.83 + asm("ldrb r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.84 + asm("ldr r3, [sp, #0] ");
1.85 + asm("mrs r2, spsr "); // r2=spsr_svc
1.86 + asm("cmp r1, #0 "); // check if a reschedule is required
1.87 + asm("beq no_resched_needed "); // branch out if not
1.88 + __ASM_STI(); // interrupts back on
1.89 + asm("orr r3, r3, #1 ");
1.90 + asm("str r3, [sp, #0] "); // set resched flag
1.91 + asm("stmfd sp!, {r0,r2} "); // store SPSR_SVC
1.92 + asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
1.93 +#ifdef __CPU_ARMV7
1.94 + asm("mrc p14, 6, r7, c1, c0, 0 "); // r7 = TEEHBR
1.95 +#else
1.96 + asm("mov r7, #0 ");
1.97 +#endif
1.98 + GET_RWRO_TID(,r8); // r8 = User RO Thread ID
1.99 + GET_RWRW_TID(,r9); // r9 = User RW Thread ID
1.100 +#ifdef __CPU_HAS_VFP
1.101 + VFP_FMRX(,0,VFP_XREG_FPEXC); // r0 = FPEXC
1.102 + asm("bic r0, r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Store FPEXC with VFP disabled in case this thread runs on a different core next time
1.103 +#else
1.104 + asm("mov r0, #0 ");
1.105 +#endif
1.106 + GET_CAR(, r1); // r1 = CAR
1.107 + asm("mrc p15, 0, r12, c3, c0, 0 "); // r12 = DACR
1.108 + asm("ldr r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
1.109 +
1.110 + // Save auxiliary registers
1.111 + // R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
1.112 + asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));
1.113 + asm("str sp, [r5, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // store original thread's stack pointer
1.114 + asm("stmia sp, {r0-r1,r7-r9,r12} ");
1.115 +
1.116 + // We must move to a temporary stack before selecting the next thread.
1.117 + // This is because another CPU may begin executing this thread before the
1.118 + // select_next_thread() function returns and our stack would then be
1.119 + // corrupted. We use the stack belonging to this CPU's initial thread since
1.120 + // we are guaranteed that will never run on another CPU.
1.121 + asm("ldr sp, [r4, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
1.122 +
1.123 + asm("select_thread: ");
1.124 + asm("mov r0, r11 ");
1.125 + asm("bl " CSM_ZN13TSubScheduler16SelectNextThreadEv ); // also sets r0->iCurrentThread
1.126 +#ifdef BTRACE_CPU_USAGE
1.127 + asm("ldr r2, __BTraceFilter ");
1.128 +#endif
1.129 + asm("movs r3, r0 "); // r3 = new thread (might be 0)
1.130 + asm("ldrne sp, [r0, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // if a thread has been selected, move to its stack
1.131 + asm("beq no_thread "); // branch out if no thread is ready
1.132 +
1.133 +#ifdef BTRACE_CPU_USAGE
1.134 + asm("ldrb r1, [r2, #4] "); // check category 4 trace
1.135 + asm("cmp r1, #0 ");
1.136 + asm("beq 1f ");
1.137 + asm("stmfd sp!, {r0-r3} ");
1.138 + asm("bl NewThreadTrace ");
1.139 + asm("ldmfd sp!, {r0-r3} ");
1.140 + asm("1: ");
1.141 +#endif // BTRACE_CPU_USAGE
1.142 +
1.143 + asm("cmp r3, r5 "); // same thread?
1.144 + asm("beq same_thread ");
1.145 + asm("ldrb r1, [r3, #%a0]" : : "i" _FOFF(NThreadBase, i_ThrdAttr));
1.146 + asm("ldr r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
1.147 + asm("mov r2, r3, lsr #6 "); // r2=current thread>>6
1.148 + asm("tst r1, #%a0" : : "i" ((TInt)KThreadAttAddressSpace)); // address space required?
1.149 + asm("ldrne r4, [r4, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // if so, get pointer to process handler
1.150 +
1.151 + // we are doing a thread switch so restore new thread's auxiliary registers
1.152 + // R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
1.153 + asm("ldmia sp, {r0-r1,r7-r9,r12} ");
1.154 +
1.155 +#ifdef __CPU_ARMV7
1.156 + asm("mcr p14, 6, r7, c1, c0, 0 "); // r7 = TEEHBR
1.157 +#endif
1.158 + SET_RWRO_TID(,r8); // r8 = User RO Thread ID
1.159 + SET_RWRW_TID(,r9); // r9 = User RW Thread ID
1.160 +#ifdef __CPU_HAS_VFP
1.161 + VFP_FMXR(,VFP_XREG_FPEXC,0); // r0 = FPEXC
1.162 +#endif
1.163 + SET_CAR(, r1); // r1 = CAR
1.164 + asm("mcr p15, 0, r12, c3, c0, 0 "); // r12 = DACR
1.165 +
1.166 + asm("beq no_as_switch "); // skip if address space change not required
1.167 +
1.168 + // Do address space switching
1.169 + // Handler called with:
1.170 + // r11->subscheduler, r3->current thread
1.171 + // r9->new address space, r5->old address space
1.172 + // Must preserve r10,r11,r3, can modify other registers
1.173 + asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iAddressSpace)); // get current address space ptr
1.174 + asm("ldr r9, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iAddressSpace)); // get new address space ptr
1.175 + asm("adr lr, as_switch_return ");
1.176 + __JUMP(, r4);
1.177 +
1.178 + asm("no_as_switch: ");
1.179 + asm("mrc p15, 0, r4, c13, c0, 1 "); // r4 = CONTEXTID (threadID:ASID)
1.180 + asm("and r4, r4, #0xff "); // isolate ASID
1.181 + asm("orr r2, r4, r2, lsl #8 "); // r2 = new ContextID (new thread ID : ASID)
1.182 + __DATA_SYNC_BARRIER_Z__(r12); // needed before change to ContextID
1.183 + asm("mcr p15, 0, r2, c13, c0, 1 "); // set ContextID (ASID + debugging thread ID)
1.184 + __INST_SYNC_BARRIER__(r12);
1.185 +#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
1.186 + asm("mcr p15, 0, r12, c7, c5, 6 "); // flush BTAC
1.187 +#endif
1.188 +
1.189 + asm("as_switch_return: ");
1.190 + asm("same_thread: ");
1.191 + asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare)); // step past auxiliary registers
1.192 + asm("ldmib sp!, {r2,r12} "); // r2=SPSR_SVC, r12=original SP + resched flag
1.193 + __ASM_CLI(); // interrupts off
1.194 + asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.195 + asm("msr spsr, r2 "); // restore spsr_svc
1.196 + asm("mov r0, r11 ");
1.197 + asm("mov r2, r12 "); // r2 = original SP + reschedule flag
1.198 + asm("cmp r1, #0 "); // check for more IDFCs and/or another reschedule
1.199 + asm("bne start_resched "); // loop if required
1.200 + asm("ldr r14, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
1.201 + asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.202 + asm("str r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.203 + asm("cmp r14, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
1.204 + asm("ldr lr, [sp, #4] "); // restore R10, R11, return address
1.205 + asm("bic sp, r2, #3 "); // restore initial unaligned stack pointer
1.206 + asm("and r2, r2, #1 "); // r2 = reschedule flag
1.207 + asm("beq resched_thread_divert ");
1.208 +
1.209 + // Return with: R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
1.210 + // R12=iReschedIPIs
1.211 + __JUMP(, lr);
1.212 +
1.213 + asm("no_resched_needed: ");
1.214 + asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
1.215 + asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.216 + asm("mov r0, r11 ");
1.217 + asm("str r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.218 + asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
1.219 + asm("cmp r2, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
1.220 + asm("ldmfd sp, {r2,lr} "); // r2 = original SP + reschedule flag, restore lr
1.221 + asm("bic sp, r2, #3 "); // restore initial unaligned stack pointer
1.222 + asm("and r2, r2, #1 "); // r2 = reschedule flag
1.223 + asm("beq resched_thread_divert ");
1.224 +
1.225 + // Return with: R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
1.226 + // R12=iReschedIPIs
1.227 + __JUMP(, lr);
1.228 +
1.229 + asm("resched_thread_divert: ");
1.230 + asm("mov r1, #1 ");
1.231 + asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.232 + asm("bic sp, sp, #4 "); // align stack
1.233 + asm("stmfd sp!, {r0-r5,r12,lr} "); // save registers for diagnostic purposes
1.234 + asm("mov r4, r3 "); // don't really need to bother about registers since thread is exiting
1.235 +
1.236 + // need to send any outstanding reschedule IPIs
1.237 + asm("cmp r12, #0 ");
1.238 + asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
1.239 +
1.240 + __ASM_STI();
1.241 + asm("ldrb r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
1.242 + asm("cmp r1, #1 ");
1.243 + asm("bne 1f ");
1.244 + __ASM_CRASH();
1.245 + asm("1: ");
1.246 + asm("mov r2, #0 ");
1.247 + asm("strb r2, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
1.248 + asm("mov r0, r4 ");
1.249 + asm("bl " CSM_ZN11NThreadBase4ExitEv );
1.250 + __ASM_CRASH(); // shouldn't get here
1.251 +
1.252 + // There is no thread ready to run
1.253 + // R11->TSubScheduler, R1=unknown, R2=__BTraceFilter (if BTRACE_CPU_USAGE) else unknown, R3=0, R12=unknown
1.254 + asm("no_thread: ");
1.255 + __ASM_CLI();
1.256 + asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.257 + asm("mov r0, r11 ");
1.258 + asm("cmp r12, #0 ");
1.259 + asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
1.260 + __ASM_STI();
1.261 + __DATA_SYNC_BARRIER_Z__(r1);
1.262 + ARM_WFI;
1.263 + asm("no_thread2: ");
1.264 + asm("ldr r1, [r11, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3)); // check iDfcPendingFlag and iExIDfcPendingFlag
1.265 + asm("mov r0, r11 ");
1.266 + asm("movs r1, r1, lsr #16 ");
1.267 + asm("beq no_thread ");
1.268 + asm("bl " CSM_ZN13TSubScheduler9QueueDfcsEv); // queue any pending DFCs
1.269 + asm("ldrb r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.270 + asm("cmp r1, #0 "); // check if a reschedule is required
1.271 + asm("beq no_thread2 ");
1.272 + asm("b select_thread ");
1.273 +
1.274 +
1.275 +
1.276 +/******************************************************************************
1.277 + Missed out stuff:
1.278 + EMI EVENT LOGGING
1.279 + __CPU_ARM1136_ERRATUM_351912_FIXED
1.280 + Debug hooks in the scheduler
1.281 + ******************************************************************************/
1.282 +
1.283 + asm("__BTraceFilter: ");
1.284 + asm(".word %a0 " : : "i" ((TInt)&BTraceData.iFilter[0]));
1.285 + };
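+
+// TScheduler::Reschedule() is not called directly by normal kernel-side code;
+// within this file it is reached from NKern::Unlock() and NKern::PreemptionPoint()
+// below, when the kernel lock is about to be released with IDFCs or a
+// reschedule pending.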
1.286 +
1.287 +
1.288 +/**
1.289 + * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
1.290 + *
1.291 + * @param aStart Set to the lowest memory address which needs to be modified.
1.292 + * @param aEnd Set to the highest memory address +1 which needs to be modified.
1.293 +
1.294 + @pre Kernel must be locked.
1.295 + @pre Call in a thread context.
1.296 + @pre Interrupts must be enabled.
1.297 + */
1.298 +EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
1.299 + {
1.300 +#if 0
1.301 + ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
1.302 +#ifdef __DEBUGGER_SUPPORT__
1.303 + asm("adr r2,resched_trampoline_hook_address");
1.304 + asm("str r2,[r0]");
1.305 + asm("adr r2,resched_trampoline_hook_address+4");
1.306 + asm("str r2,[r1]");
1.307 +#else
1.308 + asm("mov r2,#0");
1.309 + asm("str r2,[r0]");
1.310 + asm("str r2,[r1]");
1.311 +#endif
1.312 +#endif
1.313 + __JUMP(,lr);
1.314 + };
1.315 +
1.316 +
1.317 +/**
1.318 + * Modifies the scheduler code so that it can call the function set by
1.319 + * NKern::SetRescheduleCallback().
1.320 + *
1.321 + * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
1.322 +
1.323 + @pre Kernel must be locked.
1.324 + @pre Call in a thread context.
1.325 + @pre Interrupts must be enabled.
1.326 + */
1.327 +EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
1.328 + {
1.329 +#if 0
1.330 + ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
1.331 +#ifdef __DEBUGGER_SUPPORT__
1.332 + asm("adr r0,resched_trampoline_hook_address");
1.333 + asm("adr r1,resched_trampoline");
1.334 + asm("sub r1, r1, r0");
1.335 + asm("sub r1, r1, #8");
1.336 + asm("mov r1, r1, asr #2");
1.337 + asm("add r1, r1, #0xea000000"); // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline
1.338 +
1.339 +#if defined(__CPU_MEMORY_TYPE_REMAPPING)
1.340 + // These platforms have their shadow memory in a non-writable page. We cannot use the standard
1.341 + // Epoc::CopyToShadowMemory interface here because we hold the kernel lock.
1.342 + // Instead, we'll temporarily disable access permission checking in MMU by switching
1.343 + // domain#0 into Manager Mode (see Domain Access Control Register).
1.344 + asm("mrs r12, CPSR "); // save cpsr setting and ...
1.345 + CPSIDAIF; // ...disable interrupts
1.346 + asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR
1.347 + asm("orr r3, r2, #3"); // domain #0 is the first two bits. manager mode is 11b
1.348 + asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR
1.349 + asm("str r1,[r0]");
1.350 + asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR
1.351 + asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts)
1.352 +#else
1.353 + asm("str r1,[r0]");
1.354 +#endif
1.355 +
1.356 +#endif
1.357 +#endif
1.358 + __JUMP(,lr);
1.359 + };
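+
+// The hook word computed above is a plain ARM B instruction: opcode 0xEA000000
+// plus a 24-bit signed word offset of (target - hook_address - 8) >> 2.
+// Worked example with hypothetical addresses: if resched_trampoline_hook_address
+// were at 0x80001000 and resched_trampoline at 0x80001100, the offset would be
+// (0x100 - 8) >> 2 = 0x3E, giving the instruction word 0xEA00003E.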
1.360 +
1.361 +
1.362 +/**
1.363 + * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
1.364 + *
1.365 + * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.
1.366 +
1.367 + @pre Kernel must be locked.
1.368 + @pre Call in a thread context.
1.369 + @pre Interrupts must be enabled.
1.370 + */
1.371 +EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
1.372 + {
1.373 +#if 0
1.374 + ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
1.375 +#ifdef __DEBUGGER_SUPPORT__
1.376 + asm("adr r0,resched_trampoline_hook_address");
1.377 + asm("ldr r1,resched_trampoline_unhook_data");
1.378 +
1.379 +#if defined(__CPU_MEMORY_TYPE_REMAPPING)
1.380 + // See comments above in InsertSchedulerHooks
1.381 + asm("mrs r12, CPSR "); // save cpsr setting and ...
1.382 + CPSIDAIF; // ...disable interrupts
1.383 + asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR
1.384 + asm("orr r3, r2, #3"); // domain #0 is the first two bits. manager mode is 11b
1.385 + asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR
1.386 + asm("str r1,[r0]");
1.387 + asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR
1.388 + asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts)
1.389 +#else
1.390 + asm("str r1,[r0]");
1.391 +#endif
1.392 +
1.393 +#endif
1.394 +#endif
1.395 + __JUMP(,lr);
1.396 + };
1.397 +
1.398 +
1.399 +/**
1.400 + * Set the function which is to be called on every thread reschedule.
1.401 + *
1.402 + * @param aCallback Pointer to callback function, or NULL to disable callback.
1.403 +
1.404 + @pre Kernel must be locked.
1.405 + @pre Call in a thread context.
1.406 + @pre Interrupts must be enabled.
1.407 + */
1.408 +EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
1.409 + {
1.410 +#if 0
1.411 + ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
1.412 +#ifdef __DEBUGGER_SUPPORT__
1.413 + asm("ldr r1, __TheScheduler ");
1.414 + asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
1.415 +#endif
1.416 +#endif
1.417 + __JUMP(,lr);
1.418 + };
1.419 +
1.420 +
1.421 +
1.422 +/** Disables interrupts to specified level.
1.423 +
1.424 + Note that if we are not disabling all interrupts we must lock the kernel
1.425 + here, otherwise a high priority interrupt which is still enabled could
1.426 + cause a reschedule and the new thread could then reenable interrupts.
1.427 +
1.428 + @param aLevel Interrupts are disabled up to and including aLevel. On ARM,
1.429 + level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
1.430 + @return CPU-specific value passed to RestoreInterrupts.
1.431 +
1.432 + @pre 1 <= aLevel <= maximum level (CPU-specific)
1.433 +
1.434 + @see NKern::RestoreInterrupts()
1.435 + */
1.436 +EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
1.437 + {
1.438 +#ifdef __FIQ_IS_UNCONTROLLED__
1.439 + asm("mrs r1, cpsr ");
1.440 + asm("cmp r0, #0 ");
1.441 + asm("beq 1f ");
1.442 + __ASM_CLI();
1.443 + asm("1: ");
1.444 + asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* return original CPSR I and F bits */
1.445 + __JUMP(, lr);
1.446 +#else
1.447 + asm("cmp r0, #1 ");
1.448 + asm("bhi " CSM_ZN5NKern20DisableAllInterruptsEv); // if level>1, disable all
1.449 + asm("mrs r2, cpsr "); // r2=original CPSR
1.450 + asm("bcc 1f "); // skip if level=0
1.451 + __ASM_CLI(); // Disable all interrupts to prevent migration
1.452 + GET_RWNO_TID(,r12); // r12 -> TSubScheduler
1.453 + asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.454 + asm("and r0, r2, #0xc0 ");
1.455 + asm("cmp r3, #0 "); // test if kernel locked
1.456 + asm("addeq r3, r3, #1 "); // if not, lock the kernel
1.457 + asm("streq r3, [r12] ");
1.458 + asm("orreq r0, r0, #0x80000000 "); // and set top bit to indicate kernel locked
1.459 + __ASM_STI2(); // reenable FIQs only
1.460 + __JUMP(, lr);
1.461 + asm("1: ");
1.462 + asm("and r0, r2, #0xc0 ");
1.463 + __JUMP(, lr);
1.464 +#endif
1.465 + }
1.466 +
1.467 +
1.468 +/** Disables all interrupts (e.g. both IRQ and FIQ on ARM).
1.469 +
1.470 + @return CPU-specific value passed to NKern::RestoreInterrupts().
1.471 +
1.472 + @see NKern::RestoreInterrupts()
1.473 + */
1.474 +EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
1.475 + {
1.476 + asm("mrs r1, cpsr ");
1.477 + asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* return original CPSR I and F bits */
1.478 + __ASM_CLI();
1.479 + __JUMP(,lr);
1.480 + }
1.481 +
1.482 +
1.483 +/** Enables all interrupts (e.g. IRQ and FIQ on ARM).
1.484 +
1.485 + This function never unlocks the kernel. So it must be used
1.486 + only to complement NKern::DisableAllInterrupts. Never use it
1.487 + to complement NKern::DisableInterrupts.
1.488 +
1.489 + @see NKern::DisableInterrupts()
1.490 + @see NKern::DisableAllInterrupts()
1.491 +
1.492 + @internalComponent
1.493 + */
1.494 +EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
1.495 + {
1.496 + __ASM_STI();
1.497 + __JUMP(,lr);
1.498 + }
1.499 +
1.500 +
1.501 +/** Restores interrupts to previous level and unlocks the kernel if it was
1.502 + locked when disabling them.
1.503 +
1.504 + @param aRestoreData CPU-specific data returned from NKern::DisableInterrupts
1.505 + or NKern::DisableAllInterrupts specifying the previous interrupt level.
1.506 +
1.507 + @see NKern::DisableInterrupts()
1.508 + @see NKern::DisableAllInterrupts()
1.509 + */
1.510 +EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
1.511 + {
1.512 + asm("tst r0, r0 "); // test state of top bit of aLevel
1.513 + asm("mrs r1, cpsr ");
1.514 + asm("and r0, r0, #%a0" : : "i" ((TInt)KAllInterruptsMask));
1.515 + asm("bic r1, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));
1.516 + asm("orr r1, r1, r0 "); // replace I and F bits with those supplied
1.517 + asm("msr cpsr_c, r1 "); // flags are unchanged (in particular N)
1.518 + __JUMP(pl,lr); // if top bit of aLevel clear, finished
1.519 +
1.520 + // if top bit of aLevel set, fall through to unlock the kernel
1.521 + }
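+
+// Typical pairing of NKern::DisableInterrupts() and NKern::RestoreInterrupts()
+// (a minimal kernel-side sketch, not part of this file):
+//
+//     TInt irq = NKern::DisableInterrupts(1);   // level 1 = IRQ only
+//     // ... touch state shared with IRQ handlers ...
+//     NKern::RestoreInterrupts(irq);            // back to the previous level;
+//                                               // also unlocks the kernel if
+//                                               // DisableInterrupts locked it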
1.522 +
1.523 +
1.524 +/** Unlocks the kernel.
1.525 +
1.526 + Decrements iKernLockCount for current CPU; if it becomes zero and IDFCs
1.527 + or a reschedule are pending, calls the scheduler to process them.
1.528 + Must be called in mode_svc.
1.529 +
1.530 + @pre Call either in a thread or an IDFC context.
1.531 + @pre Do not call from an ISR.
1.532 + */
1.533 +EXPORT_C __NAKED__ void NKern::Unlock()
1.534 + {
1.535 + ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
1.536 +
1.537 + GET_RWNO_TID(,r0) // r0=&SubScheduler()
1.538 + __ASM_CLI(); // interrupts off
1.539 + asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.540 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.541 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.542 + asm("subs r3, r1, #1 ");
1.543 + asm("strne r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.544 + asm("bne 0f "); // kernel still locked -> return
1.545 + asm("cmp r2, #0 "); // check for DFCs or reschedule
1.546 + asm("bne 1f ");
1.547 + asm("cmp r12, #0 "); // IPIs outstanding?
1.548 + asm("str r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount)); // unlock the kernel
1.549 + asm("bne 2f ");
1.550 + asm("0: ");
1.551 + __ASM_STI(); // interrupts back on
1.552 + __JUMP(,lr);
1.553 +
1.554 + // need to run IDFCs and/or reschedule
1.555 + asm("1: ");
1.556 + asm("stmfd sp!, {r0,r4-r11,lr} ");
1.557 + asm("bl " CSM_ZN10TScheduler10RescheduleEv );
1.558 + asm(".global nkern_unlock_resched_return ");
1.559 + asm("nkern_unlock_resched_return: ");
1.560 +
1.561 + // need to send any outstanding reschedule IPIs
1.562 + asm("cmp r12, #0 ");
1.563 + asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
1.564 + asm("ldmfd sp!, {r0,r4-r11,lr} ");
1.565 + __ASM_STI();
1.566 + __JUMP(,lr);
1.567 +
1.568 + asm("2: ");
1.569 + asm("stmfd sp!, {r0,lr} ");
1.570 + asm("bl " CSM_CFUNC(send_accumulated_resched_ipis));
1.571 + asm("ldmfd sp!, {r0,lr} ");
1.572 + __ASM_STI();
1.573 + __JUMP(,lr);
1.574 + }
1.575 +
1.576 +
1.577 +/** Locks the kernel.
1.578 +
1.579 + Increments iKernLockCount for the current CPU, thereby deferring IDFCs
1.580 + and preemption. Must be called in mode_svc.
1.581 +
1.582 + @pre Call either in a thread or an IDFC context.
1.583 + @pre Do not call from an ISR.
1.584 + */
1.585 +EXPORT_C __NAKED__ void NKern::Lock()
1.586 + {
1.587 + ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
1.588 +
1.589 + __ASM_CLI();
1.590 + GET_RWNO_TID(,r12);
1.591 + asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.592 + asm("add r3, r3, #1 "); // lock the kernel
1.593 + asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.594 + __ASM_STI();
1.595 + __JUMP(,lr);
1.596 + }
1.597 +
1.598 +
1.599 +/** Locks the kernel and returns a pointer to the current thread.
1.600 +
1.601 + Increments iKernLockCount for the current CPU, thereby deferring IDFCs
1.602 + and preemption. Must be called in mode_svc.
1.603 +
1.604 + @pre Call either in a thread or an IDFC context.
1.605 + @pre Do not call from an ISR.
1.606 + */
1.607 +EXPORT_C __NAKED__ NThread* NKern::LockC()
1.608 + {
1.609 + ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
1.610 +
1.611 + __ASM_CLI();
1.612 + GET_RWNO_TID(,r12);
1.613 + asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.614 + asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
1.615 + asm("add r3, r3, #1 "); // lock the kernel
1.616 + asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.617 + __ASM_STI();
1.618 + __JUMP(,lr);
1.619 + }
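+
+// Minimal sketch of how the kernel lock routines above are used (illustrative
+// only):
+//
+//     NThread* t = NKern::LockC();  // lock the kernel, return the current thread
+//     // ... manipulate nkern structures that require the kernel lock ...
+//     NKern::Unlock();              // may run IDFCs / reschedule on the way out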
1.620 +
1.621 +
1.622 +/** Allows IDFCs and rescheduling if they are pending.
1.623 +
1.624 + If IDFCs or a reschedule are pending and iKernLockCount is exactly equal to 1,
1.625 + calls the scheduler to process the IDFCs and possibly reschedule.
1.626 + Must be called in mode_svc.
1.627 +
1.628 + @return Nonzero if a reschedule actually occurred, zero if not.
1.629 +
1.630 + @pre Call either in a thread or an IDFC context.
1.631 + @pre Do not call from an ISR.
1.632 + */
1.633 +EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
1.634 + {
1.635 + ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);
1.636 +
1.637 + GET_RWNO_TID(,r0) // r0=&SubScheduler()
1.638 + asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.639 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
1.640 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.641 + asm("cmp r1, #1 ");
1.642 + asm("bgt 0f "); // if locked more than once return FALSE
1.643 + asm("cmp r2, #0 "); // locked once and IDFCs/reschedule pending?
1.644 + asm("bne 1f "); // skip if so
1.645 + asm("cmp r12, #0 "); // locked once and resched IPIs outstanding?
1.646 + asm("bne 2f "); // skip if so
1.647 + asm("0: ");
1.648 + asm("mov r0, #0 ");
1.649 + __JUMP(, lr); // else return FALSE
1.650 +
1.651 + // need to run IDFCs and/or reschedule
1.652 + asm("1: ");
1.653 + asm("stmfd sp!, {r1,r4-r11,lr} ");
1.654 + asm("bl " CSM_ZN10TScheduler10RescheduleEv );
1.655 + asm(".global nkern_preemption_point_resched_return ");
1.656 + asm("nkern_preemption_point_resched_return: ");
1.657 + asm("str r2, [sp] ");
1.658 + asm("mov r2, #1 ");
1.659 + asm("str r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
1.660 +
1.661 + // need to send any outstanding reschedule IPIs
1.662 + asm("cmp r12, #0 ");
1.663 + asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
1.664 + asm("ldmfd sp!, {r0,r4-r11,lr} "); // return TRUE if reschedule occurred
1.665 + __ASM_STI();
1.666 + __JUMP(, lr);
1.667 +
1.668 + asm("2: ");
1.669 + asm("stmfd sp!, {r2,lr} ");
1.670 + asm("bl " CSM_CFUNC(send_accumulated_resched_ipis));
1.671 + asm("ldmfd sp!, {r0,lr} "); // return TRUE if reschedule occurred
1.672 + __ASM_STI();
1.673 + __JUMP(, lr);
1.674 + }
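+
+// Sketch of a long kernel-locked operation yielding periodically (illustrative;
+// DoSomeWork() is a hypothetical helper):
+//
+//     NKern::Lock();
+//     while (DoSomeWork())
+//         NKern::PreemptionPoint();  // runs pending IDFCs / reschedules when the
+//                                    // lock count is exactly 1, then returns with
+//                                    // the kernel locked again
+//     NKern::Unlock();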
1.675 +
1.676 +
1.677 +#ifdef __CPU_HAS_VFP
1.678 +// Do the actual VFP context save
1.679 +__NAKED__ void VfpContextSave(void*)
1.680 + {
1.681 + VFP_FMRX(,1,VFP_XREG_FPEXC);
1.682 + asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Check to see if VFP in use
1.683 + __JUMP(eq, lr); // Return immediately if not
1.684 + asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX) ); // Check to see if an exception has occurred
1.685 + asm("beq 1f "); // Skip ahead if not
1.686 + asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));
1.687 + VFP_FMXR(,VFP_XREG_FPEXC,1); // Reset exception flag
1.688 + asm("orr r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX)); // But store it for later
1.689 + asm("1: ");
1.690 +
1.691 +
1.692 + VFP_FMRX(,2,VFP_XREG_FPSCR);
1.693 + asm("stmia r0!, {r2} "); // Save FPSCR
1.694 +
1.695 +#ifndef __VFP_V3
1.696 + VFP_FMRX(,2,VFP_XREG_FPINST);
1.697 + VFP_FMRX(,3,VFP_XREG_FPINST2);
1.698 + asm("stmia r0!, {r2-r3} "); // Save FPINST, FPINST2
1.699 +#endif
1.700 +
1.701 + VFP_FSTMIADW(CC_AL,0,0,16); // Save D0 - D15
1.702 +
1.703 +#ifdef __VFP_V3
1.704 + VFP_FMRX(,2,VFP_XREG_MVFR0);
1.705 + asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32)); // Check to see if all 32 Advanced SIMD registers are present
1.706 + asm("beq 0f "); // Skip ahead if not
1.707 + GET_CAR(,r2);
1.708 + asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS)); // Check to see if access to the upper 16 registers is disabled
1.709 + VFP_FSTMIADW(CC_EQ,0,16,16); // If not then save D16 - D31
1.710 +#endif
1.711 +
1.712 + asm("0: ");
1.713 + asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
1.714 + VFP_FMXR(,VFP_XREG_FPEXC,1); // Disable VFP
1.715 +
1.716 + __JUMP(,lr);
1.717 + }
1.718 +#endif
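+
+// Layout written by VfpContextSave above: FPSCR, then (on pre-VFPv3 cores)
+// FPINST and FPINST2, then D0-D15, then D16-D31 when MVFR0 reports all 32
+// Advanced SIMD registers and access to the upper bank is not disabled in
+// CPACR. FPEXC itself is saved by the reschedule code above with the EN bit
+// cleared.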
1.719 +
1.720 +
1.721 +/** Check if the kernel is locked the specified number of times.
1.722 +
1.723 + @param aCount The number of times the kernel should be locked.
1.724 + If zero, tests whether it is locked at all.
1.725 + @return TRUE if the tested condition is true.
1.726 +
1.727 + @internalTechnology
1.728 +*/
1.729 +EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
1.730 + {
1.731 + asm("mrs r12, cpsr ");
1.732 + __ASM_CLI();
1.733 + GET_RWNO_TID(,r3);
1.734 + asm("movs r1, r0 "); // r1 = aCount
1.735 + asm("ldr r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
1.736 + asm("moveq r1, r0 "); // if aCount=0, aCount=iKernLockCount
1.737 + asm("cmp r1, r0 "); //
1.738 + asm("movne r0, #0 "); // if aCount!=iKernLockCount, return FALSE else return iKernLockCount
1.739 + asm("msr cpsr, r12 ");
1.740 + __JUMP(,lr);
1.741 + }
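+
+// Illustrative precondition check using the routine above (a sketch; assumes
+// the usual __NK_ASSERT_DEBUG macro):
+//
+//     __NK_ASSERT_DEBUG(NKern::KernelLocked(1));  // exactly one level of Lock() held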
1.742 +
1.743 +
1.744 +// Only call this if thread migration is disabled, i.e.
1.745 +// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
1.746 +extern "C" __NAKED__ TSubScheduler& SubScheduler()
1.747 + {
1.748 + GET_RWNO_TID(,r0);
1.749 + __JUMP(,lr);
1.750 + }
1.751 +
1.752 +/** Returns the NThread control block for the currently scheduled thread.
1.753 +
1.754 + Note that this is the calling thread if called from a thread context, or the
1.755 + interrupted thread if called from an interrupt context.
1.756 +
1.757 + @return A pointer to the NThread for the currently scheduled thread.
1.758 +
1.759 + @pre Call in any context.
1.760 +*/
1.761 +EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
1.762 + {
1.763 + asm("mrs r12, cpsr ");
1.764 + __ASM_CLI();
1.765 + GET_RWNO_TID(,r0);
1.766 + asm("cmp r0, #0 ");
1.767 + asm("ldrne r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
1.768 + asm("msr cpsr, r12 ");
1.769 + __JUMP(,lr);
1.770 + }
1.771 +
1.772 +
1.773 +/** Returns the NThread control block for the currently scheduled thread.
1.774 +
1.775 + Note that this is the calling thread if called from a thread context, or the
1.776 + interrupted thread if called from an interrupt context.
1.777 +
1.778 + @return A pointer to the NThread for the currently scheduled thread.
1.779 +
1.780 + @pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
1.781 + disabled or with preemption disabled.
1.782 +*/
1.783 +extern "C" __NAKED__ NThread* NCurrentThreadL()
1.784 + {
1.785 + GET_RWNO_TID(,r0);
1.786 + asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
1.787 + __JUMP(,lr);
1.788 + }
1.789 +
1.790 +
1.791 +/** Returns the CPU number of the calling CPU.
1.792 +
1.793 + @return the CPU number of the calling CPU.
1.794 +
1.795 + @pre Call in any context.
1.796 +*/
1.797 +EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
1.798 + {
1.799 + asm("mrs r12, cpsr ");
1.800 + __ASM_CLI();
1.801 + GET_RWNO_TID(,r0);
1.802 + asm("cmp r0, #0 ");
1.803 + asm("ldrne r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));
1.804 + asm("msr cpsr, r12 ");
1.805 + __JUMP(,lr);
1.806 + }
1.807 +
1.808 +
1.809 +/** Returns the current processor context type (thread, IDFC or interrupt).
1.810 +
1.811 + @return A value from NKern::TContext enumeration (but never EEscaped).
1.812 +
1.813 + @pre Call in any context.
1.814 +
1.815 + @see NKern::TContext
1.816 + */
1.817 +EXPORT_C __NAKED__ TInt NKern::CurrentContext()
1.818 + {
1.819 + asm("mrs r1, cpsr ");
1.820 + __ASM_CLI(); // interrupts off to stop migration
1.821 + GET_RWNO_TID(,r3); // r3 = &SubScheduler()
1.822 + asm("mov r0, #2 "); // 2 = interrupt
1.823 + asm("and r2, r1, #0x1f "); // r1 = mode
1.824 + asm("cmp r2, #0x13 ");
1.825 + asm("ldreqb r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iInIDFC));
1.826 + asm("bne 0f "); // if not svc, must be interrupt
1.827 + asm("cmp r0, #0 ");
1.828 + asm("movne r0, #1 "); // if iInIDFC, return 1 else return 0
1.829 + asm("0: ");
1.830 + asm("msr cpsr, r1 "); // restore interrupts
1.831 + __JUMP(,lr);
1.832 + }
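+
+// Sketch of how the return value above is normally consumed (enumerator names
+// assumed from NKern::TContext, see the comment above):
+//
+//     TInt c = NKern::CurrentContext();
+//     if (c == NKern::EInterrupt)
+//         { /* defer real work to an IDFC or DFC */ }
+//     else if (c == NKern::EIDFC)
+//         { /* may not block */ }
+//     else
+//         { /* thread context */ }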
1.833 +
1.834 +
1.835 +extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
1.836 + {
1.837 + __DATA_SYNC_BARRIER_Z__(r3); // need DSB before sending any IPI
1.838 + asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
1.839 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
1.840 + asm("mov r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
1.841 + asm("orr r1, r1, r3, lsl #16 ");
1.842 + asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
1.843 + __JUMP(,lr);
1.844 + }
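+
+// All of the IPI senders in this file write the GIC distributor's software
+// generated interrupt register (GicDistributor::iSoftIrq) with the target CPU
+// mask in bits 16-23 and the vector number in the low bits; RESCHED_IPI_VECTOR
+// is 0, which is why the 'orr' of the vector is commented out below.
+// send_self_resched_ipi instead uses the target-filter field (0x02000000,
+// "requesting CPU only"), so no mask is needed.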
1.845 +
1.846 +// Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
1.847 +// Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
1.848 +// Return with R0 unaltered.
1.849 +extern "C" __NAKED__ void send_accumulated_resched_ipis()
1.850 + {
1.851 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
1.852 + asm("mov r1, #0 ");
1.853 + asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
1.854 + __DATA_SYNC_BARRIER__(r1); // need DSB before sending any IPI
1.855 + asm("mov r1, r12, lsl #16 ");
1.856 +// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
1.857 + asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
1.858 + __JUMP(,lr);
1.859 + }
1.860 +
1.861 +// Send a reschedule IPI to the specified CPU
1.862 +extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
1.863 + {
1.864 + GET_RWNO_TID(,r3);
1.865 + __DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
1.866 + asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
1.867 + ASM_DEBUG1(SendReschedIPI,r0);
1.868 + asm("mov r1, #0x10000 ");
1.869 + asm("mov r1, r1, lsl r0 "); // 0x10000<<aCpu
1.870 +// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
1.871 + asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
1.872 + __JUMP(,lr);
1.873 + }
1.874 +
1.875 +// Send a reschedule IPI to the current processor
1.876 +// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
1.877 +extern "C" __NAKED__ void send_self_resched_ipi()
1.878 + {
1.879 + GET_RWNO_TID(,r3);
1.880 + __DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
1.881 + asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
1.882 + asm("mov r1, #0x02000000 "); // target = requesting CPU only
1.883 +// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
1.884 + asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPI
1.885 + __JUMP(,lr);
1.886 + }
1.887 +
1.888 +extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
1.889 + {
1.890 + ASM_DEBUG1(SendReschedIPIs,r0);
1.891 + __DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
1.892 + asm("cmp r0, #0 "); // any bits set in aMask?
1.893 + GET_RWNO_TID(ne,r3);
1.894 + asm("ldrne r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
1.895 + asm("movne r0, r0, lsl #16 ");
1.896 +// asm("orrne r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
1.897 + asm("strne r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs if any
1.898 + __JUMP(,lr);
1.899 + }
1.900 +
1.901 +
1.902 +extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
1.903 + {
1.904 + asm("ldr r1, __TheSubSchedulers ");
1.905 + asm("mov r2, #0x10000 ");
1.906 + asm("mov r2, r2, lsl r0 "); // 0x10000<<aCpu
1.907 + ASM_DEBUG1(SendReschedIPIAndWait,r0);
1.908 + asm("add r0, r1, r0, lsl #9 "); // sizeof(TSubScheduler)=512
1.909 + asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
1.910 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
1.911 + __DATA_SYNC_BARRIER_Z__(r1); // make sure i_IrqCount is read before IPI is sent
1.912 +// asm("orr r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
1.913 + asm("str r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
1.914 + __DATA_SYNC_BARRIER__(r1); // make sure IPI has been sent
1.915 + asm("1: ");
1.916 + asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
1.917 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
1.918 + asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
1.919 + asm("cmp r1, #0 ");
1.920 + asm("beq 0f "); // iRescheduleNeededFlag not set -> wait
1.921 + asm("cmp r2, #0 ");
1.922 + asm("bge 2f "); // if other CPU is in an ISR, finish
1.923 + asm("cmp r3, r12 "); // if not, has i_IrqCount changed?
1.924 + asm("0: ");
1.925 + ARM_WFEcc(CC_EQ); // if not, wait for something to happen ...
1.926 + asm("beq 1b "); // ... and loop
1.927 + asm("2: ");
1.928 + __DATA_MEMORY_BARRIER__(r1); // make sure subsequent memory accesses don't jump the gun
1.929 + // guaranteed to observe final thread state after this
1.930 + __JUMP(,lr);
1.931 +
1.932 + asm("__TheSubSchedulers: ");
1.933 + asm(".word TheSubSchedulers ");
1.934 + }
1.935 +
1.936 +/* If the current thread is subject to timeslicing, update its remaining time
1.937 + from the current CPU's local timer. Don't stop the timer.
1.938 + If the remaining time is negative, save it as zero.
1.939 + */
1.940 +__NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
1.941 + {
1.942 + asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
1.943 + asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
1.944 + asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
1.945 + asm("cmp r3, #0 ");
1.946 + asm("ble 0f "); // thread isn't timesliced or timeslice already expired so skip
1.947 + asm("cmp r12, #0 ");
1.948 + asm("bne 0f "); // initial (i.e. idle) thread, so skip
1.949 + asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
1.950 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
1.951 + asm("cmp r3, #0 ");
1.952 + asm("movmi r0, #0 "); // if timer count is negative, save zero
1.953 + asm("bmi 1f ");
1.954 + asm("umull r0, r3, r12, r3 "); // scale up to max timer clock
1.955 + asm("adds r0, r0, #0x00800000 ");
1.956 + asm("adcs r3, r3, #0 ");
1.957 + asm("mov r0, r0, lsr #24 ");
1.958 + asm("orr r0, r0, r3, lsl #8 ");
1.959 + asm("1: ");
1.960 + asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
1.961 + asm("0: ");
1.962 + __JUMP(,lr);
1.963 + }
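+
+// The scaling above converts remaining local-timer ticks back into the
+// "maximum timer clock" units held in iTime:
+//     remaining = (count * i_TimerMultI + 2^23) >> 24
+// i.e. a 32x32->64 multiply followed by a rounded 24-bit shift. For
+// illustration, with a hypothetical i_TimerMultI of 0x02000000 (a factor of 2)
+// a count of 1000 ticks is saved as iTime = 2000.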
1.964 +
1.965 +/* Update aOld's execution time and set up the timer for aNew
1.966 + Update this CPU's timestamp value
1.967 +
1.968 + if (!aOld) aOld=iInitialThread
1.969 + if (!aNew) aNew=iInitialThread
1.970 + newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
1.971 + cli()
1.972 + oldcount = timer count
1.973 + if (oldcount<=0 || aOld!=aNew)
1.974 + {
1.975 + timer count = newcount
1.976 + elapsed = i_LastTimerSet - oldcount
1.977 + i_LastTimerSet = newcount
1.978 + elapsed = elapsed * i_TimerMultI / 2^24
1.979 + aOld->iTotalCpuTime64 += elapsed
1.980 + correction = i_TimestampError;
1.981 + if (correction > i_MaxCorrection)
1.982 + correction = i_MaxCorrection
1.983 + else if (correction < -i_MaxCorrection)
1.984 + correction = -i_MaxCorrection
1.985 + i_TimestampError -= correction
1.986 + i_LastTimestamp += elapsed + i_TimerGap - correction
1.987 + }
1.988 + sti()
1.989 + */
1.990 +__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
1.991 + {
1.992 + asm("cmp r2, #0 ");
1.993 + asm("ldreq r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
1.994 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
1.995 + asm("cmp r1, #0 ");
1.996 + asm("ldreq r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
1.997 + asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
1.998 + asm("stmfd sp!, {r4-r7} ");
1.999 + asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
1.1000 + asm("ldr r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
1.1001 + asm("cmp r1, r2 ");
1.1002 + asm("beq 2f ");
1.1003 + asm("adds r6, r6, #1 ");
1.1004 + asm("str r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
1.1005 + asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
1.1006 + asm("ldr r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
1.1007 + asm("adcs r7, r7, #0 ");
1.1008 + asm("str r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
1.1009 + asm("adds r4, r4, #1 ");
1.1010 + asm("adcs r6, r6, #0 ");
1.1011 + asm("str r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
1.1012 + asm("str r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
1.1013 + asm("2: ");
1.1014 + asm("cmp r3, #1 "); // aNew->iTime > 0 ?
1.1015 + asm("umullge r4, r3, r12, r3 ");
1.1016 + asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
1.1017 + asm("movlt r3, #0x7fffffff ");
1.1018 + asm("addges r3, r3, r4, lsr #31 "); // round up top 32 bits if bit 31 set
1.1019 + asm("moveq r3, #1 "); // if result zero, limit to 1
1.1020 + asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
1.1021 + __ASM_CLI();
1.1022 + asm("ldr r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
1.1023 + asm("cmp r1, r2 ");
1.1024 + asm("bne 1f ");
1.1025 + asm("cmp r4, #0 ");
1.1026 + asm("bgt 0f "); // same thread, timeslice not expired -> leave timer alone
1.1027 + asm("1: ");
1.1028 + asm("str r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount)); // set new timeslice value in timer
1.1029 + asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
1.1030 + asm("str r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
1.1031 + asm("sub r12, r12, r4 "); // r12 = elapsed (actual timer ticks)
1.1032 + asm("umull r4, r5, r12, r5 ");
1.1033 + asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
1.1034 + asm("ldr r12, [r1, #4] ");
1.1035 + asm("adds r4, r4, #0x00800000 ");
1.1036 + asm("adcs r5, r5, #0 ");
1.1037 + asm("mov r4, r4, lsr #24 ");
1.1038 + asm("orr r4, r4, r5, lsl #8 "); // r4 = elapsed
1.1039 + asm("adds r3, r3, r4 ");
1.1040 + asm("adcs r12, r12, #0 ");
1.1041 + asm("stmia r1, {r3,r12} "); // aOld->iTotalCpuTime64 += elapsed
1.1042 + asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
1.1043 + asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
1.1044 + asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
1.1045 + asm("ldr r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
1.1046 + asm("mov r12, r3 ");
1.1047 + asm("cmp r3, r5 ");
1.1048 + asm("movgt r3, r5 "); // if (correction>i_MaxCorrection) correction=i_MaxCorrection
1.1049 + asm("cmn r3, r5 ");
1.1050 + asm("rsblt r3, r5, #0 "); // if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
1.1051 + asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
1.1052 + asm("sub r12, r12, r3 ");
1.1053 + asm("str r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
1.1054 + asm("add r4, r4, r5 "); // r4 = elapsed + i_TimerGap
1.1055 + asm("adds r1, r1, r4 ");
1.1056 + asm("adcs r2, r2, #0 "); // iLastTimestamp64 + (elapsed + i_TimerGap)
1.1057 + asm("subs r1, r1, r3 ");
1.1058 + asm("sbcs r1, r1, r3, asr #32 "); // iLastTimestamp64 + (elapsed + i_TimerGap - correction)
1.1059 + asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
1.1060 + asm("str r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
1.1061 + asm("0: ");
1.1062 + __ASM_STI();
1.1063 + asm("ldmfd sp!, {r4-r7} ");
1.1064 + __JUMP(,lr);
1.1065 + }
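+
+// The conversion at label '2:' above implements the pseudo-code newcount
+// calculation: newcount = iTime * i_TimerMultF / 2^32, rounded, clamped to at
+// least 1, or 2^31-1 when iTime <= 0. For illustration, with a hypothetical
+// i_TimerMultF of 0x80000000 (a factor of 0.5) a timeslice of iTime = 2000
+// loads the local timer with 1000.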
1.1066 +
1.1067 +