--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/nkern/arm/ncutilf.cia	Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,507 @@
+// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\nkern\arm\ncutilf.cia
+//
+//
+
+#include <e32cia.h>
+#include <arm.h>
+#include "highrestimer.h"
+
+#ifdef __SCHEDULER_MACHINE_CODED__
+/** Signals the request semaphore of a nanothread.
+
+	This function is intended to be used by the EPOC layer and personality
+	layers. Device drivers should use Kern::RequestComplete instead.
+
+	@param aThread Nanothread to signal. Must be non-NULL.
+
+	@see Kern::RequestComplete()
+
+	@pre Interrupts must be enabled.
+	@pre Do not call from an ISR.
+ */
+EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
+	{
+	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR);
+
+	asm("ldr r2, __TheScheduler ");
+	asm("str lr, [sp, #-4]! ");
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
+	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
+	asm("add r3, r3, #1 ");
+	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
+	asm("bl " CSM_ZN14NFastSemaphore6SignalEv);	// alignment OK since target is also assembler
+	asm("ldr lr, [sp], #4 ");
+	asm("b " CSM_ZN5NKern6UnlockEv);
+	}
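+// In outline, the hand-scheduled assembler above performs the following
+// (a C++ sketch of the same fast path, not the code that is built):
+//
+//   NKern::Lock();                        // iKernCSLocked++
+//   aThread->iRequestSemaphore.Signal();  // NFastSemaphore::Signal()
+//   NKern::Unlock();                      // reached via tail call ("b")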
+
+
+/** Atomically signals the request semaphore of a nanothread and a fast mutex.
+
+	This function is intended to be used by the EPOC layer and personality
+	layers. Device drivers should use Kern::RequestComplete instead.
+
+	@param aThread Nanothread to signal. Must be non-NULL.
+	@param aMutex Fast mutex to signal. If NULL, the system lock is signaled.
+
+	@see Kern::RequestComplete()
+
+	@pre Kernel must be unlocked.
+	@pre Call in a thread context.
+	@pre Interrupts must be enabled.
+ */
+EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, NFastMutex* /*aMutex*/)
+	{
+	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
+
+	asm("ldr r2, __TheScheduler ");
+	asm("cmp r1, #0 ");
+	asm("ldreq r1, __SystemLock ");
+	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
+	asm("stmfd sp!, {r1,lr} ");
+	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
+	asm("add r3, r3, #1 ");
+	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
+	asm("bl " CSM_ZN14NFastSemaphore6SignalEv);
+	asm("ldr r0, [sp], #4 ");
+	asm("bl " CSM_ZN10NFastMutex6SignalEv);	// alignment OK since target is also assembler
+	asm("ldr lr, [sp], #4 ");
+	asm("b " CSM_ZN5NKern6UnlockEv);
+
+	asm("__SystemLock: ");
+	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
+	asm("__TheScheduler: ");
+	asm(".word TheScheduler ");
+	}
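+// Equivalent outline of the routine above (C++ sketch only):
+//
+//   NKern::Lock();
+//   aThread->iRequestSemaphore.Signal();
+//   aMutex ? aMutex->Signal() : TheScheduler.iLock.Signal();
+//   NKern::Unlock();
+//
+// The NULL check on aMutex is resolved up front with conditional execution
+// (cmp/ldreq), so no branch is needed on the signalling path.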
+#endif
+
+
+#ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
+// called by C++ version of NThread::UserContextType()
+__NAKED__ TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/)
+	{
+	asm("ldr r1, __irq_resched_return ");
+	asm("cmp r0, r1 ");
+	asm("movne r0, #0 ");
+	__JUMP(,lr);
+	asm("__irq_resched_return: ");
+	asm(".word irq_resched_return ");
+	}
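+// In effect (sketch): return aAddr == (TUint32)&irq_resched_return;
+// On a match r0 is left holding aAddr (non-zero, hence true); otherwise it is
+// cleared to 0, which is all a TBool result requires.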
+
+#else
+
+/** Get a value which indicates where a thread's user mode context is stored.
+
+	@return A value that can be used as an index into the tables returned by
+	NThread::UserContextTables().
+
+	@pre any context
+	@pre kernel locked
+	@post kernel locked
+
+	@see UserContextTables
+	@publishedPartner
+ */
+EXPORT_C __NAKED__ NThread::TUserContextType NThread::UserContextType()
+	{
+	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
+//
+// Optimisation note: It may be possible to coalesce the first and second
+// checks below by creating separate "EContextXxxDied" context types for each
+// possible way a thread can die and ordering these new types before
+// EContextException.
+//
+
+	// Dying thread? use context saved earlier by kernel
+
+	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
+	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iSpare3));	// r2 = iUserContextType
+	asm("mov r1, r0 ");	// r1 = this
+	asm("cmp r3, #%a0" : : "i" ((TInt)NThread::ECSExitInProgress));
+	asm("moveq r0, r2");
+	__JUMP(eq,lr);
+
+	// Exception or no user context?
+
+	asm("ldr r3, __TheScheduler");
+	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
+	asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
+	asm("movls r0, r2 ");	// Return EContextNone or EContextException
+	__JUMP(ls,lr);
+	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextUserIntrCallback));
+	asm("blo 1f");
+	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextWFARCallback));
+	asm("movls r0, r2 ");	// Return EContextUserIntrCallback or EContextWFARCallback
+	__JUMP(ls,lr);
+
+	// Getting the current thread's context? It must be in an exec call, since
+	// the exception and dying-thread cases were handled above.
+
+	asm("1: ");
+	asm("cmp r3, r1");
+	asm("moveq r0, #%a0" : : "i" ((TInt)NThread::EContextExec));
+	__JUMP(eq,lr);
+
+	asm("ldr r0, [r1, #%a0]" : : "i" _FOFF(NThread,iStackBase));
+	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iStackSize));
+	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
+	asm("add r2, r2, r0");
+	asm("ldr r0, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+11*4));	// get saved return address from reschedule
+	asm("ldr r12, __irq_resched_return ");
+	asm("sub r2, r2, r3");
+	asm("cmp r0, r12 ");
+	asm("beq preempted ");
+
+	// Transition to supervisor mode must have been due to a SWI
+
+	asm("not_preempted:");
+	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+15*4)));
+	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextWFAR));	// thread must have blocked doing Exec::WaitForAnyRequest
+	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextExec));	// thread must have been in a SLOW or UNPROTECTED Exec call
+	__JUMP(,lr);
+
+	// thread was preempted due to an interrupt
+	// interrupt and reschedule will have pushed ? words + USER_MEMORY_GUARD_SAVE_WORDS + EXTRA_STACK_SPACE onto the stack
+
+	asm("preempted:");
+	asm("ldr r12, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+12*4));	// first word on stack before reschedule
+	asm("mov r0, #%a0 " : : "i" ((TInt)NThread::EContextUserInterrupt));
+	asm("and r12, r12, #0x1f ");
+	asm("cmp r12, #0x10 ");	// interrupted mode = user?
+	__JUMP(eq,lr);
+
+	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+30*4)));
+	asm("bcs not_preempted ");	// thread was interrupted in supervisor mode, return address and r4-r11 were saved
+
+	// interrupt occurred in exec call entry before r4-r11 saved
+	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+20*4)));
+	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt1));	// interrupt before return address was saved or after registers restored
+	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt2));	// interrupt after return address saved
+	__JUMP(,lr);
+
+	asm("__irq_resched_return: ");
+	asm(".word irq_resched_return ");
+	}
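+// Outline of the classification above, for reference (frame sizes are in
+// words relative to iSavedSP, over and above EXTRA_STACK_SPACE and the user
+// memory guard save area):
+//   1. Thread exiting (ECSExitInProgress)  -> type saved in iSpare3
+//   2. Type <= EContextException           -> return it unchanged
+//   3. Callback types                      -> return them unchanged
+//   4. Current thread                      -> EContextExec
+//   5. Saved PC == irq_resched_return      -> preempted by an interrupt; the
+//      interrupted mode (saved PSR) and the amount of stack used select one
+//      of EContextUserInterrupt / EContextSvsrInterrupt1 / EContextSvsrInterrupt2
+//   6. Otherwise a SWI: a 15-word frame    -> EContextWFAR,
+//      anything else                       -> EContextExec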
+
+#endif	// __USER_CONTEXT_TYPE_MACHINE_CODED__
+
+__NAKED__ void Arm::GetUserSpAndLr(TAny*)
+	{
+	asm("stmia r0, {r13, r14}^ ");
+	asm("mov r0, r0");	// NOP needed between stm^ and banked register access
+	__JUMP(,lr);
+	}
+
+__NAKED__ void Arm::SetUserSpAndLr(TAny*)
+	{
+	asm("ldmia r0, {r13, r14}^ ");
+	asm("mov r0, r0");	// NOP needed between ldm^ and banked register access
+	__JUMP(,lr);
+	}
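+// Usage sketch (the TAny* argument is assumed to address two words):
+//
+//   TUint32 regs[2];
+//   Arm::GetUserSpAndLr(regs);  // regs[0] = user R13 (SP), regs[1] = user R14 (LR)
+//   Arm::SetUserSpAndLr(regs);
+//
+// The "^" forms of STM/LDM access the user-mode banked registers from a
+// privileged mode; the architecture forbids touching banked registers in the
+// instruction immediately afterwards, hence the "mov r0, r0" NOPs.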
+
+#ifdef __CPU_ARM_USE_DOMAINS
+__NAKED__ TUint32 Arm::Dacr()
+	{
+	asm("mrc p15, 0, r0, c3, c0, 0 ");
+	__JUMP(,lr);
+	}
+
+__NAKED__ void Arm::SetDacr(TUint32)
+	{
+	asm("mcr p15, 0, r0, c3, c0, 0 ");
+	CPWAIT(,r0);
+	__JUMP(,lr);
+	}
+
+__NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
+	{
+	asm("mrc p15, 0, r2, c3, c0, 0 ");
+	asm("bic r2, r2, r0 ");
+	asm("orr r2, r2, r1 ");
+	asm("mcr p15, 0, r2, c3, c0, 0 ");
+	CPWAIT(,r0);
+	asm("mov r0, r2 ");
+	__JUMP(,lr);
+	}
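+// Usage sketch: ModifyDacr clears then sets the given bits and returns the
+// updated DACR. Each domain owns a 2-bit field (0b01 = client, 0b11 =
+// manager), so making domain 1 a client domain, for example, would be:
+//
+//   TUint32 newDacr = Arm::ModifyDacr(3u << 2, 1u << 2);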
+#endif
+
+#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
+__NAKED__ void Arm::SetCar(TUint32)
+	{
+	SET_CAR(,r0);
+	CPWAIT(,r0);
+	__JUMP(,lr);
+	}
+#endif
+
+
+
+/** Get the CPU's coprocessor access register value.
+
+@return The value of the CAR, 0 if the CPU doesn't have a CAR
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::Car()
+	{
+#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
+	GET_CAR(,r0);
+#else
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
+
+
+
+/** Modify the CPU's coprocessor access register value.
+	Does nothing if the CPU does not have a CAR.
+
+@param aClearMask Mask of bits to clear (1 = clear this bit)
+@param aSetMask Mask of bits to set (1 = set this bit)
+@return The original value of the CAR, 0 if the CPU doesn't have a CAR
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
+	{
+#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
+	GET_CAR(,r2);
+	asm("bic r0, r2, r0 ");
+	asm("orr r0, r0, r1 ");
+	SET_CAR(,r0);
+	CPWAIT(,r0);
+	asm("mov r0, r2 ");
+#else
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
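+// Usage sketch: in the CAR (CP15 CPACR) each coprocessor has a 2-bit access
+// field; cp10/cp11 (bits 20-23) gate VFP/NEON access. Granting full access to
+// both, for example:
+//
+//   TUint32 oldCar = Arm::ModifyCar(0, (3u << 20) | (3u << 22));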
+
+#ifdef __CPU_HAS_VFP
+__NAKED__ void Arm::SetFpExc(TUint32)
+	{
+#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
+// If we are about to enable VFP, disable dynamic branch prediction
+// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
+	asm("mrs r3, cpsr ");
+	CPSIDAIF;
+	asm("mrc p15, 0, r1, c1, c0, 1 ");
+	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
+	asm("bic r1, r1, #2 ");	// clear DB bit (disable dynamic prediction)
+	asm("and r2, r1, #1 ");	// r2 bit 0 = RS bit (1 if return stack enabled)
+	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
+	asm("mcr p15, 0, r1, c1, c0, 1 ");
+	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
+	VFP_FMXR(,VFP_XREG_FPEXC,0);
+	asm("msr cpsr, r3 ");
+	__JUMP(,lr);
+#else
+	VFP_FMXR(,VFP_XREG_FPEXC,0);
+	__JUMP(,lr);
+#endif
+	}
+#endif
+
+
+
+/** Get the value of the VFP FPEXC register
+
+@return The value of FPEXC, 0 if there is no VFP
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::FpExc()
+	{
+#ifdef __CPU_HAS_VFP
+	VFP_FMRX(,0,VFP_XREG_FPEXC);
+#else
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
+
+
+
+/** Modify the VFP FPEXC register
+	Does nothing if there is no VFP
+
+@param aClearMask Mask of bits to clear (1 = clear this bit)
+@param aSetMask Mask of bits to set (1 = set this bit)
+@return The original value of FPEXC, 0 if no VFP present
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
+	{
+#ifdef __CPU_HAS_VFP
+	VFP_FMRX(,12,VFP_XREG_FPEXC);
+	asm("bic r0, r12, r0 ");
+	asm("orr r0, r0, r1 ");
+
+#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
+// If we are about to enable VFP, disable dynamic branch prediction
+// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
+	asm("mrs r3, cpsr ");
+	CPSIDAIF;
+	asm("mrc p15, 0, r1, c1, c0, 1 ");
+	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
+	asm("bic r1, r1, #2 ");	// clear DB bit (disable dynamic prediction)
+	asm("and r2, r1, #1 ");	// r2 bit 0 = RS bit (1 if return stack enabled)
+	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
+	asm("mcr p15, 0, r1, c1, c0, 1 ");
+	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
+	VFP_FMXR(,VFP_XREG_FPEXC,0);
+	asm("msr cpsr, r3 ");
+#else
+	VFP_FMXR(,VFP_XREG_FPEXC,0);
+#endif // erratum 351912
+
+	asm("mov r0, r12 ");
+#else // no vfp
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
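+// Usage sketch: FPEXC.EN gates the whole VFP unit, so a caller can enable it,
+// do some floating-point work, then restore the previous state:
+//
+//   TUint32 oldFpExc = Arm::ModifyFpExc(0, VFP_FPEXC_EN);     // set EN
+//   ...                                                       // FP work
+//   Arm::ModifyFpExc(VFP_FPEXC_EN, oldFpExc & VFP_FPEXC_EN);  // restore EN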
+
+/** Get the value of the VFP FPSCR register
+
+@return The value of FPSCR, 0 if there is no VFP
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::FpScr()
+	{
+#ifdef __CPU_HAS_VFP
+	VFP_FMRX(,0,VFP_XREG_FPSCR);
+#else
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
+
+
+
+/** Modify the VFP FPSCR register
+	Does nothing if there is no VFP
+
+@param aClearMask Mask of bits to clear (1 = clear this bit)
+@param aSetMask Mask of bits to set (1 = set this bit)
+@return The original value of FPSCR, 0 if no VFP present
+
+@publishedPartner
+@released
+ */
+EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
+	{
+#ifdef __CPU_HAS_VFP
+	VFP_FMRX(,2,VFP_XREG_FPSCR);
+	asm("bic r0, r2, r0 ");
+	asm("orr r0, r0, r1 ");
+	VFP_FMXR(,VFP_XREG_FPSCR,0);
+	asm("mov r0, r2 ");
+#else
+	asm("mov r0, #0 ");
+#endif
+	__JUMP(,lr);
+	}
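+// Usage sketch: the low FPSCR bits hold the cumulative exception flags, so
+// (assuming the architectural layout, bits 4:0) they can be cleared without
+// disturbing the rounding-mode and other control bits:
+//
+//   TUint32 oldFpScr = Arm::ModifyFpScr(0x1F, 0);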
+
+
+/** Detect whether NEON is present
+
+@return ETrue if present, EFalse if not
+
+@internalTechnology
+@released
+ */
+#if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
+__NAKED__ TBool Arm::NeonPresent()
+	{
+	asm("mov r0, #0 ");	// Not present
+	VFP_FMRX(, 1,VFP_XREG_FPEXC);	// Save VFP state
+	asm("orr r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
+	VFP_FMXR(, VFP_XREG_FPEXC,2);	// Enable VFP
+
+	VFP_FMRX(, 2,VFP_XREG_MVFR0);	// Read MVFR0
+	asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
+	asm("beq 0f ");	// Skip ahead if not
+	GET_CAR(, r2);
+	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS));	// Check to see if ASIMD is disabled
+	asm("bne 0f ");	// Skip ahead if so
+	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if the upper 16 registers are disabled
+	asm("moveq r0, #1 ");	// If not, report NEON present
+
+	asm("0: ");
+	VFP_FMXR(,VFP_XREG_FPEXC,1);	// Restore VFP state
+	__JUMP(, lr);
+	}
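+// Note: the probe above briefly sets FPEXC.EN so MVFR0 can be read, then
+// writes back the FPEXC value saved in r1. NEON is reported present only if
+// MVFR0 advertises the full 32-register Advanced SIMD bank and neither
+// CPACR.ASEDIS nor CPACR.D32DIS is set, i.e. the answer reflects both the
+// hardware and the current coprocessor access configuration.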
+#endif
+
+
+#ifdef __CPU_HAS_MMU
+__NAKED__ TBool Arm::MmuActive()
+	{
+	asm("mrc p15, 0, r0, c1, c0, 0 ");
+	asm("and r0, r0, #1 ");
+	__JUMP(,lr);
+	}
+
+// Returns the content of Translation Table Base Register 0.
+// To get the physical address of the level 1 table, on some platforms the
+// table-walk cache attribute bits must be masked off (e.g. ANDed with 0xffffc000).
+__NAKED__ TUint32 Arm::MmuTTBR0()
+	{
+	asm("mrc p15, 0, r0, c2, c0, 0 ");
+	__JUMP(,lr);
+	}
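+// Usage sketch (assuming the usual 16KB-aligned level 1 table, i.e. TTBCR.N=0):
+//
+//   TUint32 l1TablePhys = Arm::MmuTTBR0() & 0xFFFFC000u;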
+#endif
+
+
+
+/** Get the current value of the high performance counter.
+
+	If a high performance counter is not available, this uses the millisecond
+	tick count instead.
+*/
+#ifdef HAS_HIGH_RES_TIMER
+EXPORT_C __NAKED__ TUint32 NKern::FastCounter()
+	{
+	GET_HIGH_RES_TICK_COUNT(R0);
+	__JUMP(,lr);
+	}
+#else
+EXPORT_C TUint32 NKern::FastCounter()
+	{
+	return NTickCount();
+	}
+#endif
+
+
+
+/** Get the frequency of the counter queried by NKern::FastCounter().
+*/
+EXPORT_C TInt NKern::FastCounterFrequency()
+	{
+#ifdef HAS_HIGH_RES_TIMER
+	return KHighResTimerFrequency;
+#else
+	return 1000000 / NKern::TickPeriod();
+#endif
+	}
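+// Usage sketch: timing an interval with the fast counter. The counter is 32
+// bits wide and wraps, so subtract two samples rather than comparing them:
+//
+//   TUint32 start = NKern::FastCounter();
+//   DoWork();                                       // hypothetical workload
+//   TUint32 ticks = NKern::FastCounter() - start;
+//   TUint64 us = TUint64(ticks) * 1000000u / NKern::FastCounterFrequency();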
+