os/kernelhwsrv/kernel/eka/nkern/arm/ncutilf.cia
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\nkern\arm\ncutilf.cia
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
#include <e32cia.h>
sl@0
    19
#include <arm.h>
sl@0
    20
#include "highrestimer.h"
sl@0
    21
sl@0
    22
#ifdef __SCHEDULER_MACHINE_CODED__
sl@0
    23
/** Signals the request semaphore of a nanothread.
sl@0
    24
sl@0
    25
	This function is intended to be used by the EPOC layer and personality
sl@0
    26
	layers.  Device drivers should use Kern::RequestComplete instead.
sl@0
    27
sl@0
    28
	@param aThread Nanothread to signal.  Must be non NULL.
sl@0
    29
sl@0
    30
	@see Kern::RequestComplete()
sl@0
    31
sl@0
    32
	@pre Interrupts must be enabled.
sl@0
    33
	@pre Do not call from an ISR.
sl@0
    34
 */
sl@0
    35
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
sl@0
    36
	{
sl@0
    37
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR);
sl@0
    38
sl@0
    39
	asm("ldr r2, __TheScheduler ");
sl@0
    40
	// Naked function: preserve lr on the stack across the bl below
	asm("str lr, [sp, #-4]! ");
sl@0
    41
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
sl@0
    42
	// r0 = &aThread->iRequestSemaphore, the 'this' for NFastSemaphore::Signal()
	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
sl@0
    43
	// Lock the kernel: increment TheScheduler.iKernCSLocked
	asm("add r3, r3, #1 ");
sl@0
    44
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
sl@0
    45
	asm("bl  " CSM_ZN14NFastSemaphore6SignalEv);	// alignment OK since target is also assembler
sl@0
    46
	asm("ldr lr, [sp], #4 ");
sl@0
    47
	// Tail-call NKern::Unlock(); it returns directly to our caller
	asm("b  " CSM_ZN5NKern6UnlockEv);
sl@0
    48
	}
sl@0
    49
sl@0
    50
sl@0
    51
/** Atomically signals the request semaphore of a nanothread and a fast mutex.
sl@0
    52
sl@0
    53
	This function is intended to be used by the EPOC layer and personality
sl@0
    54
	layers.  Device drivers should use Kern::RequestComplete instead.
sl@0
    55
sl@0
    56
	@param aThread Nanothread to signal.  Must be non NULL.
sl@0
    57
	@param aMutex Fast mutex to signal.  If NULL, the system lock is signaled.
sl@0
    58
sl@0
    59
	@see Kern::RequestComplete()
sl@0
    60
sl@0
    61
	@pre Kernel must be unlocked.
sl@0
    62
	@pre Call in a thread context.
sl@0
    63
	@pre Interrupts must be enabled.
sl@0
    64
 */
sl@0
    65
EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, NFastMutex* /*aMutex*/)
sl@0
    66
	{
sl@0
    67
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
sl@0
    68
sl@0
    69
	asm("ldr r2, __TheScheduler ");
sl@0
    70
	// aMutex == NULL means "signal the system lock"
	asm("cmp r1, #0 ");
sl@0
    71
	asm("ldreq r1, __SystemLock ");
sl@0
    72
	asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
sl@0
    73
	// Save the mutex pointer and lr; both are popped again below
	asm("stmfd sp!, {r1,lr} ");
sl@0
    74
	// r0 = &aThread->iRequestSemaphore, the 'this' for NFastSemaphore::Signal()
	asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore));
sl@0
    75
	// Lock the kernel: increment TheScheduler.iKernCSLocked
	asm("add r3, r3, #1 ");
sl@0
    76
	asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
sl@0
    77
	asm("bl  " CSM_ZN14NFastSemaphore6SignalEv);
sl@0
    78
	// Pop the saved mutex pointer into r0 ('this' for NFastMutex::Signal())
	asm("ldr r0, [sp], #4 ");
sl@0
    79
	asm("bl  " CSM_ZN10NFastMutex6SignalEv);		// alignment OK since target is also assembler
sl@0
    80
	asm("ldr lr, [sp], #4 ");
sl@0
    81
	// Tail-call NKern::Unlock(); it returns directly to our caller
	asm("b  " CSM_ZN5NKern6UnlockEv);
sl@0
    82
sl@0
    83
	// Literal pool: addresses of the system lock and the scheduler
	asm("__SystemLock: ");
sl@0
    84
	asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock));
sl@0
    85
	asm("__TheScheduler: ");
sl@0
    86
	asm(".word TheScheduler ");
sl@0
    87
	}
sl@0
    88
#endif
sl@0
    89
sl@0
    90
sl@0
    91
#ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
sl@0
    92
// called by C++ version of NThread::UserContextType()
sl@0
    93
__NAKED__ TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/)
sl@0
    94
	{
sl@0
    95
	// Compare aAddr with the address of irq_resched_return: on a match r0
	// keeps aAddr (non-zero, i.e. true), otherwise r0 is cleared to 0.
	asm("ldr r1, __irq_resched_return ");
sl@0
    96
	asm("cmp r0, r1 ");
sl@0
    97
	asm("movne r0, #0 ");
sl@0
    98
	__JUMP(,lr);
sl@0
    99
	// Literal pool entry holding the address of irq_resched_return
	asm("__irq_resched_return: ");
sl@0
   100
	asm(".word irq_resched_return ");
sl@0
   101
	}
sl@0
   102
sl@0
   103
#else
sl@0
   104
sl@0
   105
/** Get a value which indicates where a thread's user mode context is stored.
sl@0
   106
sl@0
   107
	@return A value that can be used as an index into the tables returned by
sl@0
   108
	NThread::UserContextTables().
sl@0
   109
sl@0
   110
	@pre any context
sl@0
   111
	@pre kernel locked
sl@0
   112
	@post kernel locked
sl@0
   113
 
sl@0
   114
	@see UserContextTables
sl@0
   115
	@publishedPartner
sl@0
   116
 */
sl@0
   117
EXPORT_C __NAKED__ NThread::TUserContextType NThread::UserContextType()
sl@0
   118
	{
sl@0
   119
	ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
sl@0
   120
//
sl@0
   121
// Optimisation note: It may be possible to coalesce the first and second
sl@0
   122
// checks below by creating separate "EContextXxxDied" context types for each
sl@0
   123
// possible way a thread can die and ordering these new types before
sl@0
   124
// EContextException.
sl@0
   125
//
sl@0
   126
sl@0
   127
	// Dying thread? use context saved earlier by kernel
sl@0
   128
sl@0
   129
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
sl@0
   130
	asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iSpare3));   // r2 = iUserContextType
sl@0
   131
	asm("mov r1, r0 ");    // r1 = this
sl@0
   132
	asm("cmp r3, #%a0" : : "i" ((TInt)NThread::ECSExitInProgress));
sl@0
   133
	asm("moveq r0, r2"); 
sl@0
   134
	__JUMP(eq,lr);
sl@0
   135
sl@0
   136
	// Exception or no user context?
sl@0
   137
sl@0
   138
	asm("ldr r3, __TheScheduler");
sl@0
   139
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
sl@0
   140
	asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
sl@0
   141
	asm("movls r0, r2 ");  // Return EContextNone or EContextException
sl@0
   142
	__JUMP(ls,lr);
sl@0
   143
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextUserIntrCallback));
sl@0
   144
	asm("blo 1f");
sl@0
   145
	asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextWFARCallback));
sl@0
   146
	asm("movls r0, r2 ");  // Return EContextUserIntrCallback or EContextWFARCallback
sl@0
   147
	__JUMP(ls,lr);
sl@0
   148
sl@0
   149
	// Getting current thread context? must be in exec call as exception
sl@0
   150
	// and dying thread cases were tested above.
sl@0
   151
sl@0
   152
	asm("1: ");
sl@0
   153
	asm("cmp r3, r1");   // is 'this' the current thread?
sl@0
   154
	asm("moveq r0, #%a0" : : "i" ((TInt)NThread::EContextExec));
sl@0
   155
	__JUMP(eq,lr);
sl@0
   156
sl@0
   157
	// Thread is not running: classify it by how much supervisor stack is in
	// use and by the saved return address from the last reschedule.
	asm("ldr r0, [r1, #%a0]" : : "i" _FOFF(NThread,iStackBase));
sl@0
   158
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iStackSize));
sl@0
   159
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
sl@0
   160
	asm("add r2, r2, r0");   // r2 = top of supervisor stack
sl@0
   161
	asm("ldr r0, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+11*4)); // get saved return address from reschedule
sl@0
   162
	asm("ldr r12, __irq_resched_return ");
sl@0
   163
	asm("sub r2, r2, r3");   // r2 = bytes currently on the supervisor stack
sl@0
   164
	asm("cmp r0, r12 ");
sl@0
   165
	asm("beq preempted ");
sl@0
   166
sl@0
   167
	// Transition to supervisor mode must have been due to a SWI
sl@0
   168
sl@0
   169
	asm("not_preempted:");
sl@0
   170
	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+15*4)));
sl@0
   171
	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextWFAR)); // thread must have blocked doing Exec::WaitForAnyRequest
sl@0
   172
	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextExec)); // Thread must have been in a SLOW or UNPROTECTED Exec call
sl@0
   173
	__JUMP(,lr);
sl@0
   174
	
sl@0
   175
	// thread was preempted due to an interrupt
sl@0
   176
	// interrupt and reschedule will have pushed ? words + USER_MEMORY_GUARD_SAVE_WORDS + EXTRA_STACK_SPACE onto the stack
sl@0
   177
sl@0
   178
	asm("preempted:");
sl@0
   179
	asm("ldr r12, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+12*4));  // first word on stack before reschedule
sl@0
   180
	asm("mov r0, #%a0 " : : "i" ((TInt)NThread::EContextUserInterrupt));
sl@0
   181
	asm("and r12, r12, #0x1f ");   // r12 = mode bits of the interrupted CPSR
sl@0
   182
	asm("cmp r12, #0x10 ");   // interrupted mode = user?
sl@0
   183
	__JUMP(eq,lr);
sl@0
   184
sl@0
   185
	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+30*4)));
sl@0
   186
	asm("bcs not_preempted "); 	// thread was interrupted in supervisor mode, return address and r4-r11 were saved
sl@0
   187
sl@0
   188
	// interrupt occurred in exec call entry before r4-r11 saved
sl@0
   189
	asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+20*4)));
sl@0
   190
	asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt1)); // interrupt before return address was saved or after registers restored
sl@0
   191
	asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt2)); // interrupt after return address saved
sl@0
   192
	__JUMP(,lr);
sl@0
   193
sl@0
   194
	// Literal pool entry holding the address of irq_resched_return
	asm("__irq_resched_return: ");
sl@0
   195
	asm(".word irq_resched_return ");
sl@0
   196
	}
sl@0
   197
sl@0
   198
#endif  // __USER_CONTEXT_TYPE_MACHINE_CODED__
sl@0
   199
sl@0
   200
__NAKED__ void Arm::GetUserSpAndLr(TAny*) 
sl@0
   201
	{
sl@0
   202
	// Store the banked user-mode r13/r14 into the two words at r0 (stm with ^)
	asm("stmia r0, {r13, r14}^ ");
sl@0
   203
	asm("mov r0, r0"); // NOP needed between stm^ and banked register access
sl@0
   204
	__JUMP(,lr);
sl@0
   205
	}
sl@0
   206
sl@0
   207
__NAKED__ void Arm::SetUserSpAndLr(TAny*) 
sl@0
   208
	{
sl@0
   209
	// Load the banked user-mode r13/r14 from the two words at r0 (ldm with ^)
	asm("ldmia r0, {r13, r14}^ ");
sl@0
   210
	asm("mov r0, r0"); // NOP needed between ldm^ and banked register access
sl@0
   211
	__JUMP(,lr);
sl@0
   212
	}
sl@0
   213
sl@0
   214
#ifdef __CPU_ARM_USE_DOMAINS
sl@0
   215
__NAKED__ TUint32 Arm::Dacr()
sl@0
   216
	{
sl@0
   217
	// Read CP15 c3: the domain access control register
	asm("mrc p15, 0, r0, c3, c0, 0 ");
sl@0
   218
	__JUMP(,lr);
sl@0
   219
	}
sl@0
   220
sl@0
   221
__NAKED__ void Arm::SetDacr(TUint32)
sl@0
   222
	{
sl@0
   223
	// Write CP15 c3 (domain access control), then wait for the write to
	// take effect before returning
	asm("mcr p15, 0, r0, c3, c0, 0 ");
sl@0
   224
	CPWAIT(,r0);
sl@0
   225
	__JUMP(,lr);
sl@0
   226
	}
sl@0
   227
sl@0
   228
__NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
sl@0
   229
	{
sl@0
   230
	// r2 = (DACR & ~clearMask) | setMask; write it back and return it.
	// NB: unlike Arm::ModifyCar this returns the UPDATED value, not the
	// original one.
	asm("mrc p15, 0, r2, c3, c0, 0 ");
sl@0
   231
	asm("bic r2, r2, r0 ");
sl@0
   232
	asm("orr r2, r2, r1 ");
sl@0
   233
	asm("mcr p15, 0, r2, c3, c0, 0 ");
sl@0
   234
	CPWAIT(,r0);
sl@0
   235
	asm("mov r0, r2 ");
sl@0
   236
	__JUMP(,lr);
sl@0
   237
	}
sl@0
   238
#endif
sl@0
   239
sl@0
   240
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
sl@0
   241
__NAKED__ void Arm::SetCar(TUint32)
sl@0
   242
	{
sl@0
   243
	// Write the coprocessor access register, then wait for the write to
	// take effect before returning
	SET_CAR(,r0);
sl@0
   244
	CPWAIT(,r0);
sl@0
   245
	__JUMP(,lr);
sl@0
   246
	}
sl@0
   247
#endif
sl@0
   248
sl@0
   249
sl@0
   250
sl@0
   251
/** Get the CPU's coprocessor access register value
sl@0
   252
sl@0
   253
@return The value of the CAR, 0 if CPU doesn't have CAR
sl@0
   254
sl@0
   255
@publishedPartner
sl@0
   256
@released
sl@0
   257
 */
sl@0
   258
EXPORT_C __NAKED__ TUint32 Arm::Car()
sl@0
   259
	{
sl@0
   260
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
sl@0
   261
	// Read the coprocessor access register into the return register
	GET_CAR(,r0);
sl@0
   262
#else
sl@0
   263
	// No coprocessor access register on this CPU: return 0
	asm("mov r0, #0 ");
sl@0
   264
#endif
sl@0
   265
	__JUMP(,lr);
sl@0
   266
	}
sl@0
   267
sl@0
   268
sl@0
   269
sl@0
   270
/** Modify the CPU's coprocessor access register value
sl@0
   271
	Does nothing if CPU does not have CAR.
sl@0
   272
sl@0
   273
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
   274
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
   275
@return The original value of the CAR, 0 if CPU doesn't have CAR
sl@0
   276
sl@0
   277
@publishedPartner
sl@0
   278
@released
sl@0
   279
 */
sl@0
   280
EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
   281
	{
sl@0
   282
#ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
sl@0
   283
	// r2 = original CAR; write back (CAR & ~aClearMask) | aSetMask
	GET_CAR(,r2);
sl@0
   284
	asm("bic r0, r2, r0 ");
sl@0
   285
	asm("orr r0, r0, r1 ");
sl@0
   286
	SET_CAR(,r0);
sl@0
   287
	CPWAIT(,r0);
sl@0
   288
	// Return the ORIGINAL value (saved in r2 above)
	asm("mov r0, r2 ");
sl@0
   289
#else
sl@0
   290
	asm("mov r0, #0 ");
sl@0
   291
#endif
sl@0
   292
	__JUMP(,lr);
sl@0
   293
	}
sl@0
   294
sl@0
   295
#ifdef __CPU_HAS_VFP
sl@0
   296
__NAKED__ void Arm::SetFpExc(TUint32)
sl@0
   297
	{
sl@0
   298
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
sl@0
   299
// If we are about to enable VFP, disable dynamic branch prediction
sl@0
   300
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
sl@0
   301
	// Interrupts are masked (CPSIDAIF) so the aux control register and
	// FPEXC are updated as one unit; the original CPSR is restored after.
	asm("mrs r3, cpsr ");
sl@0
   302
	CPSIDAIF;
sl@0
   303
	asm("mrc p15, 0, r1, c1, c0, 1 ");
sl@0
   304
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
sl@0
   305
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
sl@0
   306
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
sl@0
   307
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
sl@0
   308
	asm("mcr p15, 0, r1, c1, c0, 1 ");
sl@0
   309
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
sl@0
   310
	VFP_FMXR(,VFP_XREG_FPEXC,0);
sl@0
   311
	asm("msr cpsr, r3 ");
sl@0
   312
	__JUMP(,lr);
sl@0
   313
#else
sl@0
   314
	// No erratum workaround needed: just write FPEXC from r0
	VFP_FMXR(,VFP_XREG_FPEXC,0);
sl@0
   315
	__JUMP(,lr);
sl@0
   316
#endif
sl@0
   317
	}
sl@0
   318
#endif
sl@0
   319
sl@0
   320
sl@0
   321
sl@0
   322
/** Get the value of the VFP FPEXC register
sl@0
   323
sl@0
   324
@return The value of FPEXC, 0 if there is no VFP
sl@0
   325
sl@0
   326
@publishedPartner
sl@0
   327
@released
sl@0
   328
 */
sl@0
   329
EXPORT_C __NAKED__ TUint32 Arm::FpExc()
sl@0
   330
	{
sl@0
   331
#ifdef __CPU_HAS_VFP
sl@0
   332
	// Read FPEXC into r0 (the return register)
	VFP_FMRX(,0,VFP_XREG_FPEXC);
sl@0
   333
#else
sl@0
   334
	asm("mov r0, #0 ");
sl@0
   335
#endif
sl@0
   336
	__JUMP(,lr);
sl@0
   337
	}
sl@0
   338
sl@0
   339
sl@0
   340
sl@0
   341
/** Modify the VFP FPEXC register
sl@0
   342
	Does nothing if there is no VFP
sl@0
   343
sl@0
   344
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
   345
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
   346
@return The original value of FPEXC, 0 if no VFP present
sl@0
   347
sl@0
   348
@publishedPartner
sl@0
   349
@released
sl@0
   350
 */
sl@0
   351
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
   352
	{
sl@0
   353
#ifdef __CPU_HAS_VFP
sl@0
   354
	// r12 = original FPEXC; r0 = (FPEXC & ~aClearMask) | aSetMask
	VFP_FMRX(,12,VFP_XREG_FPEXC);
sl@0
   355
	asm("bic r0, r12, r0 ");
sl@0
   356
	asm("orr r0, r0, r1 ");
sl@0
   357
sl@0
   358
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
sl@0
   359
// If we are about to enable VFP, disable dynamic branch prediction
sl@0
   360
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
sl@0
   361
	// Same erratum workaround as Arm::SetFpExc: interrupts masked while the
	// aux control register and FPEXC are updated together.
	asm("mrs r3, cpsr ");
sl@0
   362
	CPSIDAIF;
sl@0
   363
	asm("mrc p15, 0, r1, c1, c0, 1 ");
sl@0
   364
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
sl@0
   365
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
sl@0
   366
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
sl@0
   367
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
sl@0
   368
	asm("mcr p15, 0, r1, c1, c0, 1 ");
sl@0
   369
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
sl@0
   370
	VFP_FMXR(,VFP_XREG_FPEXC,0);
sl@0
   371
	asm("msr cpsr, r3 ");
sl@0
   372
#else
sl@0
   373
	VFP_FMXR(,VFP_XREG_FPEXC,0);
sl@0
   374
#endif	// erratum 351912
sl@0
   375
sl@0
   376
	// Return the ORIGINAL FPEXC value (saved in r12 above)
	asm("mov r0, r12 ");
sl@0
   377
#else	// no vfp
sl@0
   378
	asm("mov r0, #0 ");
sl@0
   379
#endif
sl@0
   380
	__JUMP(,lr);
sl@0
   381
	}
sl@0
   382
sl@0
   383
/** Get the value of the VFP FPSCR register
sl@0
   384
sl@0
   385
@return The value of FPSCR, 0 if there is no VFP
sl@0
   386
sl@0
   387
@publishedPartner
sl@0
   388
@released
sl@0
   389
 */
sl@0
   390
EXPORT_C __NAKED__ TUint32 Arm::FpScr()
sl@0
   391
	{
sl@0
   392
#ifdef __CPU_HAS_VFP
sl@0
   393
	// Read FPSCR into r0 (the return register)
	VFP_FMRX(,0,VFP_XREG_FPSCR);
sl@0
   394
#else
sl@0
   395
	asm("mov r0, #0 ");
sl@0
   396
#endif
sl@0
   397
	__JUMP(,lr);
sl@0
   398
	}
sl@0
   399
sl@0
   400
sl@0
   401
sl@0
   402
/** Modify the VFP FPSCR register
sl@0
   403
	Does nothing if there is no VFP
sl@0
   404
sl@0
   405
@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
sl@0
   406
@param	aSetMask	Mask of bits to set		(1 = set this bit)
sl@0
   407
@return The original value of FPSCR, 0 if no VFP present
sl@0
   408
sl@0
   409
@publishedPartner
sl@0
   410
@released
sl@0
   411
 */
sl@0
   412
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
sl@0
   413
	{
sl@0
   414
#ifdef __CPU_HAS_VFP
sl@0
   415
	// r2 = original FPSCR; write back (FPSCR & ~aClearMask) | aSetMask
	VFP_FMRX(,2,VFP_XREG_FPSCR);
sl@0
   416
	asm("bic r0, r2, r0 ");
sl@0
   417
	asm("orr r0, r0, r1 ");
sl@0
   418
	VFP_FMXR(,VFP_XREG_FPSCR,0);
sl@0
   419
	// Return the ORIGINAL value (saved in r2 above)
	asm("mov r0, r2 ");
sl@0
   420
#else
sl@0
   421
	asm("mov r0, #0 ");
sl@0
   422
#endif
sl@0
   423
	__JUMP(,lr);
sl@0
   424
	}
sl@0
   425
sl@0
   426
sl@0
   427
/** Detect whether NEON is present
sl@0
   428
sl@0
   429
@return ETrue if present, EFalse if not
sl@0
   430
sl@0
   431
@internalTechnology
sl@0
   432
@released
sl@0
   433
 */
sl@0
   434
#if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
sl@0
   435
__NAKED__ TBool Arm::NeonPresent()
sl@0
   436
	{
sl@0
   437
	// Probe MVFR0 and CPACR with VFP temporarily enabled. r1 holds the
	// caller's FPEXC throughout and is restored before returning.
	asm("mov	r0, #0 ");										// Not present
sl@0
   438
	VFP_FMRX(,	1,VFP_XREG_FPEXC);								// Save VFP state
sl@0
   439
	asm("orr	r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
sl@0
   440
	// BUGFIX: write r2 (saved FPEXC | EN), not r1 (the unmodified saved
	// value) - the original wrote register 1 here, so the EN-orred value in
	// r2 was dead and VFP was never actually enabled for the probe.
	VFP_FMXR(,	VFP_XREG_FPEXC,2);								// Enable VFP
sl@0
   441
sl@0
   442
	VFP_FMRX(,	2,VFP_XREG_MVFR0);								// Read MVFR0
sl@0
   443
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
sl@0
   444
	asm("beq	0f ");											// Skip ahead if not
sl@0
   445
	GET_CAR(,	r2);
sl@0
   446
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS));	// Check to see if ASIMD is disabled
sl@0
   447
	asm("bne	0f ");											// Skip ahead if so
sl@0
   448
	asm("tst	r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if the upper 16 registers are disabled
sl@0
   449
	asm("moveq	r0, #1" );										// If not then report NEON present
sl@0
   450
sl@0
   451
	asm("0: ");
sl@0
   452
	VFP_FMXR(,VFP_XREG_FPEXC,1);								// Restore VFP state
sl@0
   453
	__JUMP(,	lr);
sl@0
   454
	}
sl@0
   455
#endif
sl@0
   456
sl@0
   457
sl@0
   458
#ifdef __CPU_HAS_MMU
sl@0
   459
__NAKED__ TBool Arm::MmuActive()
sl@0
   460
	{
sl@0
   461
	// CP15 c1 (system control register) bit 0 is the MMU enable bit
	asm("mrc p15, 0, r0, c1, c0, 0 ");
sl@0
   462
	asm("and r0, r0, #1 ");
sl@0
   463
	__JUMP(,lr);
sl@0
   464
	}
sl@0
   465
sl@0
   466
// Returns the content of Translate Table Base Register 0.
sl@0
   467
// To get physical address of the level 1 table, on some platforms this must be orred with 0xffff8000 (to get rid of table walk cache attributes)
sl@0
   468
__NAKED__ TUint32 Arm::MmuTTBR0()
sl@0
   469
	{
sl@0
   470
	// Read CP15 c2: Translation Table Base Register 0
	asm("mrc p15, 0, r0, c2, c0, 0 ");
sl@0
   471
	__JUMP(,lr);
sl@0
   472
	}
sl@0
   473
#endif
sl@0
   474
sl@0
   475
sl@0
   476
sl@0
   477
/** Get the current value of the high performance counter.
sl@0
   478
sl@0
   479
    If a high performance counter is not available, this uses the millisecond
sl@0
   480
    tick count instead.
sl@0
   481
*/
sl@0
   482
#ifdef HAS_HIGH_RES_TIMER
sl@0
   483
EXPORT_C __NAKED__ TUint32 NKern::FastCounter()
	{
sl@0
   485
	// Read the platform's high-resolution tick straight into the return
	// register
	GET_HIGH_RES_TICK_COUNT(R0);
sl@0
   486
	__JUMP(,lr);
sl@0
   487
	}
sl@0
   488
#else
sl@0
   489
EXPORT_C TUint32 NKern::FastCounter()
	{
sl@0
   491
	// No high-resolution timer on this platform: fall back to the
	// nanokernel tick count
	return NTickCount();
sl@0
   492
	}
sl@0
   493
#endif
sl@0
   494
sl@0
   495
sl@0
   496
sl@0
   497
/** Get the frequency of counter queried by NKern::FastCounter().
sl@0
   498
*/
sl@0
   499
EXPORT_C TInt NKern::FastCounterFrequency()
	{
sl@0
   501
#ifdef HAS_HIGH_RES_TIMER
sl@0
   502
	return KHighResTimerFrequency;
sl@0
   503
#else
sl@0
   504
	// Fast counter is the nanokernel tick, so frequency = ticks per second.
	// NOTE(review): assumes NKern::TickPeriod() returns the period in
	// microseconds - confirm against its declaration.
	return 1000000 / NKern::TickPeriod();
sl@0
   505
#endif
sl@0
   506
	}
sl@0
   507