os/kernelhwsrv/kernel/eka/nkernsmp/arm/ncsched.cia
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncsched.cia
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include <e32cia.h>
#include <arm.h>
#include "nkern.h"
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>
//#include <highrestimer.h>
//#include "emievents.h"

#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
								asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
								asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
								asm("str "#rs", ["#rp"] ");\
								asm("str "#rs", ["#rp", #4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#endif

#define ALIGN_STACK_START			\
	asm("mov r12, sp");				\
	asm("tst sp, #4");				\
	asm("subeq sp, sp, #4");		\
	asm("str r12, [sp,#-4]!")

#define ALIGN_STACK_END				\
	asm("ldr sp, [sp]")
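/*	Illustrative walk-through of the alignment trick above: if SP enters as
	0x...F8 (bit 2 clear) the SUBEQ drops it to 0x...F4 and the STR with
	writeback pushes the saved value at 0x...F0; if SP enters as 0x...F4
	(bit 2 set) the SUBEQ is skipped and the push alone lands SP on 0x...F0.
	Either way SP ends up 8-byte aligned with the original SP stored at [SP],
	which is exactly what ALIGN_STACK_END reloads.
 */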


//#define __DEBUG_BAD_ADDR

extern "C" void NewThreadTrace(NThread* a);
extern "C" void send_accumulated_resched_ipis();


__NAKED__ void TScheduler::Reschedule()
	{
	//
	// Enter in mode_svc with kernel locked, interrupts can be on or off
	// Exit in mode_svc with kernel unlocked, interrupts off
	// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
	// NOTE: R4-R11 are modified
	//
	asm("mov	r2, sp ");					// bit 0 will be reschedule flag
	asm("bic	sp, sp, #4 ");				// align stack
	GET_RWNO_TID(,r0)						// r0->TSubScheduler
	asm("stmfd	sp!, {r2,lr} ");			// save original SP/resched flag, return address
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
	asm("mov	r11, r0 ");					// r11->TSubScheduler
	asm("ldr	r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));	// r10->CPU local timer

	asm("start_resched: ");
	asm("movs	r1, r1, lsr #16 ");			// check if IDFCs or ExIDFCs pending

	asm("blne "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("ldr	r3, [sp, #0] ");
	asm("mrs	r2, spsr ");				// r2=spsr_svc
	asm("cmp	r1, #0 ");					// check if a reschedule is required
	asm("beq	no_resched_needed ");		// branch out if not
	__ASM_STI();							// interrupts back on
	asm("orr	r3, r3, #1 ");
	asm("str	r3, [sp, #0] ");			// set resched flag
	asm("stmfd	sp!, {r0,r2} ");			// store SPSR_SVC
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
#ifdef __CPU_ARMV7
	asm("mrc	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
#else
	asm("mov	r7, #0 ");
#endif
	GET_RWRO_TID(,r8);						// r8 = User RO Thread ID
	GET_RWRW_TID(,r9);						// r9 = User RW Thread ID
#ifdef __CPU_HAS_VFP
	VFP_FMRX(,0,VFP_XREG_FPEXC);			// r0 = FPEXC
	asm("bic r0, r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Store FPEXC with VFP disabled in case this thread runs on a different core next time
#else
	asm("mov	r0, #0 ");
#endif
	GET_CAR(,	r1);						// r1 = CAR
	asm("mrc	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR
	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));

	// Save auxiliary registers
	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));
	asm("str	sp, [r5, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// store original thread's stack pointer
	asm("stmia	sp, {r0-r1,r7-r9,r12} ");

	// We must move to a temporary stack before selecting the next thread.
	// This is because another CPU may begin executing this thread before the
	// select_next_thread() function returns and our stack would then be
	// corrupted. We use the stack belonging to this CPU's initial thread since
	// we are guaranteed that will never run on another CPU.
	asm("ldr	sp, [r4, #%a0]" : : "i" _FOFF(NThread,iSavedSP));

	asm("select_thread: ");
	asm("mov	r0, r11 ");
	asm("bl "	CSM_ZN13TSubScheduler16SelectNextThreadEv );	// also sets r0->iCurrentThread
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r2, __BTraceFilter ");
#endif
	asm("movs	r3, r0 ");					// r3 = new thread (might be 0)
	asm("ldrne	sp, [r0, #%a0]" : : "i" _FOFF(NThread,iSavedSP));	// if a thread has been selected, move to its stack
	asm("beq	no_thread ");				// branch out if no thread is ready

#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r1, [r2, #4] ");			// check category 4 trace
	asm("cmp	r1, #0 ");
	asm("beq	1f ");
	asm("stmfd	sp!, {r0-r3} ");
	asm("bl		NewThreadTrace ");
	asm("ldmfd	sp!, {r0-r3} ");
	asm("1: ");
#endif	// BTRACE_CPU_USAGE

	asm("cmp	r3, r5 ");					// same thread?
	asm("beq	same_thread ");
	asm("ldrb	r1, [r3, #%a0]" : : "i" _FOFF(NThreadBase, i_ThrdAttr));
	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
	asm("mov	r2, r3, lsr #6 ");			// r2=current thread>>6
	asm("tst	r1, #%a0" : : "i" ((TInt)KThreadAttAddressSpace));	// address space required?
	asm("ldrne	r4, [r4, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler));	// if so, get pointer to process handler

	// we are doing a thread switch so restore new thread's auxiliary registers
	// R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
	asm("ldmia	sp, {r0-r1,r7-r9,r12} ");

#ifdef __CPU_ARMV7
	asm("mcr	p14, 6, r7, c1, c0, 0 ");	// r7 = TEEHBR
#endif
	SET_RWRO_TID(,r8);						// r8 = User RO Thread ID
	SET_RWRW_TID(,r9);						// r9 = User RW Thread ID
#ifdef __CPU_HAS_VFP
	VFP_FMXR(,VFP_XREG_FPEXC,0);			// r0 = FPEXC
#endif
	SET_CAR(,	r1);						// r1 = CAR
	asm("mcr	p15, 0, r12, c3, c0, 0 ");	// r12 = DACR

	asm("beq	no_as_switch ");			// skip if address space change not required

	// Do address space switching
	// Handler called with:
	// r11->subscheduler, r3->current thread
	// r9->new address space, r5->old address space
	// Must preserve r10,r11,r3, can modify other registers
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iAddressSpace));	// get current address space ptr
	asm("ldr	r9, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iAddressSpace));		// get new address space ptr
	asm("adr	lr, as_switch_return ");
	__JUMP(,	r4);

	asm("no_as_switch: ");
	asm("mrc	p15, 0, r4, c13, c0, 1 ");	// r4 = CONTEXTID (threadID:ASID)
	asm("and	r4, r4, #0xff ");			// isolate ASID
	asm("orr	r2, r4, r2, lsl #8 ");		// r2 = new ContextID (new thread ID : ASID)
	__DATA_SYNC_BARRIER_Z__(r12);			// needed before change to ContextID
	asm("mcr	p15, 0, r2, c13, c0, 1 ");	// set ContextID (ASID + debugging thread ID)
	__INST_SYNC_BARRIER__(r12);
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
	asm("mcr	p15, 0, r12, c7, c5, 6 ");	// flush BTAC
#endif

	asm("as_switch_return: ");
	asm("same_thread: ");
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));	// step past auxiliary registers
	asm("ldmib	sp!, {r2,r12} ");			// r2=SPSR_SVC, r12=original SP + resched flag
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("msr	spsr, r2 ");				// restore spsr_svc
	asm("mov	r0, r11 ");
	asm("mov	r2, r12 ");					// r2 = original SP + reschedule flag
	asm("cmp	r1, #0 ");					// check for more IDFCs and/or another reschedule
	asm("bne	start_resched ");			// loop if required
	asm("ldr	r14, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("cmp	r14, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
	asm("ldr	lr, [sp, #4] ");			// restore R10, R11, return address
	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
	asm("beq	resched_thread_divert ");

	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
	//				R12=iReschedIPIs
	__JUMP(,	lr);

	asm("no_resched_needed: ");
	asm("ldr	r3, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("mov	r0, r11 ");
	asm("str	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("cmp	r2, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
	asm("ldmfd	sp, {r2,lr} ");				// r2 = original SP + reschedule flag, restore lr
	asm("bic	sp, r2, #3 ");				// restore initial unaligned stack pointer
	asm("and	r2, r2, #1 ");				// r2 = reschedule flag
	asm("beq	resched_thread_divert ");

	// Return with:	R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
	//				R12=iReschedIPIs
	__JUMP(,	lr);

	asm("resched_thread_divert: ");
	asm("mov	r1, #1 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("bic	sp, sp, #4 ");				// align stack
	asm("stmfd	sp!, {r0-r5,r12,lr} ");		// save registers for diagnostic purposes
	asm("mov	r4, r3 ");					// don't really need to bother about registers since thread is exiting

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));

	__ASM_STI();
	asm("ldrb	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
	asm("cmp	r1, #1 ");
	asm("bne	1f ");
	__ASM_CRASH();
	asm("1: ");
	asm("mov	r2, #0 ");
	asm("strb	r2, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
	asm("mov	r0, r4 ");
	asm("bl "	CSM_ZN11NThreadBase4ExitEv );
	__ASM_CRASH();	// shouldn't get here

	// There is no thread ready to run
	// R11->TSubScheduler, R1=unknown, R2=0, R3=__BTraceFilter, R12=unknown
	asm("no_thread: ");
	__ASM_CLI();
	asm("ldr	r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("mov	r0, r11 ");
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	__ASM_STI();
	__DATA_SYNC_BARRIER_Z__(r1);
	ARM_WFI;
	asm("no_thread2: ");
	asm("ldr	r1, [r11, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3));	// check iDfcPendingFlag and iExIDfcPendingFlag
	asm("mov	r0, r11 ");
	asm("movs	r1, r1, lsr #16 ");
	asm("beq	no_thread ");
	asm("bl "	CSM_ZN13TSubScheduler9QueueDfcsEv);		// queue any pending DFCs
	asm("ldrb	r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("cmp	r1, #0 ");					// check if a reschedule is required
	asm("beq	no_thread2 ");
	asm("b		select_thread ");



/******************************************************************************
 Missed out stuff:
	EMI EVENT LOGGING
	__CPU_ARM1136_ERRATUM_351912_FIXED
	Debug hooks in the scheduler
 ******************************************************************************/

	asm("__BTraceFilter: ");
	asm(".word %a0 " : : "i" ((TInt)&BTraceData.iFilter[0]));
	};


/** 
 * Returns the range of linear memory which inserting the scheduler hooks needs to modify.
 * 
 * @param aStart Set to the lowest memory address which needs to be modified.
 * @param aEnd   Set to the highest memory address +1 which needs to be modified.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r2,resched_trampoline_hook_address");
	asm("str r2,[r0]");
	asm("adr r2,resched_trampoline_hook_address+4");
	asm("str r2,[r1]");
#else
	asm("mov r2,#0");
	asm("str r2,[r0]");
	asm("str r2,[r1]");
#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Modifies the scheduler code so that it can call the function set by
 * NKern::SetRescheduleCallback().
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r0,resched_trampoline_hook_address");
	asm("adr r1,resched_trampoline");
	asm("sub r1, r1, r0");
	asm("sub r1, r1, #8");
	asm("mov r1, r1, asr #2");
	asm("add r1, r1, #0xea000000");  // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
	// These platforms have shadow memory in a non-writable page. We cannot use the standard
	// Epoc::CopyToShadowMemory interface as we hold the kernel lock here.
	// Instead, we'll temporarily disable access permission checking in the MMU by switching
	// domain#0 into Manager Mode (see Domain Access Control Register).
	asm("mrs r12, CPSR ");				// save cpsr setting and ...
	CPSIDAIF;							// ...disable interrupts
	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
	asm("str r1,[r0]");
	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
#else
	asm("str r1,[r0]");
#endif

#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Reverts the modification of the Scheduler code performed by NKern::InsertSchedulerHooks()
 *
 * This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("adr r0,resched_trampoline_hook_address");
	asm("ldr r1,resched_trampoline_unhook_data");

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
	// See comments above in InsertSchedulerHooks
	asm("mrs r12, CPSR ");				// save cpsr setting and ...
	CPSIDAIF;							// ...disable interrupts
	asm("mrc p15, 0, r2, c3, c0, 0 ");	// read DACR
	asm("orr r3, r2, #3");				// domain #0 is the first two bits. manager mode is 11b
	asm("mcr p15, 0, r3, c3, c0, 0 ");	// write DACR
	asm("str r1,[r0]");
	asm("mcr p15, 0, r2, c3, c0, 0 ");	// write back the original value of DACR
	asm("msr CPSR_cxsf, r12 "); 		// restore cpsr setting (re-enable interrupts)
#else
	asm("str r1,[r0]");
#endif

#endif
#endif
	__JUMP(,lr);
	};


/** 
 * Set the function which is to be called on every thread reschedule.
 *
 * @param aCallback  Pointer to callback function, or NULL to disable callback.

 @pre	Kernel must be locked.
 @pre	Call in a thread context.
 @pre	Interrupts must be enabled.
 */
EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
	{
#if 0
	ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
	asm("ldr r1, __TheScheduler ");
	asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
#endif
#endif
	__JUMP(,lr);
	};



/** Disables interrupts to specified level.

	Note that if we are not disabling all interrupts we must lock the kernel
	here, otherwise a high priority interrupt which is still enabled could
	cause a reschedule and the new thread could then reenable interrupts.

	@param  aLevel Interrupts are disabled up to and including aLevel. On ARM,
			level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
	@return CPU-specific value passed to RestoreInterrupts.

	@pre 1 <= aLevel <= maximum level (CPU-specific)

	@see NKern::RestoreInterrupts()
 */
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
	{
#ifdef __FIQ_IS_UNCONTROLLED__
	asm("mrs	r1, cpsr ");
	asm("cmp	r0, #0 ");
	asm("beq	1f ");
	__ASM_CLI();
	asm("1: ");
	asm("and	r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,	lr);
#else
	asm("cmp	r0, #1 ");
	asm("bhi "	CSM_ZN5NKern20DisableAllInterruptsEv);	// if level>1, disable all
	asm("mrs	r2, cpsr ");			// r2=original CPSR
	asm("bcc	1f ");					// skip if level=0
	__ASM_CLI();						// Disable all interrupts to prevent migration
	GET_RWNO_TID(,r12);					// r12 -> TSubScheduler
	asm("ldr	r3, [r12, #%a0]!" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("and	r0, r2, #0xc0 ");
	asm("cmp	r3, #0 ");				// test if kernel locked
	asm("addeq	r3, r3, #1 ");			// if not, lock the kernel
	asm("streq	r3, [r12] ");
	asm("orreq	r0, r0, #0x80000000 ");	// and set top bit to indicate kernel locked
	__ASM_STI2();						// reenable FIQs only
	__JUMP(,	lr);
	asm("1: ");
	asm("and	r0, r2, #0xc0 ");
	__JUMP(,	lr);
#endif
	}
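/*	Usage sketch (illustrative only): the value returned above must be handed
	back to NKern::RestoreInterrupts(), which also unlocks the kernel if the
	top bit indicates that DisableInterrupts() locked it.

		TInt irq = NKern::DisableInterrupts(1);	// mask IRQs, leave FIQs enabled
		// ... touch state shared with an IRQ handler ...
		NKern::RestoreInterrupts(irq);			// restore the previous level
 */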


/** Disables all interrupts (e.g. both IRQ and FIQ on ARM).

	@return CPU-specific value passed to NKern::RestoreInterrupts().

	@see NKern::RestoreInterrupts()
 */
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
	{
	asm("mrs r1, cpsr ");
	asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__ASM_CLI();
	__JUMP(,lr);
	}


/** Enables all interrupts (e.g. IRQ and FIQ on ARM).

	This function never unlocks the kernel. So it must be used
	only to complement NKern::DisableAllInterrupts. Never use it
	to complement NKern::DisableInterrupts.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()

	@internalComponent
 */
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
	{
	__ASM_STI();
	__JUMP(,lr);
	}


/** Restores interrupts to previous level and unlocks the kernel if it was
	locked when disabling them.

	@param 	aRestoreData CPU-specific data returned from NKern::DisableInterrupts
			or NKern::DisableAllInterrupts specifying the previous interrupt level.

	@see NKern::DisableInterrupts()
	@see NKern::DisableAllInterrupts()
 */
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
	{
	asm("tst r0, r0 ");					// test state of top bit of aLevel
	asm("mrs r1, cpsr ");
	asm("and r0, r0, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("bic r1, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("orr r1, r1, r0 ");				// replace I and F bits with those supplied
	asm("msr cpsr_c, r1 ");				// flags are unchanged (in particular N)
	__JUMP(pl,lr);						// if top bit of aLevel clear, finished

	// if top bit of aLevel set, fall through to unlock the kernel
	}


/**	Unlocks the kernel.

	Decrements iKernLockCount for current CPU; if it becomes zero and IDFCs
	or a reschedule are pending, calls the scheduler to process them.
	Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Unlock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
	__ASM_CLI();							// interrupts off
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("subs	r3, r1, #1 ");
	asm("strne	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("bne	0f ");						// kernel still locked -> return
	asm("cmp	r2, #0 ");					// check for DFCs or reschedule
	asm("bne	1f ");
	asm("cmp	r12, #0 ");					// IPIs outstanding?
	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));	// unlock the kernel
	asm("bne	2f ");
	asm("0: ");
	__ASM_STI();							// interrupts back on
	__JUMP(,lr);

	// need to run IDFCs and/or reschedule
	asm("1: ");
	asm("stmfd	sp!, {r0,r4-r11,lr} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
	asm(".global nkern_unlock_resched_return ");
	asm("nkern_unlock_resched_return: ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,r4-r11,lr} ");
	__ASM_STI();
	__JUMP(,lr);

	asm("2:		");
	asm("stmfd	sp!, {r0,lr} ");
	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,lr} ");
	__ASM_STI();
	__JUMP(,lr);
	}


/**	Locks the kernel.

	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
	and preemption. Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ void NKern::Lock()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	__ASM_CLI();
	GET_RWNO_TID(,r12);
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("add r3, r3, #1 ");			// lock the kernel
	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__JUMP(,lr);
	}
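/*	Usage sketch (illustrative only): Lock()/Unlock() bracket a preemption-free
	region; Unlock() runs any IDFCs or reschedule that became pending while the
	kernel was locked.

		NKern::Lock();
		// ... manipulate scheduler-protected state ...
		NKern::Unlock();		// may reschedule here if one is pending
 */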


/**	Locks the kernel and returns a pointer to the current thread.

	Increments iKernLockCount for the current CPU, thereby deferring IDFCs
	and preemption. Must be called in mode_svc.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ NThread* NKern::LockC()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	__ASM_CLI();
	GET_RWNO_TID(,r12);
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add r3, r3, #1 ");			// lock the kernel
	asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();
	__JUMP(,lr);
	}


/**	Allows IDFCs and rescheduling if they are pending.

	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
	calls the scheduler to process the IDFCs and possibly reschedule.
	Must be called in mode_svc.

	@return	Nonzero if a reschedule actually occurred, zero if not.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
 */
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
	{
	ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

	GET_RWNO_TID(,r0)						// r0=&SubScheduler()
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	asm("cmp	r1, #1 ");
	asm("bgt	0f ");						// if locked more than once return FALSE
	asm("cmp	r2, #0 ");					// locked once and IDFCs/reschedule pending?
	asm("bne	1f ");						// skip if so
	asm("cmp	r12, #0 ");					// locked once and resched IPIs outstanding?
	asm("bne	2f ");						// skip if so
	asm("0:		");
	asm("mov	r0, #0 ");
	__JUMP(,	lr);						// else return FALSE

	// need to run IDFCs and/or reschedule
	asm("1:		");
	asm("stmfd	sp!, {r1,r4-r11,lr} ");
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );
	asm(".global nkern_preemption_point_resched_return ");
	asm("nkern_preemption_point_resched_return: ");
	asm("str	r2, [sp] ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,r4-r11,lr} ");		// return TRUE if reschedule occurred
	__ASM_STI();
	__JUMP(,	lr);

	asm("2:		");
	asm("stmfd	sp!, {r2,lr} ");
	asm("bl "	CSM_CFUNC(send_accumulated_resched_ipis));
	asm("ldmfd	sp!, {r0,lr} ");			// return TRUE if reschedule occurred
	__ASM_STI();
	__JUMP(,	lr);
	}
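/*	Usage sketch (illustrative only; MoreWorkToDo()/DoSomeWork() are
	hypothetical helpers): a long-running operation that holds the kernel
	locked can offer a reschedule opportunity without fully dropping the lock.

		NKern::Lock();
		while (MoreWorkToDo())
			{
			DoSomeWork();
			NKern::PreemptionPoint();	// run pending IDFCs/reschedule, if any
			}
		NKern::Unlock();
 */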


#ifdef __CPU_HAS_VFP
// Do the actual VFP context save
__NAKED__ void VfpContextSave(void*)
	{
	VFP_FMRX(,1,VFP_XREG_FPEXC);
	asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );		// Check to see if VFP in use
	__JUMP(eq, lr);											// Return immediately if not
	asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX) );		// Check to see if an exception has occurred
	asm("beq 1f ");											// Skip ahead if not
	asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));
	VFP_FMXR(,VFP_XREG_FPEXC,1);							// Reset exception flag
	asm("orr r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));	// But store it for later
	asm("1: ");


	VFP_FMRX(,2,VFP_XREG_FPSCR);
	asm("stmia	r0!, {r2} ");								// Save FPSCR

#ifndef __VFP_V3
	VFP_FMRX(,2,VFP_XREG_FPINST);
	VFP_FMRX(,3,VFP_XREG_FPINST2);
	asm("stmia	r0!, {r2-r3} ");							// Save FPINST, FPINST2
#endif

	VFP_FSTMIADW(CC_AL,0,0,16);								// Save D0 - D15

#ifdef __VFP_V3
	VFP_FMRX(,2,VFP_XREG_MVFR0);
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
	asm("beq 0f ");											// Skip ahead if not
	GET_CAR(,r2);
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if access to the upper 16 registers is disabled
	VFP_FSTMIADW(CC_EQ,0,16,16);							// If not then save D16 - D31
#endif

	asm("0: ");
	asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	VFP_FMXR(,VFP_XREG_FPEXC,1);							// Disable VFP

	__JUMP(,lr);
	}
#endif


/** Check if the kernel is locked the specified number of times.

	@param aCount	The number of times the kernel should be locked.
					If zero, tests if it is locked at all.
	@return TRUE if the tested condition is true.

	@internalTechnology
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r3);
	asm("movs	r1, r0 ");			// r1 = aCount
	asm("ldr	r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("moveq	r1, r0 ");			// if aCount=0, aCount=iKernLockCount
	asm("cmp	r1, r0 ");
	asm("movne	r0, #0 ");			// if aCount!=iKernLockCount, return FALSE else return iKernLockCount
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}
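/*	Usage sketch (illustrative only; assumes the usual nkern debug assert
	macro is available): typically used to assert that a caller honours the
	locking contract.

		__NK_ASSERT_DEBUG(NKern::KernelLocked(1));	// exactly one Lock() outstanding
 */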


// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
	{
	GET_RWNO_TID(,r0);
	__JUMP(,lr);
	}

/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("cmp	r0, #0 ");
	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}


/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
			disabled or with preemption disabled.
*/
extern "C" __NAKED__ NThread* NCurrentThreadL()
	{
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	__JUMP(,lr);
	}


/** Returns the CPU number of the calling CPU.

	@return the CPU number of the calling CPU.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
	{
	asm("mrs	r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(,r0);
	asm("cmp	r0, #0 ");
	asm("ldrne	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));
	asm("msr	cpsr, r12 ");
	__JUMP(,lr);
	}


/**	Returns the current processor context type (thread, IDFC or interrupt).

	@return	A value from NKern::TContext enumeration (but never EEscaped).

	@pre	Call in any context.

	@see	NKern::TContext
 */
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
	{
	asm("mrs r1, cpsr ");
	__ASM_CLI();							// interrupts off to stop migration
	GET_RWNO_TID(,r3);						// r3 = &SubScheduler()
	asm("mov r0, #2 ");						// 2 = interrupt
	asm("and r2, r1, #0x1f ");				// r2 = processor mode
	asm("cmp r2, #0x13 ");
	asm("ldreqb r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iInIDFC));
	asm("bne 0f ");							// if not svc, must be interrupt
	asm("cmp r0, #0 ");
	asm("movne r0, #1 ");					// if iInIDFC, return 1 else return 0
	asm("0: ");
	asm("msr cpsr, r1 ");					// restore interrupts
	__JUMP(,lr);
	}
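/*	Usage sketch (illustrative only): per the code above the return value is
	0 in a thread, 1 in an IDFC and 2 in an interrupt; the enumerator names
	below are those of the NKern::TContext declaration referenced in the doc
	comment and are assumed here.

		if (NKern::CurrentContext() == NKern::EInterrupt)
			return KErrNotSupported;	// refuse to run this path from an ISR
 */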


extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
	{
	__DATA_SYNC_BARRIER_Z__(r3);			// need DSB before sending any IPI
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("mov	r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
	asm("orr	r1, r1, r3, lsl #16 ");
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}

// Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
// Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
// Return with R0 unaltered.
extern "C" __NAKED__ void send_accumulated_resched_ipis()
	{
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
	asm("mov	r1, #0 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
	__DATA_SYNC_BARRIER__(r1);				// need DSB before sending any IPI
	asm("mov	r1, r12, lsl #16 ");
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}

// Send a reschedule IPI to the specified CPU
extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	ASM_DEBUG1(SendReschedIPI,r0);
	asm("mov	r1, #0x10000 ");
	asm("mov	r1, r1, lsl r0 ");	// 0x10000<<aCpu
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__JUMP(,lr);
	}

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
	{
	GET_RWNO_TID(,r3);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("ldr	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("mov	r1, #0x02000000 ");			// target = requesting CPU only
//	asm("orr	r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPI
	__JUMP(,lr);
	}

extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
	{
	ASM_DEBUG1(SendReschedIPIs,r0);
	__DATA_SYNC_BARRIER_Z__(r2);			// need DSB before sending any IPI
	asm("cmp	r0, #0 ");		// any bits set in aMask?
	GET_RWNO_TID(ne,r3);
	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("movne	r0, r0, lsl #16 ");
//	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
	__JUMP(,lr);
	}


extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
	{
	asm("ldr	r1, __TheSubSchedulers ");
	asm("mov	r2, #0x10000 ");
	asm("mov	r2, r2, lsl r0 ");	// 0x10000<<aCpu
	ASM_DEBUG1(SendReschedIPIAndWait,r0);
	asm("add	r0, r1, r0, lsl #9 ");	// sizeof(TSubScheduler)=512
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
	__DATA_SYNC_BARRIER_Z__(r1);		// make sure i_IrqCount is read before IPI is sent
//	asm("orr	r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));	RESCHED_IPI_VECTOR=0
	asm("str	r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs
	__DATA_SYNC_BARRIER__(r1);			// make sure IPI has been sent
	asm("1: ");
	asm("ldrb	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
	asm("cmp	r1, #0 ");
	asm("beq	0f ");					// iRescheduleNeededFlag not set -> wait
	asm("cmp	r2, #0 ");
	asm("bge	2f ");					// if other CPU is in an ISR, finish
	asm("cmp	r3, r12 ");				// if not, has i_IrqCount changed?
	asm("0: ");
	ARM_WFEcc(CC_EQ);					// if not, wait for something to happen ...
	asm("beq	1b ");					// ... and loop
	asm("2: ");
	__DATA_MEMORY_BARRIER__(r1);		// make sure subsequent memory accesses don't jump the gun
										// guaranteed to observe final thread state after this
	__JUMP(,lr);

	asm("__TheSubSchedulers: ");
	asm(".word TheSubSchedulers ");
	}
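/*	The wait loop above, restated as an illustrative C-like sketch ("target"
	and "initial_irq_count" are names introduced here, not fields of the real
	structures): after raising the IPI the caller sleeps on WFE until the
	target CPU has either started handling an interrupt (so it will act on the
	reschedule flag on exit) or is known to have taken one since the IPI was
	sent.

		for (;;)
			{
			if (target->iRescheduleNeededFlag)
				{
				if (target->i_IrqNestCount >= 0)
					break;		// target is inside an ISR
				if (target->i_IrqCount != initial_irq_count)
					break;		// target has taken an interrupt since the IPI
				}
			wfe();				// otherwise sleep, then re-check
			}
 */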

/*	If the current thread is subject to timeslicing, update its remaining time
	from the current CPU's local timer. Don't stop the timer.
	If the remaining time is negative, save it as zero.
 */
__NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
	{
	asm("ldr	r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("ldrb	r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
	asm("cmp	r3, #0 ");
	asm("ble	0f ");					// thread isn't timesliced or timeslice already expired so skip
	asm("cmp	r12, #0 ");
	asm("bne	0f ");					// initial (i.e. idle) thread, so skip
	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
	asm("cmp	r3, #0 ");
	asm("movmi	r0, #0 ");				// if timer count is negative, save zero
	asm("bmi	1f ");
	asm("umull	r0, r3, r12, r3 ");		// scale up to max timer clock
	asm("adds	r0, r0, #0x00800000 ");
	asm("adcs	r3, r3, #0 ");
	asm("mov	r0, r0, lsr #24 ");
	asm("orr	r0, r0, r3, lsl #8 ");
	asm("1:		");
	asm("str	r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("0:		");
	__JUMP(,lr);
	}

/*	Update aOld's execution time and set up the timer for aNew
	Update this CPU's timestamp value

	if (!aOld) aOld=iInitialThread
	if (!aNew) aNew=iInitialThread
	newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
	cli()
	oldcount = timer count
	if (oldcount<=0 || aOld!=aNew)
		{
		timer count = newcount
		elapsed = i_LastTimerSet - oldcount
		i_LastTimerSet = newcount
		elapsed = elapsed * i_TimerMultI / 2^24
		aOld->iTotalCpuTime64 += elapsed
		correction = i_TimestampError;
		if (correction > i_MaxCorrection)
			correction = i_MaxCorrection
		else if (correction < -i_MaxCorrection)
			correction = -i_MaxCorrection
		i_TimestampError -= correction
		i_LastTimestamp += elapsed + i_TimerGap - correction
		}
	sti()
 */
__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
	{
	asm("cmp	r2, #0 ");
	asm("ldreq	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
	asm("cmp	r1, #0 ");
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
	asm("ldr	r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
	asm("stmfd	sp!, {r4-r7} ");
	asm("ldr	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("cmp	r1, r2 ");
	asm("beq	2f ");
	asm("adds	r6, r6, #1 ");
	asm("str	r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
	asm("ldr	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("ldr	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("adcs	r7, r7, #0 ");
	asm("str	r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
	asm("adds	r4, r4, #1 ");
	asm("adcs	r6, r6, #0 ");
	asm("str	r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
	asm("str	r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
	asm("2:		");
	asm("cmp	r3, #1 ");					// aNew->iTime > 0 ?
	asm("umullge r4, r3, r12, r3 ");
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
	asm("movlt	r3, #0x7fffffff ");
	asm("addges	r3, r3, r4, lsr #31 ");		// round up top 32 bits if bit 31 set
	asm("moveq	r3, #1 ");					// if result zero, limit to 1
	asm("ldr	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	__ASM_CLI();
	asm("ldr	r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
	asm("cmp	r1, r2 ");
	asm("bne	1f ");
	asm("cmp	r4, #0 ");
	asm("bgt	0f ");						// same thread, timeslice not expired -> leave timer alone
	asm("1:		");
	asm("str	r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));	// set new timeslice value in timer
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
	asm("str	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
	asm("sub	r12, r12, r4 ");			// r12 = elapsed (actual timer ticks)
	asm("umull	r4, r5, r12, r5 ");
	asm("ldr	r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
	asm("ldr	r12, [r1, #4] ");
	asm("adds	r4, r4, #0x00800000 ");
	asm("adcs	r5, r5, #0 ");
	asm("mov	r4, r4, lsr #24 ");
	asm("orr	r4, r4, r5, lsl #8 ");		// r4 = elapsed
	asm("adds	r3, r3, r4 ");
	asm("adcs	r12, r12, #0 ");
	asm("stmia	r1, {r3,r12} ");			// aOld->iTotalCpuTime64 += elapsed
	asm("ldr	r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("ldr	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("mov	r12, r3 ");
	asm("cmp	r3, r5 ");
	asm("movgt	r3, r5 ");					// if (correction>i_MaxCorrection) correction=i_MaxCorrection
	asm("cmn	r3, r5 ");
	asm("rsblt	r3, r5, #0 ");				// if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
	asm("ldr	r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
	asm("sub	r12, r12, r3 ");
	asm("str	r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
	asm("add	r4, r4, r5 ");				// r4 = elapsed + i_TimerGap
	asm("adds	r1, r1, r4 ");
	asm("adcs	r2, r2, #0 ");				// iLastTimestamp64 + (elapsed + i_TimerGap)
	asm("subs	r1, r1, r3 ");
	asm("sbcs	r1, r1, r3, asr #32 ");		// iLastTimestamp64 + (elapsed + i_TimerGap - correction)
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
	asm("str	r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
	asm("0:		");
	__ASM_STI();
	asm("ldmfd	sp!, {r4-r7} ");
	__JUMP(,lr);
	}
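/*	Worked example of the fixed-point scaling used above (the numeric value of
	i_TimerMultI is illustrative, not taken from any real board port):
	i_TimerMultI is a multiplier with 24 fractional bits, so

		elapsed = (timer_ticks * i_TimerMultI + 2^23) >> 24

	which is what the UMULL, the ADDS #0x00800000 rounding step and the
	LSR #24 / ORR ... LSL #8 recombination compute on the 64-bit product.
	For instance, with i_TimerMultI = 0x04000000 (a scale factor of 4.0) and
	1000 timer ticks, the product is 0xFA0000000; adding 0x800000 and shifting
	right by 24 gives 0xFA0 = 4000 timestamp units.
 */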