os/kernelhwsrv/kernel/eka/nkernsmp/arm/vectors.cia
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\vectors.cia
//
//

#include <e32cia.h>
#include <arm.h>
#include <arm_gic.h>
#include <arm_scu.h>
#include <arm_tmr.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

extern "C" void send_accumulated_resched_ipis();

extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType);

extern "C" {
extern TUint32 CrashStateOut;
extern SFullArmRegSet DefaultRegSet;
}

#ifdef BTRACE_CPU_USAGE
extern "C" void btrace_irq_exit();
extern "C" void btrace_fiq_exit();
#endif
#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

//#define __FAULT_ON_FIQ__

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	{
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	asm("cmp	r12, #0 ");
	asm("beq	1f ");
	__ASM_CRASH();
	asm("1:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp	r12, #0 ");
	asm("beq	2f ");
	__ASM_CRASH();
	asm("2:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(NThread,iCsCount));
	asm("cmp	r12, #0 ");
	asm("beq	3f ");
	__ASM_CRASH();
	asm("3:		");
	GET_RWNO_TID(,r12);
	asm("ldr	r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("ldrh	r12, [r12, #%a0]" : : "i" _FOFF(NSchedulable,iFreezeCpu));
	asm("cmp	r12, #0 ");
	asm("beq	4f ");
	__ASM_CRASH();
	asm("4:		");
	__JUMP(,lr);
	}
#endif
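
// Illustrative pseudocode only (not part of the original source; SubScheduler()/CurrentThread() are
// hypothetical helpers standing in for the GET_RWNO_TID/iCurrentThread loads above):
//	if (SubScheduler().iKernLockCount)        crash;	// kernel must be unlocked
//	if (CurrentThread()->iHeldFastMutex)      crash;	// no fast mutex may be held
//	if (CurrentThread()->iCsCount)            crash;	// thread must not be in a critical section
//	if (CurrentThread()->iFreezeCpu)          crash;	// thread must not have frozen its CPU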

//#define	__RECORD_STATE__
#ifdef __RECORD_STATE__
#define RECORD_STATE()				\
	asm("ldr r3, [sp, #68] ");		\
	asm("mov r1, sp ");				\
	asm("bic r12, sp, #0xff ");		\
	asm("bic r12, r12, #0xf00 ");	\
	asm("add r12, r12, #24 ");		\
	asm("tst r3, #0x0f ");			\
	asm("addne pc, pc, #12 ");		\
	asm("ldmia r1!, {r2-r11} ");	\
	asm("stmia r12!, {r2-r11} ");	\
	asm("ldmia r1!, {r2-r9} ");		\
	asm("stmia r12!, {r2-r9} ")

#define RECORD_STATE_EXC()			\
	asm("ldr r3, [sp, #92] ");		\
	asm("mov r12, sp ");			\
	asm("bic lr, sp, #0xff ");		\
	asm("bic lr, lr, #0xf00 ");		\
	asm("tst r3, #0x0f ");			\
	asm("addne pc, pc, #12 ");		\
	asm("ldmia r12!, {r0-r11} ");	\
	asm("stmia lr!, {r0-r11} ");	\
	asm("ldmia r12!, {r0-r11} ");	\
	asm("stmia lr!, {r0-r11} ");
#else
#define RECORD_STATE()
#define RECORD_STATE_EXC()
#endif
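
// Note (editorial, not in the original source): when __RECORD_STATE__ is defined these debug macros
// appear to copy the exception frame just built on the supervisor stack to a fixed area near the
// 4KB-aligned base of that stack (sp with its low 12 bits cleared), and only when the saved CPSR
// indicates a return to user mode, so the last user-bound exception frame can be inspected post mortem.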

#ifdef __USER_MEMORY_GUARDS_ENABLED__
// This macro can be invoked just before a return-from-exception instruction
// It will cause an UNDEF exception if we're about to return to user mode with UMG still on
#define	USER_MEMORY_GUARD_CHECK()											\
	asm("stmfd	sp!, {lr}");												\
	asm("ldr	lr, [sp, #8]");						/* lr<-future CPSR	*/	\
	USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(lr);							\
	asm("ldmfd	sp!, {lr}");
#else
#define	USER_MEMORY_GUARD_CHECK()
#endif

/******************************************************************************
 * SWI Handler
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorSwi()
	{
	// IRQs disabled, FIQs enabled here
	__ASM_CLI();							// all interrupts off
	SRSDBW(MODE_SVC);						// save return address and return CPSR to supervisor stack
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	asm("mov	r4, #%a0" : : "i" ((TInt)SThreadExcStack::ESvc));
	USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
	asm("ldr	r12, [lr, #-4] ");			// get SWI opcode
	GET_RWNO_TID(,r11);
	asm("str	r4, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("movs	r12, r12, lsl #9 ");		// 512*SWI number into r12
	asm("adr	lr, fast_swi_exit ");
	asm("ldr	r9, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("bcc	slow_swi ");				// bit 23=0 for slow/unprot
	asm("mov	r1, r9 ");
	asm("beq	wait_for_any_request ");	// special case for Exec::WaitForAnyRequest
	asm("ldr	r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	asm("ldr	r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
	asm("ldr	r2, [r2] ");				// r2->kernel function
	asm("cmp	r3, r12, lsr #9 ");			// r3-SWI number
	__JUMP(hi,	r2);						// if SWI number valid, call kernel function
	asm("mvn	r12, #0 ");					// put invalid SWI number into r12
	asm("b		slow_swi ");					// go through slow SWI routine to call invalid SWI handler

#ifndef __FAST_SEM_MACHINE_CODED__
	asm("wait_for_any_request: ");
	__ASM_STI();							// all interrupts on
	asm("b		WaitForAnyRequest__5NKern ");
#else
	asm(".global exec_wfar_wait ");
	asm("exec_wfar_wait: ");
	asm("mov	r2, #1 ");
	asm("str	r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));			// else lock the kernel
	__ASM_STI();
	asm("strb	r2, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));	// and set the reschedule flag
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv );	// reschedule
	asm(".global exec_wfar_resched_return ");
	asm("exec_wfar_resched_return: ");
	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r3 ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));

	asm(".global exec_wfar_finish ");
	asm("exec_wfar_finish: ");
	asm("mrs	r1, spsr ");
	asm("tst	r1, #0x0f ");
	asm("bne	fast_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
											// and the UMG was not changed on entry so we don't reset it

#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode
	asm("b		fast_swi_exit2 ");
#endif

	asm("fast_swi_exit: ");
#if defined(__CHECK_LOCK_STATE__) || defined(__USER_MEMORY_GUARDS_ENABLED__)
	asm("mrs	r12, spsr ");
	asm("tst	r12, #0x0f ");
	asm("bne	fast_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
											// and the UMG was not changed on entry so we don't restore it
#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode
#endif

	asm("fast_swi_exit2: ");
	RECORD_STATE();
	asm("ldmib	sp, {r1-r14}^ ");			// restore R1-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function


	asm("slow_swi: ");						// IRQs and FIQs off here
	__ASM_STI();							// all interrupts on
	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
	asm("mrs	r11, spsr ");				// spsr_svc into r11
	asm("adr	lr, slow_swi_exit ");
	asm("ldr	r5, [r4, #-12] ");			// r5=limit
	asm("add	r6, r4, r12, lsr #6 ");		// r6->dispatch table entry
	asm("cmp	r5, r12, lsr #9 ");			// r5-SWI number
	asm("ldmhiia r6, {r5,r6} ");			// if SWI number OK, flags into r5, function addr into r6
	asm("ldrls	pc, [r4, #-8] ");			// if SWI number invalid, call invalid handler, returning to slow_swi_exit below

	// Acquire system lock if necessary: warning - any scratch registers modified after __ArmVectorSwi()
	// function preamble will be restored after call to NKern::LockSystem() with stale values.
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagClaim));	// claim system lock?
	asm("beq	slow_swi_no_wait ");						// skip if not
	asm("bl "	CSM_ZN5NKern10LockSystemEv );
	asm("ldmia	sp, {r0-r3} ");								// reload original values
	asm("slow_swi_no_wait: ");

	// Check to see if extra arguments are needed.  Needs to be placed after call to NKern::LockSystem()
	// above, as r2 is reloaded with its original value by the ldmia instruction above
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask));	// extra arguments needed?
	asm("addne	r2, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR4));	// if so, point r2 at saved registers on stack

	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess));	// preprocess (handle lookup)? can use r4, r7, r8, r12, r0
	asm("mov	lr, pc ");
	asm("ldrne	pc, [r4, #-4] ");			// call preprocess handler if required
	asm("orr	r5, r9, r5, lsr #30 ");		// r5 = current NThread pointer with bits 0,1 = (flags & (KExecFlagRelease|KExecFlagClaim))>>30
	asm("mov	lr, pc ");
	__JUMP(,	r6);						// call exec function, preserve r5,r11
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR0));	// save return value
	asm("bic	r9, r5, #3 ");				// r9 = current NThread pointer
	asm("tst	r5, #%a0" : : "i" ((TInt)KExecFlagRelease>>30));		// release system lock?
	asm("blne "	CSM_ZN5NKern12UnlockSystemEv );

	asm("slow_swi_exit: ");
	__ASM_CLI();
	asm("ldr	r4, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("tst	r11, #0x0f ");				// returning to user mode?
	asm("bne	slow_swi_exit2 ");			// not returning to user mode; in this case we don't run callbacks
											// and the UMG was not changed on entry so we don't reset it

#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
	USER_MEMORY_GUARD_OFF(,r12,r12);		// because we're returning to user mode

	asm("slow_swi_exit2: ");
	RECORD_STATE();
	asm("ldmia	sp, {r0-r14}^ ");			// R0=return value, restore R1-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function
	}


/******************************************************************************
 * IRQ Postamble
 * This routine is called after the IRQ has been dispatched
 * Enter in mode_sys
 * R4->TSubScheduler, R6->GIC CPU interface
 * For nested IRQ, R0-R12, R14_sys, return address, return CPSR are on top
 *	 of the mode_sys (i.e. current) stack
 * For non-nested IRQ, registers are saved on top of mode_svc stack and
 *	 pointed to by R5 in the order:
 *	 R5->R0 ... R12 R13_usr R14_usr <spare> PC CPSR
 *   and if user memory guards are active, R8 = saved DACR
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorIrq()
	{
	// Interrupts may be enabled here
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r10, __BTraceCpuUsageFilter ");
#endif
	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("ldrb	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iEventHandlersPending));
	__DATA_MEMORY_BARRIER_Z__(r2);
#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r10, [r10] ");
#endif
	asm("subs	r7, r7, #1 ");
	asm("bpl	nested_irq_exit ");
	asm("cmp	r0, #0 ");
	asm("beq	no_event_handlers ");
	asm("mov	r0, r4 ");
	asm("bl		run_event_handlers ");

	asm("no_event_handlers: ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mov	r11, r8 ");
#endif
	asm("ldr	r8, [r5, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));	// r8 = interrupted cpsr
	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	__ASM_CLI();							// all interrupts off
	asm("and	r2, r8, #0x1f ");
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp	r2, #0x10 ");				// interrupted mode_usr ?
	asm("cmpne	r2, #0x13 ");				// if not, interrupted mode_svc ?
	asm("cmpeq	r0, #0 ");					// if mode_usr or mode_svc, is kernel locked?
	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("bne	irq_kernel_locked_exit ");	// if neither or if kernel locked, exit immediately
	asm("cmp	r1, #0 ");					// If not, IDFCs/reschedule pending?
	asm("beq	irq_kernel_locked_exit ");	// if not, exit

	asm("mov	r1, #1 ");
	asm("str	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));	// lock the kernel
	__ASM_STI_MODE(MODE_SVC);				// mode_svc, interrupts on

	// Saved registers are on top of mode_svc stack
	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
	asm("irq_do_resched: ");
	asm("stmfd	sp!, {r11,lr} ");			// save user memory guard state, lr_svc
	asm("bl "	CSM_ZN10TScheduler10RescheduleEv);	// return with R3->current thread
	asm(".global irq_resched_return ");
	asm("irq_resched_return: ");

	asm("ldr	r8, [sp, #%a0]" : : "i" (_FOFF(SThreadExcStack,iCPSR)+8));		// have UMG, lr_svc on stack as well
	asm("ldr	r4, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r3 ");

	// need to send any outstanding reschedule IPIs
	asm("cmp	r12, #0 ");
	asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
	asm("tst	r8, #0x0f ");				// returning to user mode?
	asm("bne	irq_post_resched_exit ");	// if not, we don't check locks or run callbacks

#ifdef __CHECK_LOCK_STATE__
	asm("bl "	CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r4, #3 ");					// callbacks?
	asm("blhs	run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)

	asm("irq_post_resched_exit: ");
	asm("ldmfd	sp!, {r11,lr} ");			// restore UMG, lr_svc
	USER_MEMORY_GUARD_RESTORE(r11,r12);
	RECORD_STATE();
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function

	asm("irq_kernel_locked_exit: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst	r8, #0x0f ");
	asm("bleq " CSM_CFUNC(check_lock_state));
#endif
#ifdef BTRACE_CPU_USAGE
	asm("cmp	r10, #0 ");
	asm("blne	btrace_irq_exit ");
#endif
	USER_MEMORY_GUARD_RESTORE(r11,r12);
	__ASM_CLI_MODE(MODE_SVC);				// mode_svc, interrupts off
	RECORD_STATE();
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function

	asm("nested_irq_exit: ");
	__ASM_CLI1();
	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
#ifdef BTRACE_CPU_USAGE
	asm("cmp	r10, #0 ");
	asm("blne	btrace_irq_exit ");
#endif
	asm("ldmia	sp!, {r0-r12,r14} ");		// restore r0-r12, r14_sys
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function

	asm("__BTraceCpuUsageFilter: ");
	asm(".word	%a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
	}


/******************************************************************************
 * FIQ Postamble
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack -- except that if user memory
 * guards are in use, the saved DACR was pushed afterwards, so that's on top
 * of the stack and the return address is next
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorFiq()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	GET_RWNO_TID(,r9);
	asm("mrs	r8, spsr ");				// check interrupted mode
	asm("and	r10, r8, #0x1f ");
	asm("cmp	r10, #0x10 ");				// check for mode_usr
	asm("ldr	r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("cmpne	r10, #0x13 ");				// or mode_svc
	asm("ldreq	r10, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmpeq	r11, #0 ");					// and check if kernel locked
	asm("bne	FiqExit0 ");				// if wrong mode or kernel locked, return immediately
	asm("cmp	r10, #0 ");					// check if reschedule needed
	asm("beq	FiqExit0 ");				// if not, return from interrupt

	// we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r8, [sp], #4 ");			// r8_fiq = UMG state
#endif
	asm("ldr	r14, [sp], #4 ");			// r14_fiq = return address
	asm("add	r11, r11, #1 ");
	asm("str	r11, [r9, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));	// lock the kernel
	SRSDBW(MODE_SVC);						// save return address and return CPSR to supervisor stack
	CPSCHM(MODE_SVC);						// switch to mode_svc, all interrupts off
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	asm("mov	r0, #%a0" : : "i" ((TInt)SThreadExcStack::EFiq));
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	CPSCHM(MODE_FIQ);						// back to mode_fiq, all interrupts off
	asm("mov	r1, r8 ");					// retrieve UMG state
	CPSCHM(MODE_SVC);						// switch to mode_svc, all interrupts off
	asm("mov	r11, r1 ");					// UMG state into R11
#endif
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	__ASM_STI();							// interrupts on
	asm("b		irq_do_resched ");			// do reschedule and return from interrupt

	asm("FiqExit0: ");
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r8, __BTraceCpuUsageFilter ");
	asm("ldrb	r8, [r8] ");
	asm("cmp	r8, #0 ");
	asm("beq	1f ");
	asm("stmfd	sp!, {r0-r3} ");
	asm("bl		btrace_fiq_exit ");
	asm("ldmfd	sp!, {r0-r3} ");
	asm("1: ");
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr	r8, [sp], #4 ");
#endif
	USER_MEMORY_GUARD_RESTORE(r8,r12);
	asm("ldmfd	sp!, {pc}^ ");				// return from interrupt

	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}


/******************************************************************************
 * Abort handler
 * This routine is called in response to a data abort, prefetch abort or
 * undefined instruction exception.
 ******************************************************************************/

extern "C" __NAKED__ void __ArmVectorAbortData()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #8 ");				// lr now points to aborted instruction
	SRSDBW(		MODE_ABT);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type

	asm("handle_exception: ");
	// We are in exception mode (abt/und) with registers stacked as follows:
	// R13_abt/R13_und -> R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13_usr R14_usr ExcCode PC CPSR
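	// Note (editorial, not in the original source): this frame corresponds to the SThreadExcStack
	// layout implied by the _FOFF() offsets used in this file - iR0..iR12, iR13usr, iR14usr,
	// iExcCode, iR15 and iCPSR (18 words), with the final iR15/iCPSR pair being what RFEIAW(13)
	// consumes on the return paths.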
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX									// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);							// dummy STREX to reset exclusivity monitor
#endif

#if 0	// minimum-dependency exception handling
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
	asm("mrs	r4, cpsr ");
	asm("orr	r1, r0, #0xc0 ");
	asm("msr	cpsr, r1 ");				// back to original mode
	asm("mov	r2, sp ");
	asm("mov	r3, lr ");
	asm("msr	cpsr, r4 ");				// back to mode_abt or mode_und
	asm("stmfd	sp!, {r2,r3} ");			// now have R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
	asm("mrc	p15, 0, r1, c5, c0, 0 ");	// DFSR
	asm("mrc	p15, 0, r2, c5, c0, 1 ");	// IFSR
	asm("mrc	p15, 0, r0, c6, c0, 0 ");	// DFAR
	asm("stmfd	sp!, {r0-r2} ");			// now have DFAR DFSR IFSR R13 R14 R0-R12 R13_usr R14_usr ExcCode PC CPSR
	asm("mov	r0, sp ");
	asm(".extern hw_init_exc ");
	asm("bl		hw_init_exc ");
	asm("add	sp, sp, #20 ");
	asm("ldmia	sp, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop ");							// don't access banked register immediately after
	asm("add	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	RFEIAW(13);								// restore PC and CPSR - return from Exec function
#endif

	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iCPSR));
	asm("mrs	r12, cpsr ");
	asm("and	r3, r0, #0x1f ");			// r3=processor mode when abort occurred
	asm("bic	r12, r12, #0xc0 ");
	asm("cmp	r3, #0x10 ");				// aborted in user mode?
	asm("cmpne	r3, #0x13 ");				// if not, aborted in mode_svc?
	asm("bne	fatal_exception_mode ");	// if neither, fault
	asm("cmp	r11, #0 ");
	asm("beq	fatal_exception_mode ");	// if subscheduler not yet set up, fault
	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	__ASM_STI();							// reenable interrupts - rescheduling disabled by mode_abt/mode_und
	asm("mov	r10, sp ");					// r10 points to saved registers
	asm("cmp	r5, #0 ");					// exception with kernel locked?
	asm("bne	fatal_exception_mode ");	// if so, fault
	asm("add	r5, r5, #1 ");				// lock the kernel
	asm("str	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
	CPSCHM(MODE_SVC);						// mode_svc, interrupts on, kernel locked

	asm("ldr	r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("add	r5, r5, #%a0" : : "i" _FOFF(NThread,iStackBase));
	asm("ldmia	r5, {r2,r5} ");				// r2=supervisor stack area base, r5=size
	asm("subs	r2, sp, r2 ");				// r2=amount of mode_svc stack remaining
	asm("blo	fatal_exception_stack ");	// if stack pointer invalid, fault
	asm("cmp	r2, r5 ");
	asm("bhi	fatal_exception_stack ");
	asm("cmp	r2, #128 ");				// check enough stack to handle exception
	asm("blo	fatal_exception_stack ");	// if not, fault

	// At this point we are in mode_svc with interrupts enabled and the kernel locked.
	// We know the supervisor stack is valid and has enough free space to store the exception info.
	// Registers: R0=aborted cpsr, R10 points to saved registers, R11->TSubScheduler
	// on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).

	asm("add	r1, r10, #%a0" : : "i" _FOFF(SThreadExcStack,iR8));
	asm("ldmia	r1, {r0-r9} ");				// get saved R8,R9,R10,R11,R12,R13_usr,R14_usr,exccode,PC,CPSR
	__ASM_CLI();
	asm("mov	r12, sp ");					// save original R13_svc
	asm("bic	sp, sp, #4 ");				// align R13_svc to 8 byte boundary
	asm("stmfd	sp!, {r0-r9} ");			// save on supervisor stack
	asm("ldmia	r10, {r0-r6,r10} ");		// get saved R0-R7
	asm("stmfd	sp!, {r0-r6,r10} ");		// save on supervisor stack
											// leave R7=exccode, R8=aborted instruction address, R9=aborted CPSR
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("moveq	r0, #0x1b ");				// mode_und
	asm("movne	r0, #0x17 ");				// mode_abt
	asm("msr	cpsr, r0 ");				// mode_abt or mode_und, interrupts on
	asm("add	sp, sp, #%a0 " : : "i" ((TInt)sizeof(SThreadExcStack)));	// restore exception stack balance
	CPSCHM(MODE_SVC);						// back into mode_svc, interrupts on

	asm("ldr	r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("mrceq	p15, 0, r1, c5, c0, 1 ");	// r1=instruction fault status
	asm("mrcne	p15, 0, r1, c5, c0, 0 ");	// r1=data fault status
#ifdef __CPU_ARM_HAS_CP15_IFAR
	asm("mrceq	p15, 0, r0, c6, c0, 2 ");	// r0 = IFAR fault address
#else
	asm("moveq	r0, r8 ");					// else fault address for prefetch abort = instruction address
#endif // __CPU_ARM_HAS_CP15_IFAR
	asm("mrcne	p15, 0, r0, c6, c0, 0 ");	// r0= DFAR fault address
	asm("mrs	r2, spsr ");				// r2 = spsr_svc
	asm("mov	r3, #0 ");					// spare word
											// r12 = original R13_svc
	asm("ldr	r5, [r4, #%a0]" : : "i" _FOFF(NThread,iHandlers));	// r5 -> SNThreadHandlers
	asm("stmfd	sp!, {r0-r3,r12,r14} ");	// save FAR, FSR, SPSR_SVC, 0, R13_svc, R14_svc

	USER_MEMORY_GUARD_ON(,r6,r0);

	// Now we can unlock the kernel and process the exception
	asm("bl "	CSM_ZN5NKern6UnlockEv );

	// R4 points to the current thread
	// Get the handler address
	asm("ldr	r5, [r5, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));	// r5 -> exception handler

	// Kernel is now unlocked so we can retrieve the opcode for an undefined instruction trap
	// We might take a page fault doing this but that's OK since the original instruction
	// fetch might have taken a page fault and we no longer have any more locks than were
	// held at that time.
	asm("cmp	r7, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("beq	exc_undef ");

	// call the exception dispatcher
	asm("exc_dispatch: ");
	asm("mov	r1, r4 ");					// pass address of current thread
	asm("mov	r0, sp ");					// pass address of TArmExcInfo
	asm("adr	lr, exc_return ");
	__JUMP(,	r5);						// call exception handler

	// Undefined instruction - get the opcode
	// R4->current thread, R8=address of aborted instruction, R9=CPSR at time of abort, SP->TArmExcInfo
	asm("exc_undef: ");
	asm("tst	r9, #0x20 ");				// THUMB?
	asm("bne	exc_undef_thumb ");			// branch if so
	asm("tst	r9, #0x00800000 ");			// J=1 ?
	asm("bne	exc_dispatch ");			// T=0, J=1 -> dispatch normally
	asm("tst	r9, #0x0f ");				// ARM - mode_usr ?
	asm("ldrne	r0, [r8] ");				// If not, get opcode
	USER_MEMORY_GUARD_OFF(eq,r0,r0);
	asm("ldreqt r0, [r8] ");				// else get opcode with user permissions
	USER_MEMORY_GUARD_ON(eq,r1,r1);
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));	// save opcode

	// ARM opcode in R0 - check for coprocessor or special UNDEF opcode
	// Special undef *7F***F*
	asm("orr	r1, r0, #0xF000000F ");		// *7F***F* -> F7F***FF
	asm("orr	r1, r1, #0x000FF000 ");		// *7F***F* -> F7FFF*FF
	asm("orr	r1, r1, #0x00000F00 ");		// *7F***F* -> F7FFFFFF
	asm("cmn	r1, #0x08000001 ");			// check
	asm("moveq	r1, #32 ");
	asm("beq	special_undef_arm ");		// branch if special undef

	// Coprocessor *X***N** X=C,D,E		N=coprocessor number
	// Advanced SIMD F2****** F3****** F4X***** (X even)
	asm("and	r1, r0, #0x0F000000 ");		// *C****** -> 0C000000
	asm("add	r1, r1, #0xF4000000 ");		// *C****** -> 00000000
	asm("cmp	r1, #0x03000000 ");
	asm("movlo	r1, r0, lsr #8 ");
	asm("andlo	r1, r1, #0x0f ");			// r1 = coprocessor number
	asm("blo	undef_coproc_arm ");
	asm("add	r1, r0, #0x0E000000 ");		// F2****** -> 00******
	asm("cmp	r1, #0x02000000 ");
	asm("blo	undef_coproc_arm ");
	asm("cmp	r1, #0x03000000 ");
	asm("bhs	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("tst	r0, #0x00100000 ");
	asm("bne	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("mov	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD
	asm("b		undef_coproc_arm ");

	asm("exc_undef_thumb: ");
	asm("tst	r9, #0x0f ");				// THUMB - mode_usr ?
	USER_MEMORY_GUARD_OFF(eq,r0,r0);
	asm("ldreqbt r0, [r8], #1 ");			// yes - get low 8 bits
	asm("ldreqbt r1, [r8], #1 ");			// get high 8 bits
	USER_MEMORY_GUARD_ON(eq,r2,r2);
	asm("ldrneh	r0, [r8], #2 ");			// no - get first 16 bits of opcode
	asm("orreq	r0, r0, r1, lsl #8 ");		// user mode - r0 = first 16 bits of opcode
#ifdef __CPU_THUMB2
	// must check for a 32 bit instruction and get second half if necessary
	asm("cmp	r0, #0xe800 ");
	asm("blo	exc_undef_thumb_16 ");		// skip if 16 bit
	asm("tst	r9, #0x0f ");				// mode_usr ?
	USER_MEMORY_GUARD_OFF(eq,r1,r1);
	asm("ldreqbt r1, [r8], #1 ");			// yes - get low 8 bits
	asm("ldreqbt r2, [r8], #1 ");			// get high 8 bits
	USER_MEMORY_GUARD_ON(eq,r3,r3);
	asm("ldrneh	r1, [r8], #2 ");			// no - get second 16 bits of opcode
	asm("orreq	r1, r1, r2, lsl #8 ");		// user mode - r1 = second 16 bits of opcode
	asm("orr	r0, r1, r0, lsl #16 ");		// first half of opcode into top of R0
	asm("exc_undef_thumb_16: ");
#endif
	asm("str	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iFaultStatus));	// save opcode

	// THUMB opcode in R0 - check for coprocessor operation or special UNDEF opcode
	// Special undef DE**, F7F*A***
	asm("sub	r1, r0, #0xde00 ");
	asm("cmp	r1, #0x100 ");
	asm("movlo	r1, #33 ");
	asm("blo	special_undef_thumb ");		// branch if THUMB1 special undef
	asm("orr	r1, r0, #0x000000FF ");		// F7F*A*** -> F7F*A*FF
	asm("orr	r1, r1, #0x00000F00 ");		// F7F*A*** -> F7F*AFFF
	asm("orr	r1, r1, #0x000F0000 ");		// F7F*A*** -> F7FFAFFF
	asm("add	r1, r1, #0x00005000 ");		// F7F*A*** -> F7FFFFFF
	asm("cmn	r1, #0x08000001 ");			// check
	asm("moveq	r1, #34 ");
	asm("beq	special_undef_thumb2 ");	// branch if THUMB2 special undef

	// Check for THUMB2 Coprocessor instruction
	// 111x 11yy xxxx xxxx | xxxx nnnn xxxx xxxx	nnnn=coprocessor number, yy=00,01,10
	// 111x 1111 xxxx xxxx | xxxx xxxx xxxx xxxx	Advanced SIMD
	// 1111 1001 xxx0 xxxx | xxxx xxxx xxxx xxxx	Advanced SIMD
	asm("orr	r1, r0, #0x10000000 ");
	asm("cmn	r1, #0x01000000 ");
	asm("movcs	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD
	asm("bcs	undef_coproc_thumb ");
	asm("cmp	r1, #0xFC000000 ");
	asm("movcs	r1, r0, lsr #8 ");
	asm("andcs	r1, r1, #0x0f ");			// r1 = coprocessor number
	asm("bcs	undef_coproc_thumb ");
	asm("and	r1, r0, #0xFF000000 ");
	asm("cmp	r1, #0xF9000000 ");
	asm("tsteq	r0, #0x00100000 ");
	asm("bne	exc_dispatch ");			// if not coproc/AdvSIMD, dispatch normally
	asm("mov	r1, #16 ");					// CP=16 for non-coprocessor AdvSIMD

	asm("special_undef_arm: ");
	asm("special_undef_thumb: ");
	asm("special_undef_thumb2: ");
	asm("undef_coproc_thumb: ");
	asm("undef_coproc_arm: ");
	asm("mov	r0, sp ");
	asm("bl "	CSM_CFUNC(HandleSpecialOpcode));
	asm("cmp	r0, #0 ");
	asm("beq	exc_dispatch ");			// if not handled, dispatch normally, else return

	// return from exception
	// R4 points to current thread, R11->TSubScheduler, SP->TArmExcInfo
	asm("exc_return: ");
	__ASM_CLI();
	asm("ldr	r0, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iCpsr));
	asm("ldr	r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r9, r4 ");
	asm("tst	r0, #0x0f ");				// returning to user mode?
	asm("bne	exc_return2 ");				// if not, we don't check locks or run callbacks

#ifdef __CHECK_LOCK_STATE__
	asm("bl " CSM_CFUNC(check_lock_state));
#endif
	asm("cmp	r1, #3 ");					// callbacks?
	asm("blo	exc_return2 ");
	asm("stmfd	sp!, {r6} ");
	asm("bl		run_user_mode_callbacks ");	// run them; NB trashes most registers (R0-R12, R14)
	asm("ldmfd	sp!, {r6} ");

	asm("exc_return2: ");
	RECORD_STATE_EXC();
	USER_MEMORY_GUARD_RESTORE(r6,r0);

	asm("add	r7, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iSpsrSvc));	// r7->saved spsr_svc
	asm("ldmia	r7!, {r0-r2,r14} ");		// r0=original spsr_svc, r2=original sp_svc, restore lr_svc
	asm("add	r6, sp, #%a0" : : "i" _FOFF(TArmExcInfo,iR15));		// r6->saved PC, CPSR
	asm("msr	spsr, r0 ");				// restore spsr_svc
	asm("ldmia	r6, {r0,r1} ");
	asm("stmdb	r2!, {r0,r1} ");			// move saved PC, CPSR so sp_svc ends up at original place
	asm("str	r2, [r6, #-4] ");			// overwrite iExcCode with original sp_svc - 8
	asm("ldmia	r7, {r0-r14}^ ");			// restore R0-R12, R13_usr, R14_usr
	asm("nop	");							// don't touch banked register immediately afterwards
	asm("ldr	sp, [sp, #%a0]" : : "i" _FOFF(TArmExcInfo,iExcCode));	// R13_svc = original R13_svc - 8
	USER_MEMORY_GUARD_CHECK();				// check UMG is off if returning to user mode
	RFEIAW(13);								// restore PC and CPSR - return from Exec function

	// get here if exception occurred in mode other than usr or svc
	// we are in mode_abt or mode_und with IRQs disabled
	// R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
	// R12=processor mode of exception (abt/und)
	asm("fatal_exception_mode: ");
	asm("ldr	r2, __TheScheduler ");
	asm("ldr	lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
	asm("cmp	lr, #0 ");
	__JUMP(ne,	lr);						// if crash debugger running, let it handle exception

	// get here if mode_svc stack has overflowed
	// we are in mode_svc with interrupts enabled and the kernel locked
	// R0=original CPSR R10->saved registers on exception stack R11->TSubScheduler
	// R12=processor mode of exception (abt/und)
	asm("fatal_exception_stack: ");
	asm("orr	r3, r12, #0xC0 ");
	asm("msr	cpsr, r3 ");				// back to exception mode, all interrupts off
	asm("mov	r2, r0 ");
	asm("cmp	r11, #0 ");
	asm("ldreq	r11, __SS0 ");
	asm("ldr	r0, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));	// pass in address of stored registers
	asm("cmp	r0, #0 ");
	asm("ldreq	r0, __DefaultRegs ");
	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
	asm("ldmia	sp!, {r4-r9} ");			// get original R0-R5
	asm("stmia	r0!, {r4-r9} ");			// save original R0-R5
	asm("ldmia	sp!, {r4-r9} ");			// get original R6-R11
	asm("stmia	r0!, {r4-r9} ");			// save original R6-R11
	asm("ldmia	sp!, {r4-r9} ");			// get original R12 R13_usr R14_usr iExcCode PC CPSR
	asm("stmia	r0!, {r4-r6} ");			// save original R12 R13_usr R14_usr
	asm("sub	r0, r0, #60 ");				// R0 back to where it was (6+6+3 = 15 words saved)
	asm("str	r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
	asm("str	r8, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
	asm("str	r9, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
	asm("mov	r1, #13 ");					// r1 = regnum
	asm("mrs	r2, cpsr ");				// r2 = mode
	asm("mov	r4, r0 ");
	asm("bl "	CSM_ZN3Arm3RegER14SFullArmRegSetim );	// r0 = pointer to exception mode R13
	asm("str	sp, [r0] ");				// save correct original value for exception mode R13

	// call the exception fault dispatcher
	asm("mov	r0, #0 ");
	asm("b		ExcFault ");

	asm("__SS0: ");
	asm(".word	%a0" : : "i" ((TInt)&TheSubSchedulers[0]));
	asm("__DefaultRegs: ");
	asm(".word	%a0" : : "i" ((TInt)&DefaultRegSet));
	}

extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #4");				// lr now points to instruction whose prefetch was aborted
	SRSDBW(		MODE_ABT);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("b		handle_exception ");
	}

extern "C" __NAKED__ void __ArmVectorUndef()
	{
	__ASM_CLI();							// disable all interrupts
	asm("sub	lr, lr, #4");				// lr now points to undefined instruction
	SRSDBW(		MODE_UND);					// save it along with aborted CPSR
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");			// save R0-R12, R13_usr, R14_usr
	GET_RWNO_TID(,r11);
	asm("mov	r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	asm("mrs	r0, spsr ");				// r0=CPSR at time of exception
	asm("tst	r0, #0x20 ");				// exception in THUMB mode?
	asm("addne	lr, lr, #2 ");				// if so, correct saved return address
	asm("strne	lr, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("b		handle_exception ");
	}

/******************************************************************************
 * Kick other CPUs as necessary to process TGenericIPI
 ******************************************************************************/
extern "C" __NAKED__ void send_generic_ipis(TUint32 /*aMask*/)
	{
	asm("movs	r0, r0, lsl #16 ");		// CPU mask into bits 16-23 - any bits set in aMask?
	GET_RWNO_TID(ne,r3);
	asm("ldrne	r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));	// we assume i_GicDistAddr is the same for all CPUs
	__DATA_SYNC_BARRIER_Z__(r1);			// need DSB before sending any IPI
	asm("orrne	r0, r0, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
	asm("strne	r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));	// trigger IPIs if any
	__JUMP(,lr);
	}

/******************************************************************************
 * Handle a crash IPI
 * Enter in mode_sys or mode_fiq
 *	If in mode_sys, R7 = nest count, in which case:
 *		If R7>0 nested IRQ so mode_sys stack contains R0...R12 R14sys PC CPSR
 *		If R7=0 first IRQ, R5 points to top of mode_svc stack, which contains
 *			R0...R12 R13usr R14usr iExcCode PC CPSR
 *	If in mode_fiq, FIQ stack contains R0...R7 R8usr...R14usr iExcCode PC CPSR
 ******************************************************************************/
extern "C" __NAKED__ void handle_crash_ipi()
	{
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iExcCode));
	asm("cmp	r0, #0 ");
	asm("bge	state_already_saved ");		// skip if this CPU has already saved its state (i.e. already crashed)
	GET_RWNO_TID(,r0);
	asm("ldr	r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_Regs));
	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );	// save machine state (NOTE: R0 trashed)
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iFlags));	// mode on entry
	asm("and	r1, r1, #0x1f ");
	asm("cmp	r1, #0x11 ");				// mode_fiq?
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));	// yes - take registers from FIQ stack
	asm("beq	1f ");
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR7));	// nest count
	asm("cmp	r1, #0 ");					// nested?
	asm("ldreq	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR5));	// no - take registers from SVC stack (R5 points to it)
	asm("beq	2f ");
	asm("ldr	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13));	// nested - take R0...R12 R14usr PC CPSR from mode_sys stack
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r7} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R14usr R6=orig PC R7=orig CPSR
	asm("stmia	r0!, {r2-r4} ");			// save original R10-R12
	asm("stmia	r0!, {r1,r5,r6,r7} ");		// save original R13usr, R14usr, PC, CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
	asm("b		0f ");

	asm("1:		");							// R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r9} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
	asm("stmia	r0!, {r2-r6,r8,r9} ");		// save original R10-R12 R13usr R14usr PC CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Fiq));	// save original R13Fiq
	asm("b		0f ");

	asm("2:		");							// R1 points to R0...R12 R13usr R14usr iExcCode PC CPSR
	asm("ldmia	r1!, {r2-r11} ");
	asm("stmia	r0!, {r2-r11} ");			// save original R0-R9
	asm("ldmia	r1!, {r2-r9} ");			// R2=original R10, R3=orig R11, R4=orig R12 R5=orig R13usr R6=orig R14usr R8=orig PC R9=orig CPSR
	asm("stmia	r0!, {r2-r6,r8,r9} ");		// save original R10-R12 R13usr R14usr PC CPSR
	asm("sub	r0, r0, #68 ");				// R0 back to i_Regs
	asm("mov	r4, r0 ");
	asm("str	r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet, iN.iR13Svc));	// restore original R13Svc

	asm("0:		");
	asm("state_already_saved: ");
	__DATA_SYNC_BARRIER_Z__(r6);

	USER_MEMORY_GUARD_OFF(,r0,r0);
	asm("mov	r0, #0 ");
	asm("mov	r1, #0 ");
	asm("mov	r2, #0 ");
	asm("bl		NKCrashHandler ");		// call NKCrashHandler(0,0,0)

	__DATA_SYNC_BARRIER__(r6);
	GET_RWNO_TID(,r0);
	asm("ldr	r7, __CrashStateOut ");
	asm("ldr	r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("7: ");
	LDREX(1,7);
	asm("bic	r1, r1, r2 ");
	STREX(3,1,7);						// atomic { CrashStateOut &= ~iCpuMask; }
	asm("cmp	r3, #0 ");
	asm("bne	7b ");
	asm("1: ");
	ARM_WFE;
	asm("b		1b ");					// all done, just wait to be reset

	asm("__CrashStateOut: ");
	asm(".word CrashStateOut ");
	}


/******************************************************************************
 * Run TUserModeCallbacks when a thread is about to return to user mode
 *
 * On entry:
 *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
 *		R9 points to current NThread
 *		We know there is at least one callback on the list
 *		Stack not necessarily 8 byte aligned
 *		User memory guards on (if in use)
 * On return:
 *		CPU in mode_svc, interrupts disabled, kernel unlocked, thread not in CS
 *		No TUserModeCallbacks outstanding at the point where interrupts were disabled.
 *		R0-R12,R14 modified
 ******************************************************************************/
extern "C" __NAKED__ void DoRunUserModeCallbacks()
	{
	asm(".global run_user_mode_callbacks ");
	asm("run_user_mode_callbacks: ");

	USER_MEMORY_GUARD_ASSERT_ON(r12);

#ifdef __CHECK_LOCK_STATE__
	asm("ldr	r0,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("cmp	r0, #0 ");
	asm("beq	0f ");
	__ASM_CRASH();
#endif

	asm("0:		");
	__ASM_STI();					// enable interrupts
	asm("mov	r10, sp ");			// save stack pointer
	asm("mov	r11, lr ");			// save return address
	asm("add	r8, r9, #%a0" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("mov	r0, #1 ");			// shouldn't have been in CS to begin with
	asm("bic	sp, sp, #4 ");		// align stack to 8 byte boundary
	asm("str	r0,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));	// EnterCS()

	asm("1:		");
	LDREX(		7,8);				// r7 = iUserModeCallbacks
	asm("mov	r6, #0 ");
	STREX(		12,6,8);			// iUserModeCallbacks = 0 if not changed
	asm("cmp	r12, #0 ");
	asm("bne	1b ");
	__DATA_MEMORY_BARRIER__(r6);
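
	// Note (editorial, not in the original source): the LDREX/STREX loop above is effectively an
	// atomic exchange - it reads the head of the iUserModeCallbacks list into r7 and replaces it
	// with zero, retrying if the word was updated concurrently; the barrier just above then orders
	// the detached list's contents before it is walked below.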

	asm("2:		");
	asm("movs	r0, r7 ");			// r0 = pointer to callback
	asm("beq	3f ");				// branch out if reached end of list
	asm("ldmia	r7, {r7, r12} ");	// r7 = callback->iNext, r12 = callback->iFunc
	asm("mov	r1, #%a0" : : "i" ((TInt)KUserModeCallbackUnqueued));
	asm("str	r1, [r0, #0] ");	// callback->iNext = 1
	__DATA_MEMORY_BARRIER__(r6);
	asm("adr	lr, 2b ");			// return to beginning of loop
	asm("mov	r1, #%a0" : : "i" ((TInt)EUserModeCallbackRun));
	__JUMP(,	r12);				// (*callback->iFunc)(callback, EUserModeCallbackRun);

	asm("3:		");
	__ASM_CLI();					// turn off interrupts
	__DATA_MEMORY_BARRIER__(r6);
	asm("ldr	r0, [r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("ldr	r1, [r8] ");
	asm("cmp	r0, #0 ");			// anything to do in LeaveCS() ?
	asm("bne	5f ");				// if yes, jump to slow path
	asm("cmp	r1, #0 ");			// no - any more callbacks?
	asm("bne	4f ");

	// no more callbacks, no CsFunction so just LeaveCS() and return
	asm("str	r6,	[r9, #%a0]" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("mov	sp, r10 ");			// restore stack pointer
	__JUMP(,	r11);

	// more callbacks have been queued so loop round and do them
	asm("4:		");
	__ASM_STI();					// enable interrupts
	asm("b		1b ");

	// CsFunction outstanding so do it
	asm("5:		");
	__ASM_STI();					// enable interrupts
	asm("bl		ThreadLeaveCS__5NKern ");
	__ASM_CLI();					// turn off interrupts
	__DATA_MEMORY_BARRIER__(r6);
	asm("ldr	r1, [r8] ");
	asm("mov	sp, r10 ");
	asm("mov	lr, r11 ");
	asm("cmp	r1, #0 ");			// any more callbacks queued?
	asm("bne	0b ");				// yes - go right back to the beginning and do them
	__JUMP(,	r11);				// else return
	}