os/kernelhwsrv/kernel/eka/nkern/arm/vectors.cia
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\vectors.cia
//
//

#define __INCLUDE_NTHREADBASE_DEFINES__
#include <e32cia.h>
#include <arm.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

#ifdef __CPU_HAS_MMU
#define __USE_CP15_FAULT_INFO__
#endif

#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

//#define __FAULT_ON_FIQ__

#ifdef __CHECK_LOCK_STATE__
// Check that the kernel is unlocked, no fast mutexes are held and that the thread is not in a
// critical section.  Called when returning to user mode
__NAKED__ void CheckLockState()
	{
	asm("stmfd sp!, {r14}");
	asm("ldr r12, __TheScheduler ");
	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmp r14, #0 ");
	asm("movne r12, #0xdd000000 ");
	asm("strne r12, [r12, #1] ");
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp r14, #0 ");
	__CPOPRET(eq, "");
	asm("badLockState: ");
	asm("mov r12, #0xd7 ");
	asm("msr cpsr, r12 ");
	asm("mov r12, #0xdd000000 ");
	asm("str r12, [r12, #3] ");
	}
#endif
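
// The check above, roughly, in C (illustrative sketch only - it is not compiled, and
// Fault() here stands for the deliberate store to an unmapped 0xdd0000xx address used
// to force a crash):
//
//   if (TheScheduler.iKernCSLocked != 0)
//       Fault();                                      // kernel must be unlocked here
//   if (TheScheduler.iCurrentThread->iHeldFastMutex != 0)
//       Fault();                                      // no fast mutex may be held
//
// The badLockState label is also reached from CallUserModeCallbacks below if the thread
// is still in a critical section (iCsCount != 0); it switches to mode_abt and faults in
// the same way.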

__ASSERT_COMPILE(EUserModeCallbackRun == 0);

__NAKED__ void CallUserModeCallbacks()
	{
	// called with interrupts disabled
	// preserves r0 and r1 in addition to the usual registers
	// leaves current thread in r2
	// the vast majority of times this is called with zero or one callback pending

	asm(".global callUserModeCallbacks ");
	asm("callUserModeCallbacks: ");

	asm("ldr ip, __TheScheduler ");
	asm("ldr r2, [ip, #%a0]" : : "i" _FOFF(TScheduler, iCurrentThread));

	asm("callUserModeCallbacks2: ");

	USER_MEMORY_GUARD_ASSERT_ON(ip);

#ifdef __CHECK_LOCK_STATE__
	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread,iCsCount));
	asm("cmp ip, #0 ");
	asm("bne badLockState ");
#endif

	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserModeCallbacks));
	asm("teq ip, #0");
	asm("bne 1f");
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0-r2, r4-r11, lr}");
	asm("movs r4, r3");
	// if r3 != 0 it is the user context type to set the thread to
	asm("strneb r3, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));

	// Remove first callback and enter critical section - we can just set iCsCount to 1 as we are
	// guaranteed not to be in a critical section already
	asm("ldmia ip, {r1, r3} ");		// HARDCODED: TUserModeCallback layout
    asm("mov r0, #1");
	asm("str r0, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
	asm("str r1, [r2, #%a0]" : : "i" _FOFF(NThread,iUserModeCallbacks));

	// Re-enable interrupts and call callback
	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
	asm("mov r1, #%a0 " : : "i" ((TInt)KUserModeCallbackUnqueued));
	asm("str r1, [ip, #%a0]" : : "i" _FOFF(TUserModeCallback, iNext));
	asm("mov r0, ip");
	asm("mov r1, #0 ");				// 0 == EUserModeCallbackRun
	__JUMPL(3);

	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);

	asm("movs r3, r4");
	// Leave critical section, avoid calling NKern::ThreadLeaveCS unless we have to
	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
	// reset user context type to undefined if r3 != 0
	asm("mov ip, #%a0" : : "i" (NThread::EContextUndefined));
	asm("strneb ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsFunction));
	asm("teq ip, #0");
	asm("streq ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
	asm("beq callUserModeCallbacks2 ");

	asm("leaveCS:");
	asm("sub sp, sp, #48 ");
	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
	asm("bl " CSM_ZN5NKern13ThreadLeaveCSEv);
	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
	asm("b callUserModeCallbacks2 ");
	}
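
// Roughly, CallUserModeCallbacks implements the following loop (illustrative C-level
// sketch only, not compiled; iFunc here stands for the second word of TUserModeCallback,
// per the HARDCODED layout note above):
//
//   for (;;)
//       {
//       NThread* t = TheScheduler.iCurrentThread;
//       TUserModeCallback* cb = t->iUserModeCallbacks;
//       if (!cb)
//           return;                              // common case: nothing queued
//       t->iCsCount = 1;                         // enter critical section
//       t->iUserModeCallbacks = cb->iNext;       // dequeue first callback
//       // interrupts re-enabled here
//       cb->iNext = KUserModeCallbackUnqueued;
//       cb->iFunc(cb, EUserModeCallbackRun);
//       // interrupts disabled again here
//       if (t->iCsFunction)
//           NKern::ThreadLeaveCS();              // deferred CS action pending
//       else
//           t->iCsCount = 0;                     // leave critical section cheaply
//       }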

/***************************************************************************
 * SWI Handler
 ***************************************************************************/
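
// SWI decoding used below: the opcode is fetched from [lr, #-4] and its low 24 bits hold
// the executive call number. "movs r12, r12, lsl #9" shifts bit 23 of the opcode into the
// carry flag (set = fast exec, clear = slow/unprotected exec) and leaves 512*number in
// r12, so "r12, lsr #9" recovers the number and "r12, lsr #7" gives a word-table offset.
// A fast-exec number of zero (Z set after the shift) is the special case
// Exec::WaitForAnyRequest.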

extern "C" __NAKED__ void __ArmVectorSwi()
	{
	// IRQs disabled, FIQs enabled here
	asm("ldr r12, [lr, #-4] ");				// get SWI opcode
	asm("stmfd sp!, {r11, lr} ");			// save return address, r11 for 8 byte align
	USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
	asm("ldr r11, __TheScheduler ");
	asm("adr lr, fast_swi_exit ");
	asm("movs r12, r12, lsl #9 ");			// 512*SWI number into r12
	asm("bcc slow_swi ");					// bit 23=0 for slow/unprot
	asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("beq wait_for_any_request ");		// special case for Exec::WaitForAnyRequest
#ifdef __CPU_ARM_HAS_CPS
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	CPSIDIF;								// all interrupts off
	asm("ldr r3, [r2], r12, lsr #7 ");		// r3=limit, r2->dispatch table entry
	asm("ldr r2, [r2] ");					// r2->kernel function
	asm("cmp r3, r12, lsr #9 ");			// r3-SWI number
	__JUMP(hi, r2);							// if SWI number valid, call kernel function
#else
	SET_INTS(r2, MODE_SVC, INTS_ALL_OFF);
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	asm("ldr r3, [r2], r12, lsr #7 ");		// r3=limit, r2->dispatch table entry
	asm("cmp r3, r12, lsr #9 ");			// r3-SWI number
	asm("ldrhi pc, [r2] ");					// if SWI number valid, call kernel function
#endif
	asm("mvn r12, #0 ");					// put invalid SWI number into r12
	asm("b slow_swi ");						// go through slow SWI routine to call invalid SWI handler

	asm("fast_swi_exit: ");
#ifdef __CHECK_LOCK_STATE__
	asm("mrs r12, spsr ");
	asm("tst r12, #0x0f ");
	asm("bleq  " CSM_Z14CheckLockStatev);
#endif
	USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
	ERRATUM_353494_MODE_CHANGE(,r11);
	asm("ldmfd sp!, {r11, pc}^ ");			// return and restore cpsr


	asm("slow_swi: ");						// IRQs off, FIQs on here
	asm("stmfd sp!, {r3-r10} ");			// save nonvolatile registers, r3 for 8 byte align
	asm("ldr r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));	// r9->current thread
	SET_INTS(lr, MODE_SVC, INTS_ALL_ON);	// all interrupts on
	asm("mov r10, r11 ");					// r10->scheduler
	asm("ldr r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
	asm("mrs r11, spsr ");					// spsr_svc into r11
	asm("adr lr, slow_swi_exit ");
	asm("add r6, r4, r12, lsr #6 ");		// r6->dispatch table entry
	asm("ldr r5, [r4, #-12] ");				// r5=limit
	SET_INTS_1(r7, MODE_SVC, INTS_ALL_OFF);
	asm("cmp r5, r12, lsr #9 ");			// r5-SWI number
	asm("ldmhiia r6, {r5,r6} ");			// if SWI number OK, flags into r5, function addr into r6
	asm("ldrls pc, [r4, #-8] ");			// if SWI number invalid, call invalid handler
	asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask));	// extra arguments needed?
	asm("addne r2, sp, #4 ");				// if so, point r2 at saved registers on stack
	asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagClaim));			// claim system lock?
	asm("beq slow_swi_no_wait ");			// skip if not

	SET_INTS_2(r7, MODE_SVC, INTS_ALL_OFF);	// interrupts off
#ifdef _DEBUG
	asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp r12, #0 ");
	asm("bne  " CSM_Z20FastMutexNestAttemptv);	// debug check that current thread doesn't already hold a fast mutex
#endif
	asm("ldr r12, [r10, #%a0]!" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// r12=iLock.iHoldingThread
	SET_INTS_1(r7, MODE_SVC, INTS_ALL_ON);
	asm("cmp r12, #0 ");					// is system lock already held?
	asm("bne ss_fast_mutex_held ");			// branch if it is
	asm("ss_fast_mutex_obtained: ");
	asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=&iLock
	asm("str r9, [r10], #-%a0" : : "i" _FOFF(TScheduler,iLock));		// iLock.iHoldingThread=current thread, r10->scheduler
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp r12, #0");
	asm("bne syslock_trace_wait");
	asm("syslock_trace_wait_done:");
#endif
	SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON);	// all interrupts on

	asm("slow_swi_no_wait: ");
	asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess));	// preprocess (handle lookup)? can use r4, r7, r8, r12, r0
	asm("mov lr, pc ");
	asm("ldrne pc, [r4, #-4] ");			// call preprocess handler if required
	asm("mov lr, pc ");
	__JUMP(,r6);							// call exec function, preserve r5,r11 if release syslock not required
											// preserve r5,r9,r10,r11 if release required
	asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagRelease));		// release system lock?
	asm("beq slow_swi_exit ");				// skip if not

	SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
#ifdef _DEBUG
	asm("add r8, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp r12, r8 ");
	asm("bne  " CSM_Z20FastMutexSignalErrorv);	// debug check that current thread holds system lock
#endif
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp r12, #0");
	asm("bne syslock_trace_signal");
	asm("syslock_trace_signal_done:");
#endif
	asm("mov r12, #0 ");
	asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// iLock.iHoldingThread=NULL
	asm("str r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));			// current thread->iHeldFastMutex=NULL
	asm("ldr r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));		// r3=iLock.iWaiting
	asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));		// iLock.iWaiting=0
	SET_INTS_1(r8, MODE_SVC, INTS_ALL_ON);
	asm("cmp r3, #0 ");						// check waiting flag
	asm("bne ss_signal_check ");			// branch if set
	asm("ss_signal_done: ");
	SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON);	// otherwise reenable interrupts

	asm("slow_swi_exit: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst r11, #0x0f ");
	asm("bleq  " CSM_Z14CheckLockStatev);
#endif
	SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);	// disable interrupts
	asm("msr spsr, r11 ");					// restore spsr_svc
	asm("tst r11, #0x0f ");
	asm("mov r3, #0 ");
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
	asm("nop ");							// ARM Cortex-A9 MPCore erratum 571622 workaround
											// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
	asm("bleq callUserModeCallbacks ");		// call user-mode callbacks
	USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
	ERRATUM_353494_MODE_CHANGE(,r11);
	asm("ldmfd sp!, {r3-r11,pc}^ ");		// return from EXEC function


	// Come here if we need to wait for the system lock
	// r9->current thread, r10=&iLock, r12=iLock.iHoldingThread
	asm("ss_fast_mutex_held: ");
	asm("mov r8, #1 ");
	asm("str r8, [r10, #%a0]" : : "i" (_FOFF(TScheduler,iKernCSLocked)-_FOFF(TScheduler,iLock)));	// lock the kernel
	SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON);	// enable interrupts
	asm("str r8, [r10, #4] ");				// iWaiting=1
	asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// current thread->iWaitFastMutex=&iLock
	asm("stmfd sp!, {r0-r3} ");				// save exec call arguments
	asm("mov r0, r12 ");					// parameter for YieldTo
	ASM_DEBUG1(NKFMWaitYield,r0);
	asm("bl  " CSM_ZN10TScheduler7YieldToEP11NThreadBase);		// yield to the mutex holding thread
	// will not return until the mutex is free
	// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
	asm("str r1, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// iWaitFastMutex=NULL
	asm("ldmfd sp!, {r0-r3} ");				// retrieve exec call arguments
	asm("b ss_fast_mutex_obtained ");		// branch back to main code path

	// Come here if we need to reschedule after releasing the system lock
	// kernel unlocked, interrupts enabled, r0 contains return value from Exec call
	// r9->current thread, r10=&TheScheduler, r3=1, r8=0x13
	asm("ss_signal_check: ");
	asm("str r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel (assumes iWaiting always 0 or 1)
	SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
	asm("strb r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("ldr r3, [r9, #%a0]" : : "i" _FOFF(NThread,iCsFunction));	// r3=current thread->iCsFunction
	asm("ldr r2, [r9, #%a0]" : : "i" _FOFF(NThread,iCsCount));		// r2=current thread->iCsCount
	asm("mov r4, r0 ");						// save return value
	asm("cmp r3, #0 ");						// outstanding CS function?
	asm("beq 2f ");							// branch if not
	asm("cmp r2, #0 ");						// iCsCount!=0 ?
	asm("moveq r0, r9 ");					// if iCsCount=0, DoCsFunction()
	asm("bleq  " CSM_ZN11NThreadBase12DoCsFunctionEv);
	asm("2: ");
	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);	// reschedule to allow waiting thread in
	asm("mov r0, r4 ");						// recover return value
	asm("b ss_signal_done ");				// branch back to main code path

#ifdef BTRACE_FAST_MUTEX
	asm("syslock_trace_wait:");
	asm("ldr r12, [sp,#9*4]"); // r12 = return address from SWI
	asm("mov r8, r3"); // save r3
	asm("stmdb sp!,{r0-r2,r12}"); // 4th item on stack is PC value for trace
	asm("ldr r0, fmwait_trace_header");
	asm("mov r2, r9"); // current thread
	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("mov lr, pc");
	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,r12}");
	asm("mov r3, r8"); // restore r3
	asm("b syslock_trace_wait_done");

	asm("syslock_trace_signal:");
	asm("ldr r12, [sp,#9*4]"); // r12 = return address from SWI
	asm("stmdb sp!,{r0-r2,r12}"); // 4th item on stack is PC value for trace
	asm("ldr r0, fmsignal_trace_header");
	asm("mov r2, r9"); // current thread
	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("mov lr, pc");
	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,r12}");
	asm("b syslock_trace_signal_done");

	asm("fmsignal_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );

	asm("fmwait_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );
#endif

	}
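
// Overall shape of the slow exec path above (illustrative pseudocode only, not compiled;
// flag names as used in the asm):
//
//   if (n >= limit)                      return (*invalidExecHandler)();
//   if (flags & KExecFlagExtraArgMask)   r2 = &saved r3..r10 on the stack;
//   if (flags & KExecFlagClaim)          claim the system lock (TheScheduler.iLock),
//                                        yielding to its holder if necessary;
//   if (flags & KExecFlagPreprocess)     call the preprocess (handle lookup) handler;
//   result = exec function(...);
//   if (flags & KExecFlagRelease)        release the system lock, rescheduling if
//                                        another thread was waiting for it;
//   run pending user-mode callbacks if returning to mode_usr;
//   return result;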

/***************************************************************************
 * IRQ Postamble
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/
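
// Layout assumed on the mode_irq stack on entry (lowest address first): the optional
// USER_MEMORY_GUARD_SAVE_WORDS words of saved DACR state, then the interrupted r0-r3,
// r12 and the return address - 6 words plus the guard words, which is why the code below
// uses offsets of 4*(6+USER_MEMORY_GUARD_SAVE_WORDS).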

extern "C" __NAKED__ void __ArmVectorIrq()
	{
	// FIQs enabled here but not IRQs
	asm("ldr r1, __TheScheduler ");
	asm("mrs r0, spsr ");														// check interrupted mode
	asm("add r12, sp, #%a0 " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));		// r12=sp_irq+6 or 8 words
	asm("and r2, r0, #0x1f ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));			// r3=KernCSLocked
	asm("cmp r2, #0x10 ");														// check for mode_usr
	asm("cmpne r2, #0x13 ");													// or mode_svc
	asm("cmpeq r3, #0 ");														// and then check if kernel locked
	asm("bne IrqExit0 ");														// if wrong mode or locked, return immediately
	SET_INTS(r2, MODE_IRQ, INTS_ALL_OFF);										// disable FIQs before we check for reschedule
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// r2=DfcPendingFlag/RescheduleNeededFlag
	asm("add r3, r3, #1 ");
	SET_MODE_1(lr, MODE_SVC, INTS_ALL_ON);
	asm("cmp r2, #0 ");															// check if reschedule needed
	asm("beq IrqExit0 ");														// if not, return immediately
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	SET_MODE_2(lr, MODE_SVC, INTS_ALL_ON);										// mode_svc, interrupts back on

	asm("ldmdb r12!, {r1-r3} ");												// move saved registers (r0-r3,r12,pc) over to mode_svc stack
	asm("stmfd sp!, {r1-r3} ");
	asm("ldmdb r12!, {r1-r3} ");
	asm("stmfd sp!, {r1-r3} ");
	asm("stmfd sp!, {r0,lr} ");													// store lr_svc and interrupted cpsr on current mode_svc stack
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldmdb r12, {r1-r2} ");
	asm("stmfd sp!, {r1-r2} ");													// move user guard over to mode_svc stack
#endif

	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_IRQ, INTS_IRQ_OFF);										// mode_irq, IRQs off
	asm("add sp, r12, #24 ");													// restore mode_irq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);										// back to mode_svc, IRQs on

	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
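	// (irq_resched_return is also used as the return path by the FIQ postamble's
	// reschedule code, which points lr here before branching to TScheduler::Reschedule.)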
	asm("irq_do_resched: ");
	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);
	asm(".global irq_resched_return ");
	asm("irq_resched_return: ");

	SET_MODE(r2, MODE_SVC, INTS_ALL_OFF);										// all interrupts off
	asm("ldr r1, [sp, #%a0] " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));		// get interrupted cpsr, don't unbalance stack

#ifdef __CHECK_LOCK_STATE__
	asm("mov r2, r12 ");
	asm("tst r1, #0x0f ");
	asm("bleq  " CSM_Z14CheckLockStatev);
	asm("mov r12, r2 ");
#endif

	asm("tst r1, #0x0f ");
	asm("mov r3, #%a0 " : : "i" (NThread::EContextUserIntrCallback));
	asm("bleq callUserModeCallbacks ");											// call user-mode callbacks

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r1, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));		// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(r1,lr);
#endif

	asm("ldmfd sp!, {r1, lr} ");												// restore lr_svc
	asm("add sp, sp, #24 ");													// restore mode_svc stack balance
	asm("mov r12, sp ");														// r12=address of remaining saved registers

	SET_MODE(r2, MODE_IRQ, INTS_ALL_OFF);										// back into mode_irq, all interrupts off

	asm("msr spsr, r1 ");														// interrupted cpsr into spsr_irq
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmdb r12, {r0-r3,r12,pc}^ ");											// return from interrupt


	asm("IrqExit0: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst r0, #0x0f ");
	asm("bleq  " CSM_Z14CheckLockStatev);
#endif

	asm("IrqExit1: ");															// entry point for __ArmVectorIrqPostambleNoResched()
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr lr, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));		// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(lr,r12);
#endif

#ifdef BTRACE_CPU_USAGE
	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("mov r0, #%a0" : : "i" ((TInt)4 ) );
	asm("add r0, r0, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8)) );
	asm("cmp r2, #0");
	asm("movne lr, pc");
	asm("ldrne pc, [r1,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
#endif
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmfd sp!, {r0-r3,r12,pc}^ ");											// return from interrupt
	}

/***************************************************************************
 * IRQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorIrqPostambleNoResched()
	{
	// FIQs enabled here but not IRQs
	asm("ldr r1, __TheScheduler ");
	asm("b IrqExit1 ");
	}


/***************************************************************************
 * FIQ Postamble
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/
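
// When a reschedule is needed, __ArmVectorFiq below builds the same frame on the mode_svc
// stack as the IRQ postamble does - optional DACR guard words, then the interrupted cpsr,
// lr_svc, r0-r3, r12 and the return address - and then joins irq_resched_return above.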

extern "C" __NAKED__ void __ArmVectorFiq()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r12, [sp], #4 ");													// pop saved DACR
#endif
	asm("mrs r8, spsr ");														// check interrupted mode
	asm("and r10, r8, #0x1f ");
	asm("cmp r10, #0x10 ");														// check for mode_usr
	asm("ldr r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmpne r10, #0x13 ");													// or mode_svc
	asm("ldreq r10, [r9, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("cmpeq r11, #0 ");														// and check if kernel locked
	asm("bne FiqExit0 ");														// if wrong mode or kernel locked, return immediately
	asm("cmp r10, #0 ");														// check if reschedule needed
	asm("beq FiqExit0 ");														// if not, return from interrupt

	// we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
	asm("add r11, r11, #1 ");
	asm("str r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));			// lock the kernel
	asm("stmfd sp!, {r1-r3} ");													// save interrupted r1-r3 on FIQ stack
	asm("mov r1, r8 ");															// r1=interrupted cpsr
	asm("mov r3, sp ");															// r3 points to saved registers
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mov r2, r12 ");														// saved DACR into R2
#endif
	SET_MODE(lr, MODE_SVC, INTS_ALL_ON);										// switch to mode_svc, IRQs and FIQs back on
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("str r2, [sp, #%a0]! " : : "i" (-4*(8+USER_MEMORY_GUARD_SAVE_WORDS)));	// save DACR and leave room for spare, cpsr, lr_svc, r0-r3, r12, pc
#else
	asm("sub sp, sp, #32 ");													// make room for saved registers on mode_svc stack
#endif
	asm("ldr r2, [r3, #12] ");													// r2=return address
	asm("str r12, [sp, #%a0] " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));	// save r12 on mode_svc stack
	asm("str r2, [sp, #%a0] " : : "i" (4*(7+USER_MEMORY_GUARD_SAVE_WORDS)));	// save return address on mode_svc stack
	asm("add r12, sp, #%a0 " : : "i" (4*(USER_MEMORY_GUARD_SAVE_WORDS)));

	asm("stmia r12!, {r1,lr} ");												// save interrupted cpsr and lr_svc
	asm("ldmia r3, {r1,r2,lr} ");												// retrieve original r1-r3 from mode_fiq stack
	asm("stmia r12, {r0-r2,lr} ");												// save original r0-r3 - saved register order is now cpsr,lr_svc,r0-r3,r12,pc
	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_FIQ, INTS_ALL_OFF);										// mode_fiq, IRQs and FIQs off
	asm("add sp, r3, #16 ");													// restore mode_fiq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);										// back to mode_svc, IRQs on
	asm("adr lr, irq_resched_return ");
	asm("b  " CSM_ZN10TScheduler10RescheduleEv);								// do reschedule and return to irq_resched_return

	asm("FiqExit0:");															// also entry point for __ArmVectorFiqPostambleNoResched()
	USER_MEMORY_GUARD_RESTORE(r12,lr);

#ifndef BTRACE_CPU_USAGE
	ERRATUM_353494_MODE_CHANGE(,r11);
	asm("ldmfd sp!, {pc}^ ");													// return from interrupt
#else
	asm("ldrb r8, [r9,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("mov r10, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EFiqEnd<<BTrace::ESubCategoryIndex*8)) );
	asm("adr lr, FiqTraceExit0");
	asm("cmp r8, #0");
	ERRATUM_353494_MODE_CHANGE(eq,r8);
	asm("ldmeqfd sp!, {pc}^ ");													// return from interrupt if trace not enabled
	asm("stmfd sp!, {r0-r3} ");
	asm("add r0, r10, #%a0" : : "i" ((TInt)4 ) );
	asm("ldr pc, [r9,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("FiqTraceExit0:");
	ERRATUM_353494_MODE_CHANGE(,r3);
	asm("ldmfd sp!, {r0-r3,pc}^ ");												// return from interrupt
#endif

	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}

/***************************************************************************
 * FIQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorFiqPostambleNoResched()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r12, [sp], #4 ");													// pop saved DACR
#endif
	asm("b FiqExit0 ");
	}


extern "C" __NAKED__ void __ArmVectorAbortData()
//
// Data abort
//
	{
#if defined(__CPU_CORTEX_A8__) && (!defined(__CPU_ARM_A8_ERRATUM_447862_FIXED) || !defined(__CPU_ARM_A8_ERRATUM_451027_FIXED))
	ARM_DMBSH;								// ARM Cortex-A8 erratum 447862/451027 workaround
#endif
	asm("sub lr, lr, #8");					// lr now points to aborted instruction
	asm("stmfd	sp!, {r0-r4,r12,lr}");		// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX									// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);							// dummy STREX to reset exclusivity monitor
#endif
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
	// generic exception handler
	// come here with r1=exception code, lr points to aborted instruction, r0-r4,r12,lr saved
	asm("handle_exception: ");
	asm("mrs r0, spsr ");					// r0=value of cpsr when abort occurred

	asm("handle_exception2: ");
	asm("mrs r12, cpsr ");
	asm("and r3, r0, #0x1f ");				// r3=processor mode when abort occurred
	asm("bic r12, r12, #0xc0 ");
	asm("cmp r3, #0x10 ");					// aborted in user mode?
	asm("cmpne r3, #0x13 ");				// if not, aborted in mode_svc?
	asm("bne fatal_exception_mode ");		// if neither, fault
	asm("msr cpsr, r12 ");					// reenable interrupts - rescheduling disabled by mode_abt/mode_und
	asm("ldr r2, __TheScheduler ");
	asm("mov r3, sp ");						// r3 points to saved registers
	asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmp r4, #0 ");						// exception with kernel locked?
	asm("bne fatal_exception_mode ");		// if so, fault
	asm("add r4, r4, #1 ");					// lock the kernel
	asm("str r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("mov r4, #0x13 ");
	asm("msr cpsr, r4 ");					// mode_svc, interrupts on, kernel locked

	asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("tst r0, #0x0f ");					// check if exception in mode_usr
	asm("mov r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
	asm("streqb r2, [r4, #%a0]" : : "i" _FOFF(NThread,iSpare3));	// if so, set iUserContextType = EContextException
	asm("add r4, r4, #%a0" : : "i" _FOFF(NThread,iStackBase));
	asm("ldmia r4, {r2,r4} ");				// r2=supervisor stack area base, r4=size
	asm("subs r2, sp, r2 ");				// r2=amount of mode_svc stack remaining
	asm("blo fatal_exception_stack ");		// if stack pointer invalid, fault
	asm("cmp r2, r4 ");
	asm("bhi fatal_exception_stack ");
	asm("cmp r2, #128 ");					// check enough stack to handle exception
	asm("blo fatal_exception_stack ");		// if not, fault

	// At this point we are in mode_svc with interrupts enabled and the kernel locked.
	// We know the supervisor stack is valid and has enough free space to store the exception info.
	// Registers: R0=aborted cpsr, R1=exception type, R2,R4 scratch, R3 points to saved registers
	// on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).

	asm("ldr r4, [r3, #16] ");				// restore original r4
	asm("mov r2, sp ");						// r2=sp_svc when abort occurred
	asm("sub sp, sp, #92 ");				// push 23 words onto mode_svc stack
	asm("stmia sp, {r0-r2,r4-r11,lr} ");	// save cpsr, exc id, sp_svc, r4-r11, lr_svc
	asm("ldmia r3!, {r4-r10} ");			// get registers from mode_abt or mode_und stack
	asm("stmdb r2!, {r4-r7,r9,r10} ");		// transfer saved registers from exception stack except r4
	asm("stmdb r2, {r13,r14}^ ");			// save sp_usr and lr_usr
	asm("sub r2, r2, #20 ");

// Set r0 = fault address and r1 = fault status.
// For prefetch aborts use IFAR if it exists otherwise use the return address.
#ifdef __USE_CP15_FAULT_INFO__
	asm("cmp r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
#ifdef __CPU_ARM_HAS_SPLIT_FSR
	asm("mrcne p15, 0, r1, c5, c0, 0");		// r1 = data fault status
	asm("mrcne p15, 0, r0, c6, c0, 0");		// r0 = DFAR fault address
	asm("mrceq p15, 0, r1, c5, c0, 1");		// r1 = instruction fault status
#ifdef __CPU_ARM_HAS_CP15_IFAR
	asm("mrceq p15, 0, r0, c6, c0, 2");		// r0 = IFAR fault address
#else
	asm("moveq r0, r10");					// r0 = return address.
#endif // __CPU_ARM_HAS_CP15_IFAR
#else
	asm("mrcne p15, 0, r0, c6, c0");		// r0 = fault address
	asm("moveq r0, r10");					// r0 = return address.
	asm("mrc p15, 0, r1, c5, c0");			// r1 = fault status
#endif // __CPU_ARM_HAS_SPLIT_FSR
#endif // __USE_CP15_FAULT_INFO__

	asm("mrs r3, spsr ");					// r3=spsr_svc
	asm("stmia r2, {r0,r1,r3} ");			// save these
	asm("msr cpsr, r12 ");					// back into exception mode
	asm("add sp, sp, #28 ");				// restore exception stack balance
	asm("mov r5, #0x13 ");
	asm("msr cpsr, r5 ");					// back into mode_svc

	// Now we can unlock the kernel and process the exception
	asm("bl  " CSM_ZN10TScheduler10RescheduleEv);
	asm("msr cpsr, r5 ");					// enable interrupts

	// call the exception dispatcher, r3 is the current thread
	asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iHandlers));
	asm("mov r1, r3 ");
	asm("mov r0, sp ");						// r0 points to saved exception information
	asm("sub sp, sp, #4 ");					// make room for r0
	asm("bic sp, sp, #4 ");					// align stack to 8 byte boundary
	asm("str r0, [sp] ");					// save original stack pointer

	USER_MEMORY_GUARD_ON(,r11,lr);
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));
	asm("mov lr, pc ");
	__JUMP(,r12);							// call exception handler
	USER_MEMORY_GUARD_RESTORE(r11,lr);
	asm("ldr sp, [sp, #0] ");				// restore stack pointer

	// return from exception
	asm("ldr r0, __TheScheduler ");
	asm("mov r3, sp ");
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r0, [r3], #12 ");				// r0=cpsr, skip exc id and sp_svc
	asm("ldmfd r3!, {r4-r11,lr} ");			// restore r4-r11 and lr_svc
	asm("ldr r12, [r3, #8]! ");				// skip fault address and fault status, r12=spsr_svc
	asm("ldmib r3, {r13,r14}^ ");			// restore sp_usr and lr_usr
	asm("add r1, r3, #12 ");				// r1 points to saved r0-r3,r12,pc
	asm("mov r3, #0xd3 ");
	asm("msr cpsr, r3 ");					// mode_svc, all interrupts off
	asm("msr spsr, r12 ");					// restore spsr_svc
	asm("tst r0, #0x0f ");					// check if exception in mode_usr
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
	asm("nop ");							// ARM Cortex-A9 MPCore erratum 571622 workaround
	asm("nop ");							// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("bleq  " CSM_Z14CheckLockStatev);
	asm("tst r0, #0x0f ");					// recheck if exception in mode_usr
#endif
	asm("bne 1f ");

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	USER_MEMORY_GUARD_ON(,lr,r12);
	asm("tst lr, #0xc0000000 ");			// user memory enabled?
	asm("adrne lr, 2f ");					// yes - enable it after callbacks
#endif
	asm("adreq lr, 1f ");					// no - leave it disabled after callbacks
	asm("mov r3, #0 ");
	asm("b callUserModeCallbacks2 ");		// call user-mode callbacks
	asm("2: ");
	USER_MEMORY_GUARD_OFF(,lr,lr);

	asm("1: ");
	asm("tst r0, #0x0f ");					// check if exception in mode_usr
	asm("mov r3, #%a0 " : : "i" ((TInt)NThread::EContextUndefined));
	asm("streqb r3, [r2, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // if so, set iUserContextType = EContextUndefined
	asm("add sp, r1, #24 ");				// restore mode_svc stack balance
	asm("mov r2, #0xd7 ");
	asm("msr cpsr, r2 ");					// mode_abt, all interrupts off
	asm("msr spsr, r0 ");					// spsr_abt=aborted cpsr
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmia r1, {r0-r3,r12,pc}^ ");		// restore r0-r3,r12 and return from exception

	// get here if exception occurred in mode other than usr or svc
	// we are in mode_abt or mode_und with IRQs disabled
	asm("fatal_exception_mode: ");
	asm("ldr r2, __TheScheduler ");
	asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
	asm("cmp lr, #0 ");
	__JUMP(ne,lr);							// if crash debugger running, let it handle exception

	// get here if mode_svc stack has overflowed
	// we are in mode_svc with interrupts enabled and the kernel locked
	// R0=original CPSR R1=exc code R12=mode of exception
	asm("fatal_exception_stack: ");
	asm("orr r3, r12, #0xC0 ");
	asm("msr cpsr, r3 ");		// back to exception mode, all interrupts off
	asm("mov r2, r0 ");
	asm("ldr r0, __TheScheduler ");
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,i_Regs));	// pass in address of stored registers
	asm("bl "	CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
	asm("ldmia sp!, {r3-r7} ");	// get original R0-R4
	asm("stmia r0, {r1-r5} ");	// save original R0-R4
	asm("ldmia sp!, {r6,r7} ");	// get original R12 and aborted instruction address
	asm("str r6, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR12));
	asm("str r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
	asm("mov r1, #13 ");		// r1 = regnum
	asm("mrs r2, cpsr ");		// r2 = mode
	asm("mov r4, r0 ");
	asm("bl " CSM_ZN3Arm3RegER14SFullArmRegSetim );	// r0 = pointer to exception mode R13
	asm("str sp, [r0] ");		// save correct original value for exception mode R13

	// call the exception fault dispatcher
	asm("mov r0, #0 ");
	asm("b ExcFault ");
	}
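
// The prefetch abort and undefined instruction vectors below save the same small frame
// (r0-r4, r12, lr) on their own exception-mode stacks and branch into handle_exception /
// handle_exception2 above, so all three exception types share the common handler path.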

extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
//
// Prefetch abort
//
	{
	asm("sub lr, lr, #4");					// lr now points to instruction whose prefetch was aborted
	asm("stmfd	sp!, {r0-r4,r12,lr}");		// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX									// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);							// dummy STREX to reset exclusivity monitor
#endif
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("b handle_exception ");
	}

extern "C" __NAKED__ void __ArmVectorUndef()
//
// Undefined instruction exception
//
	{
	asm("sub lr, lr, #4");					// lr now points to undefined instruction
	asm("stmfd	sp!, {r0-r4,r12,lr}");		// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX									// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);							// dummy STREX to reset exclusivity monitor
#endif
	asm("mrs r0, spsr ");					// r0=CPSR at time of exception
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("tst r0, #0x20 ");					// exception in THUMB mode?
	asm("addne lr, lr, #2 ");				// if so, correct saved return address
	asm("strne lr, [sp, #24] ");
	asm("b handle_exception2 ");
	}