os/kernelhwsrv/kernel/eka/include/nkernsmp/arm/entry.h
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.

// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/include/nkernsmp/arm/entry.h
// 
//

#include <arm_gic.h>
#include <arm_tmr.h>


extern "C" {

extern void __ArmVectorReset();
extern void __ArmVectorUndef();
extern void __ArmVectorSwi();
extern void __ArmVectorAbortPrefetch();
extern void __ArmVectorAbortData();
extern void __ArmVectorReserved();
extern void __ArmVectorIrq();
extern void __ArmVectorFiq();

#define __DECLARE_UNDEFINED_INSTRUCTION_HANDLER		asm(".word __ArmVectorUndef ")
#define __DECLARE_PREFETCH_ABORT_HANDLER			asm(".word __ArmVectorAbortPrefetch ")
#define __DECLARE_DATA_ABORT_HANDLER				asm(".word __ArmVectorAbortData ")

#ifdef BTRACE_CPU_USAGE
extern void btrace_irq_entry(TInt);
extern void btrace_fiq_entry();
#endif

extern void handle_crash_ipi();

#ifdef _DEBUG
extern void __DebugMsgIrq(TUint aIrqNumber);
#endif

/* NOTE: We must ensure that this code goes at the beginning of the kernel image.
*/
__NAKED__ void __this_must_go_at_the_beginning_of_the_kernel_image()
	{
	asm("ldr	pc, __reset_vector ");		// 00 = Reset vector
	asm("ldr	pc, __undef_vector ");		// 04 = Undefined instruction vector
	asm("ldr	pc, __swi_vector ");		// 08 = SWI vector
	asm("ldr	pc, __pabt_vector ");		// 0C = Prefetch abort vector
	asm("ldr	pc, __dabt_vector ");		// 10 = Data abort vector
	asm("ldr	pc, __unused_vector ");		// 14 = unused
	asm("b		HandleIrq ");				// 18 = IRQ vector
											// 1C = FIQ vector, code in situ
	/*** FIQ entry point ******************************************************/
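	// FIQ mode has its own banked R8-R14, so R8-R12 can be used as scratch here
	// without disturbing the interrupted context; only the return address is stacked.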
	asm("ldr	r12, __ArmInterrupt ");
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r11, __BTraceCpuUsageFilter ");
#endif
	asm("sub	lr, lr, #4 ");
	asm("ldr	r10, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iFiqHandler)); // r10 points to FIQ handler
	asm("str	lr, [sp, #-4]! ");
	// we assume FIQ handler preserves r0-r7 but not r8-r12
	// hence must be assembler, so stack misalignment OK
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(8,14,13);						// dummy STREX to reset exclusivity monitor
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	USER_MEMORY_GUARD_ON(,lr,r8);
	asm("str lr, [sp, #-4]! ");
#endif
#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r8, [r11] ");
	asm("ldr	lr, _ArmVectorFiq ");
	asm("cmp	r8, #0 ");
	asm("bne	btrace_fiq");
	__JUMP(,	r10);					// jump to FIQ handler

	asm("btrace_fiq: ");				// call trace handler before fiq handler...
	asm("stmfd	sp!, {r0-r3,r12,lr} ");
	asm("adr	lr, btrace_fiq_return ");
	asm("ldr	pc, __btrace_fiq_entry ");
	asm("btrace_fiq_return: ");
	asm("ldmfd	sp!, {r0-r3,r12,lr} ");
	__JUMP(,	r10);					// jump to FIQ handler
#endif
	asm("ldr lr, _ArmVectorFiq ");
	__JUMP(,r10);			// jump to FIQ handler


	/*** Nested IRQ register save *********************************************/
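	// Reached when the IRQ interrupted mode_sys, i.e. another interrupt was already
	// being handled: save the context on the IRQ stack rather than building a fresh
	// SThreadExcStack frame on the supervisor stack.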
	asm("nested_irq: ");
	SRSDBW(MODE_SYS);					// save return address and return CPSR to interrupt stack
	CPSCHM(MODE_SYS);					// mode_sys, IRQs off
	asm("stmfd	sp!, {r0-r12,lr} ");	// save R0-R12,R14 from system mode
	GET_RWNO_TID(,r4);
	asm("b nested_irq_rejoin ");

	/*** IRQ entry point ******************************************************/
	asm("HandleIrq: ");
	asm("mrs	r13, spsr ");
	asm("sub	lr, lr, #4 ");
	asm("and	r13, r13, #0x1f ");
	asm("cmp	r13, #0x1f ");			// interrupted mode_sys?
	asm("beq	nested_irq ");			// yes -> nested interrupt
	SRSDBW(MODE_SVC);					// save return address and return CPSR to supervisor stack
	__ASM_CLI_MODE(MODE_SVC);			// mode_svc, IRQs and FIQs off
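	// SRS has already pushed the return address and SPSR at the top of the frame;
	// drop SP by the offset of iR15 to make room for the rest of the SThreadExcStack.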
	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("stmia	sp, {r0-r14}^ ");		// save R0-R12, R13_usr, R14_usr
	asm("mov	r1, #%a0" : : "i" ((TInt)SThreadExcStack::EIrq));
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12, 0, 13);					// dummy STREX to reset exclusivity monitor
#endif
	GET_RWNO_TID(,r4);
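	// r4 = this CPU's TSubScheduler (kept in the RWNO thread ID register)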
	asm("mov	r5, sp ");
	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
	__ASM_STI2_MODE(MODE_SYS);			// mode_sys, IRQs off, FIQs on
	asm("ldr	sp, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqStackTop));
	USER_MEMORY_GUARD_ON(,r8,r0);		// r8 = original DACR if user memory guards in use

	asm("nested_irq_rejoin: ");
	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
	asm("ldr	r12, __ArmInterrupt ");
	asm("ldr	r10, _ArmVectorIrq ");
	asm("add	r0, r0, #1 ");
	asm("add	r7, r7, #1 ");
	__DATA_MEMORY_BARRIER_Z__(r2);		// ensure memory accesses in interrupted code are observed before
										// the writes to i_IrqCount, i_IrqNestCount
	asm("str	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));		// increment i_IrqCount
	asm("ldr	r11, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iIrqHandler));	// address of IRQ handler
	asm("ldr	r6, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));
	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));	// increment i_IrqNestCount

	asm("1: ");
#ifdef BTRACE_CPU_USAGE
	asm("ldr	r2, __BTraceCpuUsageFilter ");
#endif
	asm("mov	r1, #%a0" : : "i" ((TInt)E_GicIntId_Spurious+1));
	asm("ldr	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iAck));		// r0 = number of interrupt to service
#ifdef BTRACE_CPU_USAGE
	asm("ldrb	r2, [r2] ");
#endif
	asm("sub	r1, r1, #1 ");
	asm("cmp	r0, r1 ");				// any more interrupts pending?
	asm("beq	2f ");					// if not, branch out
#ifdef BTRACE_CPU_USAGE
	asm("cmp	r2, #0 ");
	asm("beq	9f ");
	asm("stmfd	sp!, {r0-r3} ");
	asm("adr	lr, btrace_irq_return ");
	asm("ldr	pc, __btrace_irq_entry ");
	asm("btrace_irq_return: ");
	asm("ldmfd	sp!, {r0-r3} ");
	asm("9: ");
#endif	// BTRACE_CPU_USAGE
	ASM_DEBUG1(_longjump_Irq,r0);
	asm("adr	lr, 1b ");
	asm("tst	r0, #0x3e0 ");			// check for interrupt numbers 0-31
	asm("beq	3f ");					// branch out if so
	__JUMP(,r11);						// jump to dispatcher, R0 = interrupt number, return to 1:
										// dispatcher acknowledges interrupt

	// No more interrupts pending - jump to postamble in the kernel
	// R4->TSubScheduler at this point, R5->saved registers on SVC stack if not nested IRQ
	// R6->GIC CPU interface
	asm("2: ");
	__JUMP(,r10);

	// Kernel IPI
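	// IDs 0-15 are GIC software-generated interrupts (used for IPIs); 16-31 are
	// private peripheral interrupts such as the per-CPU timer used for timeslicing.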
	asm("3: ");
	asm("and	r2, r0, #31 ");			// r2 = interrupt number 0...31
	asm("cmp	r2, #%a0" : : "i" ((TInt)TIMESLICE_VECTOR));
	asm("beq	do_timeslice_irq ");
	asm("cmp	r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));
	asm("beq	do_resched_ipi ");
	asm("cmp	r2, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
	asm("beq	do_generic_ipi ");
	asm("cmp	r2, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
	asm("beq	do_transferred_ipi ");
	asm("cmp	r2, #15 ");
	__JUMP(hi,	r11);					// if >15 but not TIMESLICE_VECTOR, call dispatcher

	// else assume CRASH_IPI
	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
	__DATA_SYNC_BARRIER_Z__(r1);
	asm("ldr	r1, __HandleCrashIPI ");
	__JUMP(,	r1);					// CRASH IPI, so crash

	// TIMESLICE, RESCHED or TRANSFERRED
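	// Note the fall-through: a timeslice tick clears the timer event flag, then falls
	// into do_resched_ipi to set iRescheduleNeededFlag, and then into
	// do_transferred_ipi to EOI the interrupt before rejoining the dispatch loop.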
	asm("do_timeslice_irq: ");
	asm("ldr	r2, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_LocalTimerAddr));
	asm("mov	r1, #1 ");
	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer, iTimerIntStatus));	// clear timer event flag
	asm("do_resched_ipi: ");
	asm("mov	r1, #1 ");
	asm("strb	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("do_transferred_ipi: ");
	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
	__DATA_SYNC_BARRIER_Z__(r1);		// ensure writes to i_IrqCount, i_IrqNestCount, iRescheduleNeededFlag complete before SEV
										// also ensure EOI is written before we return from the interrupt
	ARM_SEV;							// kick any CPUs waiting for us to enter the ISR
	asm("b		1b ");

	// GENERIC_IPI
	asm("do_generic_ipi: ");
	asm("ldr	r2, _GenericIPIIsr ");
	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
	asm("mov	r0, r4 ");				// r0->SubScheduler
	__DATA_SYNC_BARRIER_Z__(r1);
	__JUMP(,	r2);

	asm("__DebugMsg_longjump_Irq: ");
	asm("ldr	pc, _dmIrq ");

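	// Literal pool: word constants holding the handler and data addresses referenced
	// PC-relative by the LDR instructions above.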
	asm("__reset_vector:");
	asm(".word	__ArmVectorReset ");
	asm("__undef_vector:");
	__DECLARE_UNDEFINED_INSTRUCTION_HANDLER;
	asm("__swi_vector:");
	asm(".word	__ArmVectorSwi ");
	asm("__pabt_vector:");
	__DECLARE_PREFETCH_ABORT_HANDLER;
	asm("__dabt_vector:");
	__DECLARE_DATA_ABORT_HANDLER;
	asm("__unused_vector:");
	asm(".word	__ArmVectorReserved ");

	asm("__ArmInterrupt: ");
	asm(".word	ArmInterruptInfo ");
	asm("_ArmVectorIrq: ");
	asm(".word	__ArmVectorIrq ");
	asm("_GenericIPIIsr: ");
	asm(".word generic_ipi_isr ");
	asm("_ArmVectorFiq: ");
	asm(".word	__ArmVectorFiq ");
	asm("__HandleCrashIPI: ");
	asm(".word handle_crash_ipi ");
#ifdef BTRACE_CPU_USAGE
	asm("__BTraceCpuUsageFilter: ");
	asm(".word	%a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
	asm("__btrace_irq_entry: ");
	asm(".word btrace_irq_entry ");
	asm("__btrace_fiq_entry: ");
	asm(".word btrace_fiq_entry ");
#endif
	asm("_dmIrq: ");
	asm(".word __DebugMsgIrq ");
	}
}