os/kernelhwsrv/kernel/eka/include/nkernsmp/arm/entry.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/include/nkernsmp/arm/entry.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,260 @@
     1.4 +// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32/include/nkernsmp/arm/entry.h
    1.18 +// 
    1.19 +//
    1.20 +
    1.21 +#include <arm_gic.h>
    1.22 +#include <arm_tmr.h>
    1.23 +
    1.24 +
    1.25 +extern "C" {
    1.26 +
    1.27 +extern void __ArmVectorReset();
    1.28 +extern void __ArmVectorUndef();
    1.29 +extern void __ArmVectorSwi();
    1.30 +extern void __ArmVectorAbortPrefetch();
    1.31 +extern void __ArmVectorAbortData();
    1.32 +extern void __ArmVectorReserved();
    1.33 +extern void __ArmVectorIrq();
    1.34 +extern void __ArmVectorFiq();
    1.35 +
    1.36 +#define __DECLARE_UNDEFINED_INSTRUCTION_HANDLER		asm(".word __ArmVectorUndef ")
    1.37 +#define __DECLARE_PREFETCH_ABORT_HANDLER			asm(".word __ArmVectorAbortPrefetch ")
    1.38 +#define __DECLARE_DATA_ABORT_HANDLER				asm(".word __ArmVectorAbortData ")
    1.39 +
    1.40 +#ifdef BTRACE_CPU_USAGE
    1.41 +extern void btrace_irq_entry(TInt);
    1.42 +extern void btrace_fiq_entry();
    1.43 +#endif
    1.44 +
    1.45 +extern void handle_crash_ipi();
    1.46 +
    1.47 +#ifdef _DEBUG
    1.48 +extern void __DebugMsgIrq(TUint aIrqNumber);
    1.49 +#endif
    1.50 +
    1.51 +/* NOTE: We must ensure that this code goes at the beginning of the kernel image.
    1.52 +*/
     1.53 +__NAKED__ void __this_must_go_at_the_beginning_of_the_kernel_image()	// ARM exception vector table + FIQ/IRQ dispatch; must sit at image offset 0
     1.54 +	{
     1.55 +	asm("ldr	pc, __reset_vector ");		// 00 = Reset vector
     1.56 +	asm("ldr	pc, __undef_vector ");		// 04 = Undefined instruction vector
     1.57 +	asm("ldr	pc, __swi_vector ");		// 08 = SWI vector
     1.58 +	asm("ldr	pc, __pabt_vector ");		// 0C = Prefetch abort vector
     1.59 +	asm("ldr	pc, __dabt_vector ");		// 10 = Data abort vector
     1.60 +	asm("ldr	pc, __unused_vector ");		// 14 = unused
     1.61 +	asm("b		HandleIrq ");				// 18 = IRQ vector
     1.62 +											// 1C = FIQ vector, code in situ
     1.63 +	/*** FIQ entry point ******************************************************/
     1.64 +	asm("ldr	r12, __ArmInterrupt ");		// r12 -> SArmInterruptInfo
     1.65 +#ifdef BTRACE_CPU_USAGE
     1.66 +	asm("ldr	r11, __BTraceCpuUsageFilter ");	// r11 -> BTrace ECpuUsage filter byte
     1.67 +#endif
     1.68 +	asm("sub	lr, lr, #4 ");				// lr = address of interrupted instruction
     1.69 +	asm("ldr	r10, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iFiqHandler)); // r10 points to FIQ handler
     1.70 +	asm("str	lr, [sp, #-4]! ");			// push return address onto FIQ stack
     1.71 +	// we assume FIQ handler preserves r0-r7 but not r8-r12
     1.72 +	// hence must be assembler, so stack misalignment OK
     1.73 +#if defined(__CPU_ARM_HAS_WORKING_CLREX)
     1.74 +	CLREX
     1.75 +#elif defined(__CPU_ARM_HAS_LDREX_STREX)
     1.76 +	STREX(8,14,13);						// dummy STREX to reset exclusivity monitor
     1.77 +#endif
     1.78 +#ifdef __USER_MEMORY_GUARDS_ENABLED__
     1.79 +	USER_MEMORY_GUARD_ON(,lr,r8);		// lr = original DACR
     1.80 +	asm("str lr, [sp, #-4]! ");			// push original DACR
     1.81 +#endif
     1.82 +#ifdef BTRACE_CPU_USAGE
     1.83 +	asm("ldrb	r8, [r11] ");				// r8 = ECpuUsage trace filter value
     1.84 +	asm("ldr	lr, _ArmVectorFiq ");		// FIQ handler returns to __ArmVectorFiq
     1.85 +	asm("cmp	r8, #0 ");					// CPU usage tracing enabled?
     1.86 +	asm("bne	btrace_fiq");
     1.87 +	__JUMP(,	r10);					// jump to FIQ handler
     1.88 +
     1.89 +	asm("btrace_fiq: ");				// call trace handler before fiq handler...
     1.90 +	asm("stmfd	sp!, {r0-r3,r12,lr} ");
     1.91 +	asm("adr	lr, btrace_fiq_return ");
     1.92 +	asm("ldr	pc, __btrace_fiq_entry ");
     1.93 +	asm("btrace_fiq_return: ");
     1.94 +	asm("ldmfd	sp!, {r0-r3,r12,lr} ");
     1.95 +	__JUMP(,	r10);					// jump to FIQ handler
     1.96 +#endif
     1.97 +	asm("ldr lr, _ArmVectorFiq ");		// FIQ handler returns to __ArmVectorFiq
     1.98 +	__JUMP(,r10);			// jump to FIQ handler
     1.99 +
    1.100 +
    1.101 +	/*** Nested IRQ register save *********************************************/
    1.102 +	asm("nested_irq: ");
    1.103 +	SRSDBW(MODE_SYS);					// save return address and return CPSR to interrupt stack
    1.104 +	CPSCHM(MODE_SYS);					// mode_sys, IRQs off
    1.105 +	asm("stmfd	sp!, {r0-r12,lr} ");	// save R0-R12,R14 from system mode
    1.106 +	GET_RWNO_TID(,r4);					// r4 -> this CPU's TSubScheduler
    1.107 +	asm("b nested_irq_rejoin ");
    1.108 +
    1.109 +	/*** IRQ entry point ******************************************************/
    1.110 +	asm("HandleIrq: ");
    1.111 +	asm("mrs	r13, spsr ");			// r13_irq = interrupted mode's CPSR
    1.112 +	asm("sub	lr, lr, #4 ");			// lr = address of interrupted instruction
    1.113 +	asm("and	r13, r13, #0x1f ");		// extract mode bits
    1.114 +	asm("cmp	r13, #0x1f ");			// interrupted mode_sys?
    1.115 +	asm("beq	nested_irq ");			// yes -> nested interrupt
    1.116 +	SRSDBW(MODE_SVC);					// save return address and return CPSR to supervisor stack
    1.117 +	__ASM_CLI_MODE(MODE_SVC);			// mode_svc, IRQs and FIQs off
    1.118 +	asm("sub	sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));	// make room for R0-R12,R13_usr,R14_usr
    1.119 +	asm("stmia	sp, {r0-r14}^ ");		// save R0-R12, R13_usr, R14_usr
    1.120 +	asm("mov	r1, #%a0" : : "i" ((TInt)SThreadExcStack::EIrq));	// exception type = IRQ
    1.121 +#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    1.122 +	CLREX
    1.123 +#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    1.124 +	STREX(12, 0, 13);					// dummy STREX to reset exclusivity monitor
    1.125 +#endif
    1.126 +	GET_RWNO_TID(,r4);					// r4 -> this CPU's TSubScheduler
    1.127 +	asm("mov	r5, sp ");				// r5 -> saved registers on SVC stack
    1.128 +	asm("str	r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));	// word describing exception type
    1.129 +	__ASM_STI2_MODE(MODE_SYS);			// mode_sys, IRQs off, FIQs on
    1.130 +	asm("ldr	sp, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqStackTop));	// switch to this CPU's IRQ stack
    1.131 +	USER_MEMORY_GUARD_ON(,r8,r0);		// r8 = original DACR if user memory guards in use
    1.132 +
    1.133 +	asm("nested_irq_rejoin: ");
    1.134 +	asm("ldr	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
    1.135 +	asm("ldr	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
    1.136 +	asm("ldr	r12, __ArmInterrupt ");	// r12 -> SArmInterruptInfo
    1.137 +	asm("ldr	r10, _ArmVectorIrq ");	// r10 = IRQ postamble address
    1.138 +	asm("add	r0, r0, #1 ");
    1.139 +	asm("add	r7, r7, #1 ");
    1.140 +	__DATA_MEMORY_BARRIER_Z__(r2);		// ensure memory accesses in interrupted code are observed before
    1.141 +										// the writes to i_IrqCount, i_IrqNestCount
    1.142 +	asm("str	r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));		// increment i_IrqCount
    1.143 +	asm("ldr	r11, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iIrqHandler));	// address of IRQ handler
    1.144 +	asm("ldr	r6, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));	// r6 -> GIC CPU interface
    1.145 +	asm("str	r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));	// increment i_IrqNestCount
    1.146 +
    1.147 +	asm("1: ");							// interrupt poll loop
    1.148 +#ifdef BTRACE_CPU_USAGE
    1.149 +	asm("ldr	r2, __BTraceCpuUsageFilter ");
    1.150 +#endif
    1.151 +	asm("mov	r1, #%a0" : : "i" ((TInt)E_GicIntId_Spurious+1));
    1.152 +	asm("ldr	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iAck));		// r0 = number of interrupt to service
    1.153 +#ifdef BTRACE_CPU_USAGE
    1.154 +	asm("ldrb	r2, [r2] ");
    1.155 +#endif
    1.156 +	asm("sub	r1, r1, #1 ");			// r1 = spurious interrupt ID
    1.157 +	asm("cmp	r0, r1 ");				// any more interrupts pending?
    1.158 +	asm("beq	2f ");					// if not, branch out
    1.159 +#ifdef BTRACE_CPU_USAGE
    1.160 +	asm("cmp	r2, #0 ");				// CPU usage tracing enabled?
    1.161 +	asm("beq	9f ");
    1.162 +	asm("stmfd	sp!, {r0-r3} ");
    1.163 +	asm("adr	lr, btrace_irq_return ");
    1.164 +	asm("ldr	pc, __btrace_irq_entry ");
    1.165 +	asm("btrace_irq_return: ");
    1.166 +	asm("ldmfd	sp!, {r0-r3} ");
    1.167 +	asm("9: ");
    1.168 +#endif	// BTRACE_CPU_USAGE
    1.169 +	ASM_DEBUG1(_longjump_Irq,r0);
    1.170 +	asm("adr	lr, 1b ");				// dispatcher returns to poll loop
    1.171 +	asm("tst	r0, #0x3e0 ");			// check for interrupt numbers 0-31
    1.172 +	asm("beq	3f ");					// branch out if so
    1.173 +	__JUMP(,r11);						// jump to dispatcher, R0 = interrupt number, return to 1:
    1.174 +										// dispatcher acknowledges interrupt
    1.175 +
    1.176 +	// No more interrupts pending - jump to postamble in the kernel
    1.177 +	// R4->TSubScheduler at this point, R5->saved registers on SVC stack if not nested IRQ
    1.178 +	// R6->GIC CPU interface
    1.179 +	asm("2: ");
    1.180 +	__JUMP(,r10);						// to __ArmVectorIrq
    1.181 +
    1.182 +	// Kernel IPI
    1.183 +	asm("3: ");
    1.184 +	asm("and	r2, r0, #31 ");			// r2 = interrupt number 0...31
    1.185 +	asm("cmp	r2, #%a0" : : "i" ((TInt)TIMESLICE_VECTOR));
    1.186 +	asm("beq	do_timeslice_irq ");
    1.187 +	asm("cmp	r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));
    1.188 +	asm("beq	do_resched_ipi ");
    1.189 +	asm("cmp	r2, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
    1.190 +	asm("beq	do_generic_ipi ");
    1.191 +	asm("cmp	r2, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
    1.192 +	asm("beq	do_transferred_ipi ");
    1.193 +	asm("cmp	r2, #15 ");
    1.194 +	__JUMP(hi,	r11);					// if >15 and not one of the special vectors above, call dispatcher
    1.195 +
    1.196 +	// else assume CRASH_IPI
    1.197 +	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
    1.198 +	__DATA_SYNC_BARRIER_Z__(r1);
    1.199 +	asm("ldr	r1, __HandleCrashIPI ");
    1.200 +	__JUMP(,	r1);					// CRASH IPI, so crash
    1.201 +
    1.202 +	// TIMESLICE, RESCHED or TRANSFERRED
    1.203 +	asm("do_timeslice_irq: ");
    1.204 +	asm("ldr	r2, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_LocalTimerAddr));	// r2 -> this CPU's local timer
    1.205 +	asm("mov	r1, #1 ");
    1.206 +	asm("str	r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer, iTimerIntStatus));	// clear timer event flag
    1.207 +	asm("do_resched_ipi: ");
    1.208 +	asm("mov	r1, #1 ");
    1.209 +	asm("strb	r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));	// flag reschedule needed
    1.210 +	asm("do_transferred_ipi: ");
    1.211 +	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
    1.212 +	__DATA_SYNC_BARRIER_Z__(r1);		// ensure writes to i_IrqCount, i_IrqNestCount, iRescheduleNeededFlag complete before SEV
    1.213 +										// also ensure EOI is written before we return from the interrupt
    1.214 +	ARM_SEV;							// kick any CPUs waiting for us to enter the ISR
    1.215 +	asm("b		1b ");					// back to poll loop
    1.216 +
    1.217 +	// GENERIC_IPI
    1.218 +	asm("do_generic_ipi: ");
    1.219 +	asm("ldr	r2, _GenericIPIIsr ");	// r2 = generic IPI handler
    1.220 +	asm("str	r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));		// acknowledge interrupt
    1.221 +	asm("mov	r0, r4 ");				// r0->SubScheduler
    1.222 +	__DATA_SYNC_BARRIER_Z__(r1);
    1.223 +	__JUMP(,	r2);					// jump to generic_ipi_isr
    1.224 +
    1.225 +	asm("__DebugMsg_longjump_Irq: ");
    1.226 +	asm("ldr	pc, _dmIrq ");
    1.227 +
    1.228 +	asm("__reset_vector:");				// literal pool of handler/data addresses follows
    1.229 +	asm(".word	__ArmVectorReset "); 
    1.230 +	asm("__undef_vector:");
    1.231 +	__DECLARE_UNDEFINED_INSTRUCTION_HANDLER;
    1.232 +	asm("__swi_vector:");
    1.233 +	asm(".word	__ArmVectorSwi "); 
    1.234 +	asm("__pabt_vector:");
    1.235 +	__DECLARE_PREFETCH_ABORT_HANDLER;
    1.236 +	asm("__dabt_vector:");
    1.237 +	__DECLARE_DATA_ABORT_HANDLER;
    1.238 +	asm("__unused_vector:");
    1.239 +	asm(".word	__ArmVectorReserved ");
    1.240 +
    1.241 +	asm("__ArmInterrupt: ");
    1.242 +	asm(".word	ArmInterruptInfo ");
    1.243 +	asm("_ArmVectorIrq: ");
    1.244 +	asm(".word	__ArmVectorIrq ");
    1.245 +	asm("_GenericIPIIsr: ");
    1.246 +	asm(".word generic_ipi_isr ");
    1.247 +	asm("_ArmVectorFiq: ");
    1.248 +	asm(".word	__ArmVectorFiq ");
    1.249 +	asm("__HandleCrashIPI: ");
    1.250 +	asm(".word handle_crash_ipi ");
    1.251 +#ifdef BTRACE_CPU_USAGE
    1.252 +	asm("__BTraceCpuUsageFilter: ");
    1.253 +	asm(".word	%a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
    1.254 +	asm("__btrace_irq_entry: ");
    1.255 +	asm(".word btrace_irq_entry ");
    1.256 +	asm("__btrace_fiq_entry: ");
    1.257 +	asm(".word btrace_fiq_entry ");
    1.258 +#endif
    1.259 +	asm("_dmIrq: ");
    1.260 +	asm(".word __DebugMsgIrq ");
    1.261 +	}
   1.262 +}
   1.263 +